Example #1
def absolute_pose_ransac(bs, Xs, method, threshold, iterations, probabilty):
    # in-house estimation
    if in_house_multiview:
        threshold = np.arccos(1 - threshold)
        params = pyrobust.RobustEstimatorParams()
        params.iterations = 1000
        result = pyrobust.ransac_absolute_pose(bs, Xs, threshold, params,
                                               pyrobust.RansacType.RANSAC)

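        # Convert the locally optimized model into the same [R | t] convention
        # that the pyopengv branch below returns.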
        Rt = result.lo_model.copy()
        R, t = Rt[:3, :3].copy(), Rt[:, 3].copy()
        Rt[:3, :3] = R.T
        Rt[:, 3] = -R.T.dot(t)
        return Rt
    else:
        try:
            return pyopengv.absolute_pose_ransac(bs,
                                                 Xs,
                                                 method,
                                                 threshold,
                                                 iterations=iterations,
                                                 probabilty=probabilty)
        except Exception:
            # Older versions of pyopengv do not accept the probability argument.
            return pyopengv.absolute_pose_ransac(bs, Xs, method, threshold,
                                                 iterations)
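A note on the threshold convention: the resect examples below pass the RANSAC
threshold to pyopengv as 1 - cos(angle), and the in-house branch above recovers
the angle with np.arccos(1 - threshold). A minimal sketch of that relationship,
assuming an angular tolerance of 0.004 radians (the default used by the resect
examples):

import numpy as np

angle = 0.004                                # angular tolerance in radians (assumed)
ransac_threshold = 1 - np.cos(angle)         # form passed to pyopengv.absolute_pose_ransac
recovered_angle = np.arccos(1 - ransac_threshold)
assert np.isclose(recovered_angle, angle)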
Example #2
def absolute_pose_known_rotation_ransac(bs, Xs, method, threshold, iterations,
                                        probabilty):
    # in-house estimation
    if in_house_multiview:
        threshold = np.arccos(1 - threshold)
        params = pyrobust.RobustEstimatorParams()
        params.iterations = 1000
        result = pyrobust.ransac_absolute_pose_known_rotation(
            bs, Xs, threshold, params, pyrobust.RansacType.RANSAC)

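        # Only the translation is estimated here (the rotation is known), so
        # return [I | t] in the same convention as the pyopengv branch below.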
        t = -result.lo_model.copy()
        R = np.identity(3)
        return np.concatenate((R, [[t[0]], [t[1]], [t[2]]]), axis=1)
    else:
        try:
            return pyopengv.absolute_pose_ransac(bs,
                                                 Xs,
                                                 method,
                                                 threshold,
                                                 iterations=iterations,
                                                 probabilty=probabilty)
        except Exception:
            # Older versions of pyopengv do not accept the probability argument.
            return pyopengv.absolute_pose_ransac(bs, Xs, method, threshold,
                                                 iterations)
Example #3
def absolute_pose_ransac(bs, Xs, method, threshold, iterations, probabilty):
    try:
        return pyopengv.absolute_pose_ransac(
            bs, Xs, method, threshold,
            iterations=iterations,
            probabilty=probabilty)
    except Exception:
        # Older versions of pyopengv do not accept the probability argument.
        return pyopengv.absolute_pose_ransac(
            bs, Xs, method, threshold, iterations)
Example #4
def absolute_pose_ransac(bs, Xs, method, threshold, iterations, probabilty):
    try:
        return pyopengv.absolute_pose_ransac(
            bs, Xs, method, threshold,
            iterations=iterations,
            probabilty=probabilty)
    except Exception:
        # Older versions of pyopengv do not accept the probability argument.
        return pyopengv.absolute_pose_ransac(
            bs, Xs, method, threshold, iterations)
Example #5
def resect(data, graph, reconstruction, shot_id):
    """Try resecting and adding a shot to the reconstruction.

    Return:
        True on success.
    """
    exif = data.load_exif(shot_id)
    camera = reconstruction.cameras[exif['camera']]

    # 1. collect, for all tracks seen in both this image and the reconstruction,
    #    the pixel bearings and the reconstructed 3D positions
    bs = []
    Xs = []
    for track in graph[shot_id]:
        if track in reconstruction.points:
            x = graph[track][shot_id]['feature']
            b = camera.pixel_bearing(x)
            bs.append(b)
            Xs.append(reconstruction.points[track].coordinates)
    bs = np.array(bs)
    Xs = np.array(Xs)
    if len(bs) < 5:
        return False

    # 2. estimate the pose of this camera using the KNEIP method
    threshold = data.config.get('resection_threshold', 0.004)
    T = pyopengv.absolute_pose_ransac(
        bs, Xs, "KNEIP", 1 - np.cos(threshold), 1000)

    R = T[:, :3]
    t = T[:, 3]

    # 3. reproject all points and figure out which are inliers
    reprojected_bs = R.T.dot((Xs - t).T).T
    reprojected_bs /= np.linalg.norm(reprojected_bs, axis=1)[:, np.newaxis]

    inliers = np.linalg.norm(reprojected_bs - bs, axis=1) < threshold
    ninliers = sum(inliers)

    # 4. log the resection inlier count
    logger.info("{} resection inliers: {} / {}".format(
        shot_id, ninliers, len(bs)))
    if ninliers >= data.config.get('resection_min_inliers', 15):
        # 5. if inliers are enough, then add this shot to the reconstruction
        R = T[:, :3].T
        t = -R.dot(T[:, 3])
        shot = types.Shot()
        shot.id = shot_id
        shot.camera = camera
        shot.pose = types.Pose()
        shot.pose.set_rotation_matrix(R)
        shot.pose.translation = t
        shot.metadata = get_image_metadata(data, shot_id)
        reconstruction.add_shot(shot)

        # 6. and do single view bundle adjustment
        bundle_single_view(graph, reconstruction, shot_id, data.config)
        return True
    else:
        return False
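Step 5 above inverts the estimated [R | t] before storing it on the shot: the
rotation is transposed and the translation becomes -R.T.dot(t). A minimal
standalone sketch of that inversion, assuming only NumPy (the helper name is
made up for illustration):

import numpy as np

def invert_pose(T):
    """Invert a 3x4 [R | t] rigid transform, as done in step 5 of resect()."""
    R, t = T[:, :3], T[:, 3]
    return np.hstack((R.T, (-R.T.dot(t)).reshape(3, 1)))

# Inverting twice recovers the original transform.
T = np.hstack((np.eye(3), np.ones((3, 1))))
assert np.allclose(invert_pose(invert_pose(T)), T)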
Example #6
def calc_inintial_guess(corners_in_img_arr, corners_in_pcd_arr, method="UPNP"):
    # number_of_points_for_initial = int(corners_in_img_arr.shape[0])
    img_bearing_vectors = []
    if params["camera_type"] == "panoramic":
        for pix in corners_in_img_arr:
            angs = pixel2angle(pix)
            img_bearing_vectors.append([
                np.cos(angs[0]) * np.cos(angs[1]),
                np.cos(angs[0]) * np.sin(angs[1]),
                np.sin(angs[0])
            ])
    elif params["camera_type"] == "perspective":
        inv_K = np.linalg.inv(intrinsic_paras)
        tmp_corners_in_img = np.hstack([
            corners_in_img_arr,
            1 + np.zeros(corners_in_img_arr.shape[0]).reshape(-1, 1)
        ])
        for pix in tmp_corners_in_img:
            tmp = np.dot(inv_K, pix.T).T
            img_bearing_vectors.append(tmp / np.linalg.norm(tmp))
    else:
        raise Exception("unsupported camera_type!")

    img_bearing_vectors = np.array(img_bearing_vectors)
    pcd_bearing_vectors = np.array(corners_in_pcd_arr) / np.linalg.norm(
        corners_in_pcd_arr, axis=1).reshape(-1, 1)
    #
    # ransac_transformation = pyopengv.relative_pose_ransac(img_bearing_vectors, pcd_bearing_vectors, "NISTER", 0.01,
    #                                                       1000)
    # # ransac_transformation = pyopengv.relative_pose_fivept_kneip(img_bearing_vectors, pcd_bearing_vectors)

    if method == "RANSAC":
        transformation = pyopengv.absolute_pose_ransac(img_bearing_vectors,
                                                       pcd_bearing_vectors,
                                                       "UPNP", 0.001, 100000)
    elif method == "EPNP":
        transformation = pyopengv.absolute_pose_epnp(img_bearing_vectors,
                                                     pcd_bearing_vectors)
    elif method == "UPNP":
        transformation = pyopengv.absolute_pose_upnp(img_bearing_vectors,
                                                     pcd_bearing_vectors)[0]
    else:
        raise Exception("unknown OpenGV method!")

    print("initial guess by relative pose: ", transformation)
    # print ransac_transformation
    angs = rotationMatrixToEulerAngles(transformation[:3, :3].T).tolist()
    ret = []
    ret.extend(angs)
    # scale = least_squares(cal_scale_cost, x0=np.random.random(),
    #                       args=(img_bearing_vectors, pcd_bearing_vectors, corners_in_pcd_arr, ransac_transformation),
    #                       method="lm", ftol=1e-10, max_nfev=20000)
    # print scale
    # print "estimated scale: ", scale.x
    # print scale.x * ransac_transformation[:3, 3]
    # ret.extend((scale.x * ransac_transformation[:3, 3]).tolist())
    ret.extend((-transformation[:3, 3]).tolist())
    return np.array(ret)
Example #7
def resect(data, graph, reconstruction, shot_id):
    """Try resecting and adding a shot to the reconstruction.

    Return:
        True on success.
    """
    exif = data.load_exif(shot_id)
    camera = reconstruction.cameras[exif['camera']]

    bs = []
    Xs = []
    for track in graph[shot_id]:
        if track in reconstruction.points:
            x = graph[track][shot_id]['feature']
            b = camera.pixel_bearing(x)
            bs.append(b)
            Xs.append(reconstruction.points[track].coordinates)
    bs = np.array(bs)
    Xs = np.array(Xs)
    if len(bs) < 5:
        return False, {'num_common_points': len(bs)}

    threshold = data.config['resection_threshold']
    T = pyopengv.absolute_pose_ransac(bs, Xs, "KNEIP", 1 - np.cos(threshold),
                                      1000)

    R = T[:, :3]
    t = T[:, 3]

    reprojected_bs = R.T.dot((Xs - t).T).T
    reprojected_bs /= np.linalg.norm(reprojected_bs, axis=1)[:, np.newaxis]

    inliers = np.linalg.norm(reprojected_bs - bs, axis=1) < threshold
    ninliers = sum(inliers)

    logger.info("{} resection inliers: {} / {}".format(shot_id, ninliers,
                                                       len(bs)))
    report = {
        'num_common_points': len(bs),
        'num_inliers': ninliers,
    }
    if ninliers >= data.config['resection_min_inliers']:
        R = T[:, :3].T
        t = -R.dot(T[:, 3])
        shot = types.Shot()
        shot.id = shot_id
        shot.camera = camera
        shot.pose = types.Pose()
        shot.pose.set_rotation_matrix(R)
        shot.pose.translation = t
        shot.metadata = get_image_metadata(data, shot_id)
        reconstruction.add_shot(shot)
        bundle_single_view(graph, reconstruction, shot_id, data.config)
        return True, report
    else:
        return False, report
Example #8
def resect(data, graph, reconstruction, shot_id):
    """Try resecting and adding a shot to the reconstruction.

    Return:
        True on success.
    """
    exif = data.load_exif(shot_id)
    camera = reconstruction.cameras[exif['camera']]

    bs = []
    Xs = []
    for track in graph[shot_id]:
        if track in reconstruction.points:
            x = graph[track][shot_id]['feature']
            b = camera.pixel_bearing(x)
            bs.append(b)
            Xs.append(reconstruction.points[track].coordinates)
    bs = np.array(bs)
    Xs = np.array(Xs)
    if len(bs) < 5:
        return False, {'num_common_points': len(bs)}

    threshold = data.config['resection_threshold']
    T = pyopengv.absolute_pose_ransac(
        bs, Xs, "KNEIP", 1 - np.cos(threshold), 1000)

    R = T[:, :3]
    t = T[:, 3]

    reprojected_bs = R.T.dot((Xs - t).T).T
    reprojected_bs /= np.linalg.norm(reprojected_bs, axis=1)[:, np.newaxis]

    inliers = np.linalg.norm(reprojected_bs - bs, axis=1) < threshold
    ninliers = int(sum(inliers))

    logger.info("{} resection inliers: {} / {}".format(
        shot_id, ninliers, len(bs)))
    report = {
        'num_common_points': len(bs),
        'num_inliers': ninliers,
    }
    if ninliers >= data.config['resection_min_inliers']:
        R = T[:, :3].T
        t = -R.dot(T[:, 3])
        shot = types.Shot()
        shot.id = shot_id
        shot.camera = camera
        shot.pose = types.Pose()
        shot.pose.set_rotation_matrix(R)
        shot.pose.translation = t
        shot.metadata = get_image_metadata(data, shot_id)
        reconstruction.add_shot(shot)
        bundle_single_view(graph, reconstruction, shot_id, data.config)
        return True, report
    else:
        return False, report
Example #9
def resect(data, graph, reconstruction, shot_id):
    '''Add a shot to the reconstruction.
    '''
    exif = data.load_exif(shot_id)
    camera = reconstruction.cameras[exif['camera']]

    bs = []
    Xs = []
    for track in graph[shot_id]:
        if track in reconstruction.points:
            x = graph[track][shot_id]['feature']
            b = camera.pixel_bearing(x)
            bs.append(b)
            Xs.append(reconstruction.points[track].coordinates)
    bs = np.array(bs)
    Xs = np.array(Xs)
    if len(bs) < 5:
        return False

    threshold = data.config.get('resection_threshold', 0.004)
    T = pyopengv.absolute_pose_ransac(bs, Xs, "KNEIP", 1 - np.cos(threshold),
                                      1000)

    R = T[:, :3]
    t = T[:, 3]

    reprojected_bs = R.T.dot((Xs - t).T).T
    reprojected_bs /= np.linalg.norm(reprojected_bs, axis=1)[:, np.newaxis]

    inliers = np.linalg.norm(reprojected_bs - bs, axis=1) < threshold
    ninliers = sum(inliers)

    print('Resection', shot_id, 'inliers:', ninliers, '/', len(bs))
    if ninliers >= data.config.get('resection_min_inliers', 15):
        R = T[:, :3].T
        t = -R.dot(T[:, 3])
        shot = types.Shot()
        shot.id = shot_id
        shot.camera = camera
        shot.pose = types.Pose()
        shot.pose.set_rotation_matrix(R)
        shot.pose.translation = t
        shot.metadata = get_image_metadata(data, shot_id)
        reconstruction.add_shot(shot)
        bundle_single_view(graph, reconstruction, shot_id, data.config)
        return True
    else:
        return False
Example #10
def resect(data, graph, reconstruction, shot_id):
    '''Add a shot to the reconstruction.
    '''
    exif = data.load_exif(shot_id)
    camera = reconstruction.cameras[exif['camera']]

    bs = []
    Xs = []
    for track in graph[shot_id]:
        if track in reconstruction.points:
            x = graph[track][shot_id]['feature']
            b = camera.pixel_bearing(x)
            bs.append(b)
            Xs.append(reconstruction.points[track].coordinates)
    bs = np.array(bs)
    Xs = np.array(Xs)
    if len(bs) < 5:
        return False

    threshold = data.config.get('resection_threshold', 0.004)
    T = pyopengv.absolute_pose_ransac(bs, Xs, "KNEIP", 1 - np.cos(threshold), 1000)

    R = T[:, :3]
    t = T[:, 3]

    reprojected_bs = R.T.dot((Xs - t).T).T
    reprojected_bs /= np.linalg.norm(reprojected_bs, axis=1)[:, np.newaxis]

    inliers = np.linalg.norm(reprojected_bs - bs, axis=1) < threshold
    ninliers = sum(inliers)

    print('Resection', shot_id, 'inliers:', ninliers, '/', len(bs))
    if ninliers >= data.config.get('resection_min_inliers', 15):
        R = T[:, :3].T
        t = -R.dot(T[:, 3])
        shot = types.Shot()
        shot.id = shot_id
        shot.camera = camera
        shot.pose = types.Pose()
        shot.pose.set_rotation_matrix(R)
        shot.pose.translation = t
        shot.metadata = get_image_metadata(data, shot_id)
        reconstruction.add_shot(shot)
        bundle_single_view(graph, reconstruction, shot_id, data.config)
        return True
    else:
        return False
Example #11
def resect(data, graph, reconstruction, shot_id):
    '''Add a shot to the reconstruction.
    '''
    exif = data.load_exif(shot_id)
    camera_id = exif['camera']
    camera = reconstruction['cameras'][camera_id]

    bs = []
    Xs = []
    for track in graph[shot_id]:
        if track in reconstruction['points']:
            x = graph[track][shot_id]['feature']
            b = multiview.pixel_bearing(x, camera)
            bs.append(b)
            Xs.append(reconstruction['points'][track]['coordinates'])
    bs = np.array(bs)
    Xs = np.array(Xs)
    if len(bs) < 5:
        return False

    threshold = data.config.get('resection_threshold', 0.004)
    T = pyopengv.absolute_pose_ransac(bs, Xs, "KNEIP", 1 - np.cos(threshold),
                                      1000)

    R = T[:, :3]
    t = T[:, 3]

    reprojected_bs = R.T.dot((Xs - t).T).T
    reprojected_bs /= np.linalg.norm(reprojected_bs, axis=1)[:, np.newaxis]

    inliers = np.linalg.norm(reprojected_bs - bs, axis=1) < threshold
    ninliers = sum(inliers)

    print('Resection', shot_id, 'inliers:', ninliers, '/', len(bs))
    if ninliers >= data.config.get('resection_min_inliers', 15):
        R = cv2.Rodrigues(T[:, :3].T)[0].ravel()
        t = -T[:, :3].T.dot(T[:, 3])
        reconstruction['shots'][shot_id] = {
            "camera": camera_id,
            "rotation": list(R.flat),
            "translation": list(t.flat),
        }
        add_gps_position(data, reconstruction['shots'][shot_id], shot_id)
        bundle_single_view(graph, reconstruction, shot_id, data.config)
        return True
    else:
        return False
Example #12
def calc_inintial_guess(corners_in_img_arr, corners_in_pcd_arr, method="UPNP"):
    # number_of_points_for_initial = int(corners_in_img_arr.shape[0])
    img_bearing_vectors = []
    for pix in corners_in_img_arr:
        angs = pixel2angle(pix)
        img_bearing_vectors.append([
            np.cos(angs[0]) * np.cos(angs[1]),
            np.cos(angs[0]) * np.sin(angs[1]),
            np.sin(angs[0])
        ])
    img_bearing_vectors = np.array(img_bearing_vectors)
    pcd_bearing_vectors = np.array(corners_in_pcd_arr) / np.linalg.norm(
        corners_in_pcd_arr, axis=1).reshape(-1, 1)
    #
    # ransac_transformation = pyopengv.relative_pose_ransac(img_bearing_vectors, pcd_bearing_vectors, "NISTER", 0.01,
    #                                                       1000)
    # # ransac_transformation = pyopengv.relative_pose_fivept_kneip(img_bearing_vectors, pcd_bearing_vectors)

    if method == "RANSAC":
        transformation = pyopengv.absolute_pose_ransac(img_bearing_vectors,
                                                       pcd_bearing_vectors,
                                                       "UPNP", 0.001, 100000)
    elif method == "EPNP":
        transformation = pyopengv.absolute_pose_epnp(img_bearing_vectors,
                                                     pcd_bearing_vectors)
    elif method == "UPNP":
        transformation = pyopengv.absolute_pose_upnp(img_bearing_vectors,
                                                     pcd_bearing_vectors)[0]

    # print "initial guess by relative pose: ", transformation
    # print ransac_transformation
    angs = rotationMatrixToEulerAngles(transformation[:3, :3].T).tolist()
    ret = []
    ret.extend(angs)
    # scale = least_squares(cal_scale_cost, x0=np.random.random(),
    #                       args=(img_bearing_vectors, pcd_bearing_vectors, corners_in_pcd_arr, ransac_transformation),
    #                       method="lm", ftol=1e-10, max_nfev=20000)
    # print scale
    # print "estimated scale: ", scale.x
    # print scale.x * ransac_transformation[:3, 3]
    # ret.extend((scale.x * ransac_transformation[:3, 3]).tolist())
    ret.extend((-transformation[:3, 3]).tolist())
    return np.array(ret)
Example #13
def test_absolute_pose_ransac():
    outliers = 0.25
    noise = 0.01
    d = RelativePoseDataset(100, noise, outliers)

    iterations = 100
    th = 100000.

    print(d.bearing_vectors2.shape)

    # NOTE: this early return leaves the rest of the test disabled.
    return

    ransac_transformation = pyopengv.absolute_pose_ransac(
        d.bearing_vectors2, d.points, "KNEIP", th, iterations)

    print("\n=========================================")
    print("\n=========================================")
    print("\nTest Absolute Pose RANSAC with:\n")
    print(" - %s points" % len(d.points))
    print(" - %s outliers" % (outliers * len(d.points)))
    print(" - Threshold: %s / Iterations: %s" % (th, iterations))

    print(
        "\nResult / Truth: \n %s" % np.hstack(
            (ransac_transformation[0][:, 3].reshape(
                3, 1), d.position.reshape(3, 1))))
    print(
        "Diff : %s \n" % abs(ransac_transformation[0][:, 3].reshape(3, 1) -
                             d.position.reshape(3, 1)).mean())
    print("Inliers : \n %s \n" % len(ransac_transformation[1]))

    ransac_transformation = pyopengv.absolute_pose_ransac_optimize(
        d.bearing_vectors2, d.points, "KNEIP", th, iterations)

    print(
        "Optimized Result / Truth: \n %s" % np.hstack(
            (ransac_transformation[0][:, 3].reshape(
                3, 1), d.position.reshape(3, 1))))
    print(
        "Diff : %s \n" % abs(ransac_transformation[0][:, 3].reshape(3, 1) -
                             d.position.reshape(3, 1)).mean())

    print("Done testing absolute pose ransac")
Example #14
def resect(data, graph, reconstruction, shot_id):
    """Add a shot to the reconstruction.
    """
    exif = data.load_exif(shot_id)
    camera_id = exif["camera"]
    camera = reconstruction["cameras"][camera_id]

    bs = []
    Xs = []
    for track in graph[shot_id]:
        if track in reconstruction["points"]:
            x = graph[track][shot_id]["feature"]
            b = multiview.pixel_bearing(x, camera)
            bs.append(b)
            Xs.append(reconstruction["points"][track]["coordinates"])
    bs = np.array(bs)
    Xs = np.array(Xs)
    if len(bs) < 5:
        return False

    threshold = data.config.get("resection_threshold", 0.004)
    T = pyopengv.absolute_pose_ransac(bs, Xs, "KNEIP", 1 - np.cos(threshold), 1000)

    R = T[:, :3]
    t = T[:, 3]

    reprojected_bs = R.T.dot((Xs - t).T).T
    reprojected_bs /= np.linalg.norm(reprojected_bs, axis=1)[:, np.newaxis]

    inliers = np.linalg.norm(reprojected_bs - bs, axis=1) < threshold
    ninliers = sum(inliers)

    print("Resection", shot_id, "inliers:", ninliers, "/", len(bs))
    if ninliers >= data.config.get("resection_min_inliers", 15):
        R = cv2.Rodrigues(T[:, :3].T)[0].ravel()
        t = -T[:, :3].T.dot(T[:, 3])
        reconstruction["shots"][shot_id] = {"camera": camera_id, "rotation": list(R.flat), "translation": list(t.flat)}
        add_gps_position(data, reconstruction["shots"][shot_id], shot_id)
        bundle_single_view(graph, reconstruction, shot_id, data.config)
        return True
    else:
        return False
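The dict-based variants (Examples #11 and #14) store the rotation as an
angle-axis vector obtained with cv2.Rodrigues rather than as a 3x3 matrix. A
small round-trip sketch, assuming OpenCV and NumPy are installed:

import cv2
import numpy as np

theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
rvec = cv2.Rodrigues(R)[0]                  # 3x1 angle-axis vector
stored = list(rvec.ravel())                 # flat list, as stored in reconstruction['shots']
R_back = cv2.Rodrigues(np.array(stored).reshape(3, 1))[0]
assert np.allclose(R, R_back)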
Example #15
def PnP(pc_to_align, pc_ref, desc_to_align, desc_ref, init_T, K, **kwargs):
    match_function = kwargs.pop('match_function', None)
    desc_function = kwargs.pop('desc_function', None)
    fit_pc = kwargs.pop('fit_pc', False)
    pnp_algo = kwargs.pop('pnp_algo', 'GAO')
    ransac_threshold = kwargs.pop('ransac_threshold', 0.0002)
    iterations = kwargs.pop('iterations', 1000)
    inliers_threshold = kwargs.pop('inliers_threshold', 0.1)
    diff_max = kwargs.pop('diff_max', 4.0)
    return_inliers_ratio = kwargs.pop('return_inliers_ratio', False)
    unfit = kwargs.pop('unfit', True)
    # Available pnp_algo values: KNEIP, GAO, EPNP, TWOPT, GP3P

    timing = False
    if timing:
        t_beg = time.time()

    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    if desc_function is not None:
        desc_ref = desc_function(pc_ref, desc_ref)
    else:
        desc_ref = pc_ref

    pc_rec = init_T.matmul(pc_to_align)

    if desc_function is not None:
        desc_ta = desc_function(pc_rec, desc_to_align)
    else:
        desc_ta = pc_rec

    if fit_pc:
        #match_function.fit(pc_ref[0])
        match_function.fit(pc_rec)
    else:
        #match_function.fit(desc_ref[0])
        match_function.fit(desc_ta)

    #res_match = match_function(pc_rec, pc_ref, desc_ta, desc_ref)
    res_match = match_function(pc_ref, pc_rec, desc_ref, desc_ta)
    if 'inliers' in res_match.keys():
        #pc_to_align = pc_to_align[0, :, res_match['inliers'][0].byte()].unsqueeze(0)
        pc_to_align = torch.inverse(init_T).matmul(
            res_match['nn'][0, :, res_match['inliers'][0].byte()].unsqueeze(0))
        #res_match['nn'] =  res_match['nn'][0, :, res_match['inliers'][0].byte()].unsqueeze(0)
        res_match['nn'] = pc_ref[0, :,
                                 res_match['inliers'][0].byte()].unsqueeze(0)

        if pc_to_align.size(2) < 4:
            logger.warning(
                "Less than 4 inliers found, returning initial pose")
            if unfit:
                match_function.unfit()
            res = {'T': init_T}
            if return_inliers_ratio:
                res['inliers'] = 0.0
            return res

    keypoints = reproject_back(pc_to_align, K.squeeze())

    bearing_vector = keypoints_to_bearing(keypoints, K.squeeze())

    non_nan_idx, _ = torch.min(bearing_vector == bearing_vector, dim=0)
    bearing_vector = bearing_vector[:, non_nan_idx]
    corr3d_pt = res_match['nn'][0, :3, non_nan_idx]

    T = pyopengv.absolute_pose_ransac(bearing_vector.t().cpu().numpy(),
                                      corr3d_pt.t().cpu().numpy(),
                                      algo_name=pnp_algo,
                                      threshold=ransac_threshold,
                                      iterations=iterations)

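    # The inlier count is read back from 'ransac_inliers.txt', which this code
    # expects the RANSAC call above to have written (presumably a modified
    # pyopengv build).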
    with open("ransac_inliers.txt", 'r') as f:
        inliers = int(f.read())
        #algo_name = pnp_algo, threshold = 0.0002, iterations = 1000)
    #T = pyopengv.absolute_pose_epnp(bearing_vector.t().cpu().numpy(), corr3d_pt.t().cpu().numpy())
    if pc_to_align.device == 'gpu':
        T = T.cuda()

    if timing:
        print('Iteration took {}s'.format(time.time() - t_beg))

    if unfit:
        match_function.unfit()

    if timing:
        print('PnP converged in {}s'.format(time.time() - t_beg))

    inliers_ratio = inliers / pc_to_align.size(2)

    final_T = pc_ref.new_zeros(4, 4)
    final_T[3, 3] = 1.0
    final_T[:3, :] = pc_ref.new_tensor(T)
    T_diff = torch.norm(init_T[0] - final_T)
    logger.debug('Inliers ratio: {}'.format(inliers_ratio))
    logger.debug('Diff in pose is {}'.format(T_diff.item()))

    if inliers_ratio < inliers_threshold or T_diff > diff_max:
        logger.debug('Not enough inliers (ratio: {})'.format(inliers_ratio))
        res = {'T': init_T}
        if return_inliers_ratio:
            res['inliers'] = inliers_ratio

        return res

    res = {'T': final_T.unsqueeze(0)}
    if return_inliers_ratio:
        res['inliers'] = inliers_ratio

    return res
Example #16
def test_relative_pose_triangulated():

    # Creating Random Poses / Points
    d = RelativePoseDataset(500, 0.002, 0.15)

    result = pyopengv.relative_pose_ransac_optimize(d.bearing_vectors1,
                                                    d.bearing_vectors2,
                                                    "NISTER", 4., 1000)

    E = result[0]

    print("Testing mini Odom\n")
    print("Relative Pose Computed : \n")

    R = E[:, :3]
    t = normalized(E[:, 3])

    print("Truth : \n %s \n Normalized Truth : %s \n" %
          (d.position, normalized(d.position)))
    print("Got : \n %s \n Normalized translation : \n %s \n" % (E, t))
    print("%s inliers / %s points \n" % (len(result[1]), len(d.points)))

    bearing_vectors_to_erase = []

    inliers = result[1]

    for i in range(len(d.bearing_vectors1)):
        if inliers != []:
            if inliers[0] == i:
                inliers.pop(0)
            else:
                bearing_vectors_to_erase.append(i)
        else:
            bearing_vectors_to_erase.append(i)

    bv_1 = np.delete(d.bearing_vectors1, bearing_vectors_to_erase, axis=0)
    bv_2 = np.delete(d.bearing_vectors2, bearing_vectors_to_erase, axis=0)

    pts_3d = pyopengv.triangulation_triangulate2(bv_1, bv_2, t, R)

    abs_pose = pyopengv.absolute_pose_ransac_optimize(bv_2, pts_3d, "KNEIP",
                                                      2., 500)

    print("Absolute Pose Computed : \n %s \n" % abs_pose[0])
    print("%s inliers / %s points \n" % (len(abs_pose[1]), len(pts_3d)))

    # for i in xrange(len(d.points)):
    #     d.bearing_vectors2[i] = R.T.dot(d.points[i] - t)

    result_ = pyopengv.absolute_pose_ransac(d.bearing_vectors2, d.points,
                                            "KNEIP", 0.001, 1000)

    print("\n----------------------\n")
    print("TEST \n")
    print("Truth : \n")
    print(np.hstack((d.rotation, d.position.reshape(3, 1))))
    print("Essential M. : \n")
    print(E)
    print()
    print(np.hstack((R, t.reshape(3, 1))))
    print()
    print(np.hstack((R, t.reshape(3, 1))) / E)

    print("\n Truth : \n")
    print(d.points[:5])
    print()
    print(d.bearing_vectors1[:5])
    print()
    print((d.bearing_vectors1 / d.points)[:5])
    print()

    print("\nAbsolute Pose : \n")
    print(normalized(result_[0][:, 3]))
    print()
    print(t)