Example #1
File: lmalign.py Project: caomw/menpo3d
import cv2
import numpy as np

from menpo.transform import Homogeneous, Rotation, Translation


def retrieve_view_projection_transforms(image, mesh, group=None):

    # Pinhole intrinsics: focal length approximated by the larger image
    # dimension, principal point at the image centre.
    rows = image.shape[0]
    cols = image.shape[1]
    max_d = max(rows, cols)
    camera_matrix = np.array([[max_d, 0, cols / 2.0], [0, max_d, rows / 2.0],
                              [0, 0, 1.0]])
    distortion_coeffs = np.zeros(4)

    # menpo stores 2D points as (y, x); OpenCV expects (x, y), hence the
    # [:, ::-1] flip on the image landmarks.
    converged, r_vec, t_vec = cv2.solvePnP(
        mesh.landmarks[group].lms.points,
        image.landmarks[group].lms.points[:, ::-1], camera_matrix,
        distortion_coeffs)

    # Convert the Rodrigues rotation vector to a 3x3 rotation matrix.
    rotation_matrix = cv2.Rodrigues(r_vec)[0]

    h_camera_matrix = np.eye(4)
    h_camera_matrix[:3, :3] = camera_matrix

    c = Homogeneous(h_camera_matrix)  # homogeneous intrinsics (unused below)
    t = Translation(t_vec.ravel())
    r = Rotation(rotation_matrix)

    # axes_flip_t and weak_projection_matrix are module-level helpers
    # defined elsewhere in lmalign.py.
    view_t_flipped = r.compose_before(t)
    view_t = view_t_flipped.compose_before(axes_flip_t)
    proj_t = Homogeneous(
        weak_projection_matrix(image.width, image.height,
                               view_t_flipped.apply(mesh)))
    return view_t, proj_t, r
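
A hypothetical call, as a minimal sketch: image and mesh are assumed to be
menpo objects sharing a landmark group (the group name 'face' is
illustrative), and the returned transforms chain to map mesh points into
image space.

view_t, proj_t, r = retrieve_view_projection_transforms(image, mesh,
                                                        group='face')
points_in_image = proj_t.apply(view_t.apply(mesh))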
Example #2
def rotation_from_3d_ccw_angle_around_y(theta, degrees=True):
    r"""
    Convenience constructor for 3D CCW rotations around the y axis

    Parameters
    ----------
    theta : `float`
        The angle of rotation about the origin
    degrees : `bool`, optional
        If ``True``, theta is interpreted as degrees. If ``False``, theta is
        interpreted as radians.

    Returns
    -------
    rotation : :map:`Rotation`
        A 3D rotation transform.
    """
    if degrees:
        # convert to radians
        theta = theta * np.pi / 180.0
    return Rotation(np.array([[np.cos(theta), 0,
                               np.sin(theta)], [0, 1, 0],
                              [-np.sin(theta), 0,
                               np.cos(theta)]]),
                    skip_checks=True)
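
A quick sanity check of the constructor above, assuming menpo's
Rotation.apply semantics (the matrix acting on the point, as the 2D test in
Example #17 exercises): a 90 degree CCW rotation about y sends the x axis
to -z.

r90 = rotation_from_3d_ccw_angle_around_y(90)
print(r90.apply(np.array([1.0, 0.0, 0.0])))  # approximately [0, 0, -1]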
Example #3
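The test snippets below are shown without their imports; they appear to
assume the following (module paths are menpo's public API):

import numpy as np
from numpy.testing import assert_allclose
from pytest import raises
from menpo.shape import PointCloud
from menpo.transform import (Homogeneous, Rotation, AlignmentRotation,
                             AlignmentSimilarity)
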
def test_basic_2d_rotation_axis_angle():
    rotation_matrix = np.array([[0, 1],
                                [-1, 0]])
    rotation = Rotation(rotation_matrix)
    axis, angle = rotation.axis_and_angle_of_rotation()
    assert_allclose(axis, np.array([0, 0, 1]))
    assert_allclose((90 * np.pi)/180, angle)
Example #4
def test_3d_rotation_as_vector():
    a = np.sqrt(3.0) / 2.0
    b = 0.5
    # this is a rotation of -30 degrees about the x axis
    rotation_matrix = np.array([[1, 0, 0], [0, a, b], [0, -b, a]])
    rotation = Rotation(rotation_matrix)
    assert_allclose(np.round(rotation.as_vector()[2:]), np.array([0., 0.]))
Example #5
def test_3d_rotation_inverse_eye():
    a = np.sqrt(3.0) / 2.0
    b = 0.5
    # this is a rotation of -30 degrees about the x axis
    rotation_matrix = np.array([[1, 0, 0], [0, a, b], [0, -b, a]])
    rotation = Rotation(rotation_matrix)
    transformed = rotation.compose_before(rotation.pseudoinverse())
    assert_allclose(np.eye(4), transformed.h_matrix, atol=1e-15)
Example #6
def test_basic_3d_rotation_axis_angle():
    a = np.sqrt(3.0) / 2.0
    b = 0.5
    # this is a rotation of -30 degrees about the x axis
    rotation_matrix = np.array([[1, 0, 0], [0, a, b], [0, -b, a]])
    rotation = Rotation(rotation_matrix)
    axis, angle = rotation.axis_and_angle_of_rotation()
    assert_allclose(axis, np.array([1, 0, 0]))
    assert_allclose((-30 * np.pi) / 180, angle)
Example #7
def test_align_2d_rotation():
    rotation_matrix = np.array([[0, 1], [-1, 0]])
    rotation = Rotation(rotation_matrix)
    source = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    target = rotation.apply(source)
    # estimate the transform from source and target
    estimate = AlignmentRotation(source, target)
    # check the estimate is correct
    assert_allclose(rotation.h_matrix, estimate.h_matrix, atol=1e-14)
Example #8
def test_rotation_compose_before_homog():
    # can't do this inplace - so should just give transform chain
    rotation = Rotation(np.array([[1, 0],
                                  [0, 1]]))
    homog = Homogeneous(np.array([[0, 1, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]]))
    res = rotation.compose_before(homog)
    assert type(res) == Homogeneous
Example #9
def test_align_2d_rotation_set_h_matrix_raises_notimplemented_error():
    rotation_matrix = np.array([[0, 1], [-1, 0]])
    rotation = Rotation(rotation_matrix)
    source = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    target = rotation.apply(source)
    # estimate the transform from source to source
    estimate = AlignmentRotation(source, source)
    # setting the h_matrix directly is not implemented for alignments
    with raises(NotImplementedError):
        estimate.set_h_matrix(rotation.h_matrix)
Example #10
def test_basic_3d_rotation():
    a = np.sqrt(3.0) / 2.0
    b = 0.5
    # this is a rotation of -30 degrees about the x axis
    rotation_matrix = np.array([[1, 0, 0], [0, a, b], [0, -b, a]])
    rotation = Rotation(rotation_matrix)
    starting_vector = np.array([0, 1, 0])
    transformed = rotation.apply(starting_vector)
    assert_allclose(np.array([0, a, -b]), transformed)
Example #11
    def _align_mean_shape_with_bbox(self, bbox):
        # Convert the 3D landmarks to 2D by dropping the z axis; the
        # [:, [1, 0]] indexing also swaps the first two axes.
        template_shape = PointCloud(self.mm.landmarks.points[:, [1, 0]])

        # Rotation that flips over x axis
        rot_matrix = np.eye(template_shape.n_dims)
        rot_matrix[0, 0] = -1
        template_shape = Rotation(rot_matrix,
                                  skip_checks=True).apply(template_shape)

        # Align the 2D landmarks' bbox with the provided bbox
        return AlignmentSimilarity(template_shape.bounding_box(),
                                   bbox).apply(template_shape)
Example #12
def retrieve_camera_matrix(image, mesh, group=None, initialize=True):
    import cv2

    # Helpers: drop the homogeneous coordinate and swap the x and y axes.
    drop_h = Homogeneous(np.eye(4)[:3])
    flip_xy_yx = Homogeneous(np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]))

    rows = image.shape[0]
    cols = image.shape[1]
    max_d = max(rows, cols)
    camera_matrix = np.array([[max_d, 0, cols / 2.0], [0, max_d, rows / 2.0],
                              [0, 0, 1.0]])
    distortion_coeffs = np.zeros(4)

    # Initial guess for rotation/translation.
    if initialize:
        r_vec = np.array([[-2.7193267], [-0.14545351], [-0.34661788]])
        t_vec = np.array([[0.], [0.], [280.]])
        converged, r_vec, t_vec = cv2.solvePnP(
            mesh.landmarks[group].lms.points,
            image.landmarks[group].lms.points[:, ::-1], camera_matrix,
            distortion_coeffs, r_vec, t_vec, useExtrinsicGuess=True)
    else:
        converged, r_vec, t_vec = cv2.solvePnP(
            mesh.landmarks[group].lms.points,
            image.landmarks[group].lms.points[:, ::-1], camera_matrix,
            distortion_coeffs)

    rotation_matrix = cv2.Rodrigues(r_vec)[0]

    h_camera_matrix = np.eye(4)
    h_camera_matrix[:3, :3] = camera_matrix

    t_vec = t_vec.ravel()

    if t_vec[2] < 0:
        print('Estimated camera position has a negative z component')

    c = Homogeneous(h_camera_matrix)
    t = Translation(t_vec)
    r = Rotation(rotation_matrix)

    view_t = r.compose_before(t)
    proj_t = c.compose_before(drop_h).compose_before(flip_xy_yx)
    return view_t, c, proj_t
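
A hypothetical usage of the transforms returned above, as a sketch (image
and mesh are assumed menpo objects with a shared landmark group): proj_t
already folds in the camera matrix c, so projection is a two-step chain.

view_t, c, proj_t = retrieve_camera_matrix(image, mesh, group='face')
points_in_image = proj_t.apply(view_t.apply(mesh))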
Example #13
import warnings

import numpy as np

from menpo.base import MenpoMissingDependencyError
from menpo.transform import Rotation, Translation


def align_2d_3d(points_3d,
                points_image,
                image_shape,
                focal_length=None,
                distortion_coeffs=None):
    try:
        import cv2
    except ImportError:
        raise MenpoMissingDependencyError('opencv3')
    height, width = image_shape
    # Create camera matrix
    focal_length = (max(height, width)
                    if focal_length is None else focal_length)
    c_x = width / 2.0
    c_y = height / 2.0
    camera_matrix = np.array([[focal_length, 0, c_x], [0, focal_length, c_y],
                              [0, 0, 1.0]])

    # If distortion coefficients are None, set them to zero
    if distortion_coeffs is None:
        distortion_coeffs = np.zeros(4)
    # Estimate the camera pose given the 3D sparse pointcloud on the
    # mesh, its 2D projection on the image, the camera matrix and the
    # distortion coefficients
    lm2d = points_image.points[:, ::-1].copy()
    converged, r_vec, t_vec = cv2.solvePnP(points_3d.points, lm2d,
                                           camera_matrix, distortion_coeffs)

    if not converged:
        warnings.warn('cv2.solvePnP did not converge to a solution')

    # Create rotation and translation transform objects from the vectors
    # acquired at the previous step
    rotation_matrix = cv2.Rodrigues(r_vec)[0]
    r = Rotation(rotation_matrix)
    t = Translation(t_vec.ravel())

    return r, t, focal_length
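
A minimal usage sketch with made-up correspondences (the coordinate values
are illustrative only; real usage would pass matching landmark sets, with
the image points in menpo's (y, x) ordering):

from menpo.shape import PointCloud

points_3d = PointCloud(np.array([[0., 0., 0.], [10., 0., 0.],
                                 [0., 10., 0.], [0., 0., 10.],
                                 [10., 10., 0.], [10., 0., 10.]]))
points_image = PointCloud(np.array([[240., 320.], [240., 400.],
                                    [160., 320.], [235., 325.],
                                    [165., 395.], [230., 330.]]))
r, t, focal_length = align_2d_3d(points_3d, points_image, (480, 640))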
Example #14
    @classmethod
    def init_from_2d_projected_shape(
        cls,
        points_3d,
        points_image,
        image_shape,
        focal_length=None,
        distortion_coeffs=None,
    ):
        from menpo3d.correspond import solve_pnp  # Avoid circular import

        model_view_t, _ = solve_pnp(
            points_image,
            points_3d,
            pinhole_intrinsic_matrix(
                image_shape[0], image_shape[1], focal_length=focal_length
            ),
            distortion_coefficients=distortion_coeffs,
        )
        rotation = Rotation(model_view_t.h_matrix[:3, :3])
        translation = Translation(model_view_t.h_matrix[:3, -1])
        return OrthographicCamera(
            rotation, translation, OrthographicProjection(focal_length, image_shape)
        )
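
A hypothetical construction via the classmethod above, reusing the points
from the previous example; an explicit focal_length is passed so that
OrthographicProjection does not receive None.

camera = OrthographicCamera.init_from_2d_projected_shape(
    points_3d, points_image, (480, 640), focal_length=640.0
)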
Example #15
def test_rotation2d_n_parameters_raises_notimplementederror():
    rot_matrix = np.eye(2)
    t = Rotation(rot_matrix)
    with raises(NotImplementedError):
        t.n_parameters
Example #16
File: pnp.py Project: conansherry/menpo3d
# SOLVEPNP_ITERATIVE is presumably a module-level alias for
# cv2.SOLVEPNP_ITERATIVE (cv2 itself is imported lazily inside the function).
def solve_pnp(
    points_2d,
    points_3d,
    intrinsic_matrix,
    distortion_coefficients=None,
    pnp_method=SOLVEPNP_ITERATIVE,
    n_iterations=100,
    reprojection_error=8.0,
    initial_transform=None,
):
    """
    Use OpenCV to solve the Perspective-N-Point (PnP) problem. RANSAC PnP is
    used, as this typically provides better results. The 2D and 3D points
    must be in one-to-one correspondence.

    Note the intrinsic matrix must be in "OpenCV" space and thus has the "x"
    and "y" axes flipped w.r.t. the menpo convention, e.g. the intrinsic
    matrix is defined as follows:

        [fx,  0, cx, 0]
        [ 0, fy, cy, 0]
        [ 0,  0,  1, 0]
        [ 0,  0,  0, 1]

    Parameters
    ----------
    points_2d : :map:`PointCloud` or subclass
        The 2D points in the image to solve the PnP problem with.
    points_3d : :map:`PointCloud` or subclass
        The 3D points to solve the PnP problem with.
    intrinsic_matrix : :map:`Homogeneous`
        The intrinsic matrix - if the intrinsic matrix is unknown, please see
        the usage of pinhole_intrinsic_matrix()
    distortion_coefficients : ``(D,)`` `ndarray`
        The distortion coefficients (if not given assumes 0 coefficients). See the
        OpenCV documentation for the distortion coefficient types that are supported.
    pnp_method : int
        The OpenCV PnP method, e.g. cv2.SOLVEPNP_ITERATIVE
    n_iterations : int
        The number of iterations to perform
    reprojection_error : float
        The maximum reprojection error to allow for a point to be considered an
        inlier.
    initial_transform : :map:`Homogeneous`
        The initial estimate for the cv2.SOLVEPNP_ITERATIVE method. Compatible
        with the model_view_t transform returned by this method.

    Returns
    -------
    model_view_t : :map:`Homogeneous`
        The combined ModelView transform. Can be used to place the 3D points
        in "eye space".
    proj_t : :map:`Homogeneous`
        A transform that can be used to project the input 3D points
        back into the image.
    """
    import cv2

    if distortion_coefficients is None:
        distortion_coefficients = np.zeros(4)

    r_vec = t_vec = None
    if initial_transform is not None:
        if pnp_method != cv2.SOLVEPNP_ITERATIVE:
            raise ValueError(
                "Initial estimates can only be given to SOLVEPNP_ITERATIVE")
        else:
            r_vec = cv2.Rodrigues(initial_transform.h_matrix[:3, :3])[0]
            t_vec = initial_transform.h_matrix[:3, -1].ravel()

    converged, r_vec, t_vec, _ = cv2.solvePnPRansac(
        points_3d.points,
        points_2d.points[:, ::-1],
        intrinsic_matrix.h_matrix[:3, :3],
        distortion_coefficients,
        flags=pnp_method,
        iterationsCount=n_iterations,
        reprojectionError=reprojection_error,
        useExtrinsicGuess=r_vec is not None,
        rvec=r_vec,
        tvec=t_vec,
    )
    if not converged:
        raise ValueError("cv2.solvePnPRansac failed to converge")

    rotation = Rotation(cv2.Rodrigues(r_vec)[0])
    translation = Translation(t_vec.ravel())

    model_view_t = rotation.compose_before(translation)
    # flip_xy_yx and _drop_h are module-level helpers in pnp.py: the former
    # swaps the axes back to menpo's (y, x) convention, the latter drops the
    # homogeneous coordinate.
    proj_t = intrinsic_matrix.compose_before(
        flip_xy_yx()).compose_before(_drop_h)
    return model_view_t, proj_t
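
A usage sketch tying the pieces together (pinhole_intrinsic_matrix is the
helper the docstring refers to; points_2d and points_3d are assumed to be
corresponding PointClouds): proj_t maps the posed 3D points back onto the
image.

intrinsics = pinhole_intrinsic_matrix(480, 640, focal_length=640.0)
model_view_t, proj_t = solve_pnp(points_2d, points_3d, intrinsics)
points_in_image = proj_t.apply(model_view_t.apply(points_3d))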
Example #17
def test_basic_2d_rotation():
    rotation_matrix = np.array([[0, 1],
                                [-1, 0]])
    rotation = Rotation(rotation_matrix)
    assert_allclose(np.array([0, -1]), rotation.apply(np.array([1, 0])))
Example #18
def test_rotation3d_n_parameters_raises_notimplementederror():
    rot_matrix = np.eye(3)
    t = Rotation(rot_matrix)
    # accessing n_parameters is not implemented for Rotation
    with raises(NotImplementedError):
        t.n_parameters
Example #19
def test_rotation_set_h_matrix_raises_notimplementederror():
    r = Rotation(np.array([[1, 0], [0, 1]]))
    with raises(NotImplementedError):
        r.set_h_matrix(r.h_matrix)