def __init__(self, image_shape):
    # flip axis 0 and axis 1 so indexing is as expected
    flip_xy = Homogeneous(np.array([[0, 1, 0],
                                    [1, 0, 0],
                                    [0, 0, 1]]))
    # scale to get the units correct
    scale = Scale(image_shape)
    self.flip_and_scale = flip_xy.compose_before(scale)
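# Usage sketch (not part of the class above): a minimal, hypothetical example
# of what the composed flip-and-scale transform does, assuming menpo's
# transform API. The (480, 640) image shape is illustrative only.
import numpy as np
from menpo.transform import Homogeneous, Scale

flip_xy = Homogeneous(np.array([[0, 1, 0],
                                [1, 0, 0],
                                [0, 0, 1]]))
flip_and_scale = flip_xy.compose_before(Scale(np.array([480, 640])))

# The point is swapped from (x, y) to (y, x) first, then each axis is
# multiplied by the image shape: [0.5, 0.25] -> [0.25, 0.5] -> [120., 320.]
print(flip_and_scale.apply(np.array([[0.5, 0.25]])))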
def test_homog_compose_before_nonuniformscale():
    homog = Homogeneous(np.array([[0, 1, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]]))
    s = NonUniformScale([3, 4])
    res = homog.compose_before(s)
    assert type(res) == Homogeneous
    assert_allclose(res.h_matrix, np.array([[0, 3, 0],
                                            [4, 0, 0],
                                            [0, 0, 1]]))
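# A short sketch of why the matrix asserted above is what it is, assuming the
# same menpo API as the test: compose_before applies `homog` first and `s`
# second, so the homogeneous matrices multiply as s.h_matrix @ homog.h_matrix.
import numpy as np
from numpy.testing import assert_allclose
from menpo.transform import Homogeneous, NonUniformScale

homog = Homogeneous(np.array([[0, 1, 0],
                              [1, 0, 0],
                              [0, 0, 1]]))
s = NonUniformScale([3, 4])
res = homog.compose_before(s)
assert_allclose(res.h_matrix, s.h_matrix.dot(homog.h_matrix))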
def test_homog_compose_before_alignment_nonuniformscale():
    homog = Homogeneous(np.array([[0, 1, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]]))
    scale = UniformScale(2.5, 2)
    source = PointCloud(np.array([[0, 1],
                                  [1, 1],
                                  [-1, -5],
                                  [3, -5]]))
    target = scale.apply(source)
    # estimate the transform from source and target
    s = AlignmentUniformScale(source, target)
    res = homog.compose_before(s)
    assert type(res) == Homogeneous
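# Sketch (assuming the menpo API used in the test above): composing with
# compose_before is equivalent to applying `homog` first and the estimated
# alignment scale second, which can be checked on a couple of arbitrary points.
import numpy as np
from numpy.testing import assert_allclose
from menpo.shape import PointCloud
from menpo.transform import (Homogeneous, UniformScale,
                             AlignmentUniformScale)

homog = Homogeneous(np.array([[0, 1, 0],
                              [1, 0, 0],
                              [0, 0, 1]]))
source = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
target = UniformScale(2.5, 2).apply(source)
s = AlignmentUniformScale(source, target)

res = homog.compose_before(s)
points = np.array([[2.0, -1.0], [0.5, 3.0]])
assert_allclose(res.apply(points), s.apply(homog.apply(points)))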
def retrieve_camera_matrix(image, mesh, group=None, initialize=True):
    import cv2

    # drop_h removes the homogeneous row of the 4x4 camera matrix when
    # composed below; flip_xy_yx swaps the axes so the projection ends up in
    # (y, x) image ordering.
    drop_h = Homogeneous(np.eye(4)[:3])
    flip_xy_yx = Homogeneous(np.array([[0, 1, 0],
                                       [1, 0, 0],
                                       [0, 0, 1]]))

    # Simple pinhole intrinsics: focal length set to the larger image side,
    # principal point at the image centre.
    rows = image.shape[0]
    cols = image.shape[1]
    max_d = max(rows, cols)
    camera_matrix = np.array([[max_d, 0, cols / 2.0],
                              [0, max_d, rows / 2.0],
                              [0, 0, 1.0]])
    distortion_coeffs = np.zeros(4)

    if initialize:
        # Initial guess for rotation/translation.
        r_vec = np.array([[-2.7193267], [-0.14545351], [-0.34661788]])
        t_vec = np.array([[0.], [0.], [280.]])
        converged, r_vec, t_vec = cv2.solvePnP(
            mesh.landmarks[group].lms.points,
            image.landmarks[group].lms.points[:, ::-1],
            camera_matrix, distortion_coeffs, r_vec, t_vec, 1)
    else:
        converged, r_vec, t_vec = cv2.solvePnP(
            mesh.landmarks[group].lms.points,
            image.landmarks[group].lms.points[:, ::-1],
            camera_matrix, distortion_coeffs)

    rotation_matrix = cv2.Rodrigues(r_vec)[0]

    # Embed the 3x3 intrinsics into a 4x4 homogeneous matrix.
    h_camera_matrix = np.eye(4)
    h_camera_matrix[:3, :3] = camera_matrix

    t_vec = t_vec.ravel()
    if t_vec[2] < 0:
        print('Position has a negative value in the z-axis')

    c = Homogeneous(h_camera_matrix)
    t = Translation(t_vec)
    r = Rotation(rotation_matrix)

    view_t = r.compose_before(t)
    proj_t = c.compose_before(drop_h).compose_before(flip_xy_yx)
    return view_t, c, proj_t
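# Hypothetical usage sketch: the asset paths and the 'face' landmark group are
# placeholders, and both the image and the mesh are assumed to carry matching
# landmark groups. Assumes menpo, menpo3d and OpenCV (cv2) are installed.
import menpo.io as mio
import menpo3d.io as m3io

image = mio.import_image('path/to/face.png')  # hypothetical path
mesh = m3io.import_mesh('path/to/face.obj')   # hypothetical path

view_t, camera, proj_t = retrieve_camera_matrix(image, mesh, group='face')
# view_t: rigid transform (rotation then translation) into camera space
# camera: the 4x4 homogeneous intrinsics matrix wrapped as a Homogeneous
# proj_t: projection from camera space towards (y, x) image coordinates
mesh_in_camera_space = view_t.apply(mesh)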
def tcoords_to_image_coords(image_shape):
    r"""
    Returns a :map:`Homogeneous` transform that converts [0, 1] texture
    coordinates (tcoords) used on :map:`TexturedTriMesh` instances to image
    coordinates, which behave just like image landmarks do.

    The operations that are performed are:

      - Flipping the origin from bottom-left to top-left
      - Permuting the axes so that st (or uv) -> yx
      - Scaling the tcoords by the image shape (denormalising them). Note
        that (1, 1) has to map to the highest pixel value, which is actually
        (h - 1, w - 1) due to Menpo being 0-based with image operations.

    Parameters
    ----------
    image_shape : `tuple`
        The shape of the texture that the tcoords index into.

    Returns
    -------
    :map:`Homogeneous`
        A transform that, when applied to texture coordinates, converts them
        to image coordinates.
    """
    # flip the 'y' st 1 -> 0 and 0 -> 1, moving the axis to upper left
    invert_unit_y = Homogeneous(
        np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 1.0], [0.0, 0.0, 1.0]])
    )
    # flip axis 0 and axis 1 so indexing is as expected
    flip_xy_yx = Homogeneous(
        np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
    )
    return invert_unit_y.compose_before(flip_xy_yx).compose_before(
        Scale(np.array(image_shape) - 1)
    )
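# A minimal usage sketch, assuming menpo is installed; the (256, 128) texture
# shape is illustrative. The corners of the unit texture square land on the
# corners of the 0-based pixel grid, with the origin moved to the top-left
# and the axes permuted to (y, x).
import numpy as np

tcoords = np.array([[0.0, 0.0],
                    [1.0, 0.0],
                    [0.0, 1.0],
                    [1.0, 1.0]])
transform = tcoords_to_image_coords((256, 128))
print(transform.apply(tcoords))  # each row is a (y, x) image coordinate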