def non_rigid_icp_generator(
    source,
    target,
    eps=1e-3,
    stiffness_weights=None,
    data_weights=None,
    landmark_group=None,
    landmark_weights=None,
    v_i_update_func=None,
    verbose=False,
):
    r"""
    Deforms the source trimesh to optimally align with the target.

    Yields the current deformed instance (in the original scale of the
    source) and a per-iteration info dict after every inner solve, so
    callers can monitor or drive the fitting process.

    Parameters
    ----------
    source : `TriMesh`
        The template mesh that will be deformed.
    target : `TriMesh`
        The mesh the template is deformed towards.
    eps : `float`, optional
        Convergence threshold on the normalized change in the solution X.
    stiffness_weights : `list` of `float` or ``(n_points,)`` `ndarray`, optional
        One stiffness (alpha) per outer iteration; per-vertex arrays are
        also accepted. Defaults to values tuned for rigidly-aligned faces.
    data_weights : `list`, optional
        Optional per-iteration data-term weights (gamma), applied
        multiplicatively to the per-vertex mask w_i.
    landmark_group : `str`, optional
        Name of a landmark group present on both meshes, used both for an
        initial similarity alignment and as a soft constraint in the solve.
    landmark_weights : `list` of `float`, optional
        One landmark weight (beta) per outer iteration.
    v_i_update_func : `callable`, optional
        Hook that receives the current deformed template (original space)
        and returns an updated one - typically used by Active NICP.
    verbose : `bool`, optional
        If ``True``, print progress information.

    Yields
    ------
    (instance, info_dict) : (`TriMesh`, `dict`)
        The current deformed source (restored to the original scale) and
        diagnostic information for this iteration.
    """
    # If landmarks are provided, we should always start with a simple
    # AlignmentSimilarity between the landmarks to initialize optimally.
    if landmark_group is not None:
        if verbose:
            print("'{}' landmarks will be used as "
                  "a landmark constraint.".format(landmark_group))
            print("performing similarity alignment using landmarks")
        lm_align = AlignmentSimilarity(
            source.landmarks[landmark_group],
            target.landmarks[landmark_group]).as_non_alignment()
        source = lm_align.apply(source)

    # Scale factors completely change the behavior of the algorithm - always
    # rescale the source down to a sensible size (so it fits inside box of
    # diagonal 1) and is centred on the origin. We'll undo this after the fit
    # so the user can use whatever scale they prefer.
    tr = Translation(-1 * source.centre())
    sc = UniformScale(1.0 / np.sqrt(np.sum(source.range() ** 2)), 3)
    prepare = tr.compose_before(sc)

    source = prepare.apply(source)
    target = prepare.apply(target)

    # store how to undo the similarity transform
    restore = prepare.pseudoinverse()

    n_dims = source.n_dims
    # Homogeneous dimension (1 extra for translation effects)
    h_dims = n_dims + 1
    points, trilist = source.points, source.trilist
    n = points.shape[0]  # record number of points

    edge_tris = source.boundary_tri_index()

    M_s, unique_edge_pairs = node_arc_incidence_matrix(source)

    # weight matrix
    G = np.identity(n_dims + 1)

    M_kron_G_s = sp.kron(M_s, G)

    # build octree for finding closest points on target.
    target_vtk = trimesh_to_vtk(target)
    closest_points_on_target = VTKClosestPointLocator(target_vtk)

    # save out the target normals. We need them for the weight matrix.
    target_tri_normals = target.tri_normals()

    # init transformation
    X_prev = np.tile(np.zeros((n_dims, h_dims)), n).T
    v_i = points

    if stiffness_weights is not None:
        if verbose:
            print("using user-defined stiffness_weights")
        validate_weights("stiffness_weights", stiffness_weights,
                         source.n_points, verbose=verbose)
    else:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        stiffness_weights = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]
        if verbose:
            print("using default "
                  "stiffness_weights: {}".format(stiffness_weights))

    n_iterations = len(stiffness_weights)

    if landmark_weights is not None:
        if verbose:
            print("using user defined "
                  "landmark_weights: {}".format(landmark_weights))
    elif landmark_group is not None:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        landmark_weights = [5, 2, 0.5, 0, 0, 0, 0, 0]
        if verbose:
            print("using default "
                  "landmark_weights: {}".format(landmark_weights))
    else:
        # no landmark_weights provided - no landmark_group in use. We still
        # need a landmark group for the iterator
        landmark_weights = [None] * n_iterations

    # We should definitely have some landmark weights set now - check the
    # number is correct.
    # Note we say verbose=False, as we have done custom reporting above, and
    # per-vertex landmarks are not supported.
    validate_weights(
        "landmark_weights",
        landmark_weights,
        source.n_points,
        n_iterations=n_iterations,
        verbose=False,
    )

    if data_weights is not None:
        if verbose:
            print("using user-defined data_weights")
        validate_weights(
            "data_weights",
            data_weights,
            source.n_points,
            n_iterations=n_iterations,
            verbose=verbose,
        )
    else:
        data_weights = [None] * n_iterations
        if verbose:
            print("Not customising data_weights")

    # we need to prepare some indices for efficient construction of the D
    # sparse matrix.
    row = np.hstack((np.repeat(np.arange(n)[:, None], n_dims, axis=1).ravel(),
                     np.arange(n)))
    x = np.arange(n * h_dims).reshape((n, h_dims))
    col = np.hstack((x[:, :n_dims].ravel(), x[:, n_dims]))
    o = np.ones(n)

    if landmark_group is not None:
        source_lm_index = source.distance_to(
            source.landmarks[landmark_group]).argmin(axis=0)
        target_lms = target.landmarks[landmark_group]
        U_L = target_lms.points
        n_landmarks = target_lms.n_points
        # NOTE: np.isin replaces the deprecated np.in1d (identical result
        # for 1-D inputs).
        lm_mask = np.isin(row, source_lm_index)
        col_lm = col[lm_mask]
        # pull out the rows for the lms - but the values are all wrong!
        # need to map them back to the order of the landmarks
        row_lm_to_fix = row[lm_mask]
        source_lm_index_l = list(source_lm_index)
        row_lm = np.array([source_lm_index_l.index(r) for r in row_lm_to_fix])

    for i, (alpha, beta, gamma) in enumerate(
            zip(stiffness_weights, landmark_weights, data_weights), 1):
        alpha_is_per_vertex = isinstance(alpha, np.ndarray)
        if alpha_is_per_vertex:
            # stiffness is provided per-vertex
            if alpha.shape[0] != source.n_points:
                raise ValueError()
            alpha_per_edge = alpha[unique_edge_pairs].mean(axis=1)
            alpha_M_s = sp.diags(alpha_per_edge).dot(M_s)
            alpha_M_kron_G_s = sp.kron(alpha_M_s, G)
        else:
            # stiffness is global - just a scalar multiply. Note that here
            # we don't have to recalculate M_kron_G_s
            alpha_M_kron_G_s = alpha * M_kron_G_s

        if verbose:
            a_str = (alpha if not alpha_is_per_vertex
                     else "min: {:.2f}, max: {:.2f}".format(alpha.min(),
                                                            alpha.max()))
            i_str = "{}/{}: stiffness: {}".format(i, len(stiffness_weights),
                                                  a_str)
            if landmark_group is not None:
                i_str += " lm_weight: {}".format(beta)
            print(i_str)

        j = 0
        while True:  # iterate until convergence
            j += 1  # track the iterations for this alpha/landmark weight

            # find nearest neighbour and the normals
            U, tri_indices = closest_points_on_target(v_i)

            # ---- WEIGHTS ----
            # 1. Edges
            # Are any of the corresponding tris on the edge of the target?
            # Where they are we return a false weight (we *don't* want to
            # include these points in the solve)
            w_i_e = np.isin(tri_indices, edge_tris, invert=True)

            # 2. Normals
            # Calculate the normals of the current v_i
            v_i_tm = TriMesh(v_i, trilist=trilist, copy=False)
            v_i_n = v_i_tm.vertex_normals()
            # Extract the corresponding normals from the target
            u_i_n = target_tri_normals[tri_indices]
            # If the dot of the normals is lt 0.9 don't contrib to deformation
            w_i_n = (u_i_n * v_i_n).sum(axis=1) > 0.9

            # 3. Self-intersection
            # This adds approximately 12% to the running cost and doesn't seem
            # to be very critical in helping mesh fitting performance so for
            # now it's removed. Revisit later.
            # # Build an intersector for the current deformed target
            # intersect = build_intersector(to_vtk(v_i_tm))
            # # budge the source points 1% closer to the target
            # source = v_i + ((U - v_i) * 0.5)
            # # if the vector from source to target intersects the deformed
            # # template we don't want to include it in the optimisation.
            # problematic = [i for i, (s, t) in enumerate(zip(source, U))
            #                if len(intersect(s, t)[0]) > 0]
            # print(len(problematic) * 1.0 / n)
            # w_i_i = np.ones(v_i_tm.n_points, dtype=bool)
            # w_i_i[problematic] = False

            # Form the overall w_i from the normals, edge case
            # for now disable the edge constraint (it was noisy anyway)
            w_i = w_i_n

            # w_i = np.logical_and(w_i_n, w_i_e).astype(float)

            # we could add self intersection at a later date too...
            # w_i = np.logical_and(np.logical_and(w_i_n,
            #                                     w_i_e,
            #                                     w_i_i).astype(float)

            prop_w_i = (n - w_i.sum() * 1.0) / n
            prop_w_i_n = (n - w_i_n.sum() * 1.0) / n
            prop_w_i_e = (n - w_i_e.sum() * 1.0) / n

            if gamma is not None:
                w_i = w_i * gamma

            # Build the sparse diagonal weight matrix.
            # NOTE: np.float was removed from NumPy (>=1.24); the builtin
            # float is the documented equivalent (float64).
            W_s = sp.diags(w_i.astype(float)[None, :], [0])

            data = np.hstack((v_i.ravel(), o))
            D_s = sp.coo_matrix((data, (row, col)))

            to_stack_A = [alpha_M_kron_G_s, W_s.dot(D_s)]
            to_stack_B = [
                np.zeros((alpha_M_kron_G_s.shape[0], n_dims)),
                U * w_i[:, None],
            ]  # nullify nearest points by w_i

            if landmark_group is not None:
                D_L = sp.coo_matrix((data[lm_mask], (row_lm, col_lm)),
                                    shape=(n_landmarks, D_s.shape[1]))
                to_stack_A.append(beta * D_L)
                to_stack_B.append(beta * U_L)

            A_s = sp.vstack(to_stack_A).tocsr()
            B_s = sp.vstack(to_stack_B).tocsr()
            X = spsolve(A_s, B_s)

            # deform template
            v_i_prev = v_i
            v_i = D_s.dot(X)
            delta_v_i = v_i - v_i_prev

            if v_i_update_func:
                # custom logic is provided to update the current template
                # deformation. This is typically used by Active NICP.

                # take the v_i points matrix and convert back to a TriMesh in
                # the original space
                def_template = restore.apply(source.from_vector(v_i.ravel()))

                # perform the update
                updated_def_template = v_i_update_func(def_template)

                # convert back to points in the NICP space
                v_i = prepare.apply(updated_def_template.points)

            err = np.linalg.norm(X_prev - X, ord="fro")
            stop_criterion = err / np.sqrt(np.size(X_prev))

            if landmark_group is not None:
                src_lms = v_i[source_lm_index]
                lm_err = np.sqrt((src_lms - U_L) ** 2).sum(axis=1).mean()

            if verbose:
                v_str = (" - {} stop crit: {:.3f} "
                         "total: {:.0%} norms: {:.0%} "
                         "edges: {:.0%}".format(j, stop_criterion, prop_w_i,
                                                prop_w_i_n, prop_w_i_e))
                if landmark_group is not None:
                    v_str += " lm_err: {:.4f}".format(lm_err)
                print(v_str)

            X_prev = X

            # track the progress of the algorithm per-iteration
            info_dict = {
                "alpha": alpha,
                "iteration": j,
                "prop_omitted": prop_w_i,
                "prop_omitted_norms": prop_w_i_n,
                "prop_omitted_edges": prop_w_i_e,
                "delta": err,
                "mask_normals": w_i_n,
                "mask_edges": w_i_e,
                "mask_all": w_i,
                "nearest_points": restore.apply(U),
                "deformation_per_step": delta_v_i,
            }

            current_instance = source.copy()
            current_instance.points = v_i.copy()

            # use `is not None` for consistency with the guards above (a
            # falsy-but-present group name would otherwise be skipped here)
            if landmark_group is not None:
                info_dict["beta"] = beta
                info_dict["lm_err"] = lm_err
                current_instance.landmarks[landmark_group] = PointCloud(
                    src_lms)

            yield restore.apply(current_instance), info_dict

            if stop_criterion < eps:
                break
def align_mesh_to_template(source, target, scale_corrective=1.2): scale = Scale((target.norm() / source.norm()) * scale_corrective, n_dims=target.n_dims) translation = Translation(target.centre() - source.centre()) return translation.compose_before(scale)
def augment_face_image(img, image_size=256, crop_size=248, angle_range=30,
                       flip=True, warp_mode='constant'):
    """Basic image augmentation: random crop, rotation and horizontal flip.

    Parameters
    ----------
    img : menpo ``Image``
        The face image to augment; landmarks (68-point groups) are mirrored
        alongside the pixels when flipping.
    image_size : `int`, optional
        Side length the output image is resized to.
    crop_size : `int`, optional
        Side length of the random crop taken before rotation.
    angle_range : `float`, optional
        Rotation angle is drawn uniformly from [-angle_range, angle_range]
        degrees.
    flip : `bool`, optional
        If ``True``, flip horizontally with probability 0.5.
    warp_mode : `str`, optional
        Out-of-bounds fill mode passed to ``warp_to_shape``.

    Returns
    -------
    menpo ``Image``
        The augmented image, resized to ``(image_size, image_size)``.
    """

    # from menpo
    def round_image_shape(shape, round):
        if round not in ['ceil', 'round', 'floor']:
            raise ValueError('round must be either ceil, round or floor')
        # Ensure that the '+' operator means concatenate tuples.
        # NOTE: np.int was removed from NumPy (>=1.24); the builtin int is
        # the documented replacement.
        return tuple(getattr(np, round)(shape).astype(int))

    # taken from MDM
    def mirror_landmarks_68(lms, im_size):
        return PointCloud(abs(np.array([0, im_size[1]]) - lms.as_vector(
        ).reshape(-1, 2))[mirrored_parts_68])

    # taken from MDM
    def mirror_image(im):
        im = im.copy()
        im.pixels = im.pixels[..., ::-1].copy()

        for group in im.landmarks:
            lms = im.landmarks[group]
            if lms.points.shape[0] == 68:
                im.landmarks[group] = mirror_landmarks_68(lms, im.shape)

        return im

    flip_rand = np.random.random() > 0.5
    # rot_rand = np.random.random() > 0.5
    # crop_rand = np.random.random() > 0.5
    rot_rand = True  # like ECT
    crop_rand = True  # like ECT

    if crop_rand:
        lim = image_size - crop_size
        min_crop_inds = np.random.randint(0, lim, 2)
        max_crop_inds = min_crop_inds + crop_size
        img = img.crop(min_crop_inds, max_crop_inds)

    if flip and flip_rand:
        img = mirror_image(img)

    if rot_rand:
        rot_angle = 2 * angle_range * np.random.random_sample() - angle_range
        # img = img.rotate_ccw_about_centre(rot_angle)

        # Get image's bounding box coordinates
        bbox = bounding_box((0, 0), [img.shape[0] - 1, img.shape[1] - 1])
        # Translate to origin and rotate counter-clockwise
        trans = Translation(-img.centre(), skip_checks=True).compose_before(
            Rotation.init_from_2d_ccw_angle(rot_angle, degrees=True))
        rotated_bbox = trans.apply(bbox)
        # Create new translation so that min bbox values go to 0
        t = Translation(-rotated_bbox.bounds()[0])
        trans.compose_before_inplace(t)
        rotated_bbox = trans.apply(bbox)
        # Output image's shape is the range of the rotated bounding box
        # while respecting the users rounding preference.
        shape = round_image_shape(rotated_bbox.range() + 1, 'round')
        img = img.warp_to_shape(shape, trans.pseudoinverse(),
                                warp_landmarks=True, mode=warp_mode)

    img = img.resize([image_size, image_size])

    return img
def test_translation(): t_vec = np.array([1, 2, 3]) starting_vector = np.random.rand(10, 3) transform = Translation(t_vec) transformed = transform.apply(starting_vector) assert_allclose(starting_vector + t_vec, transformed)
def translation_compose_after_uniformscale_test(): t = Translation([3, 4]) s = UniformScale(2, 2) res = t.compose_after(s) assert(type(res) == Similarity)
def test_translation_from_list(): t_a = Translation([3, 4]) t_b = Translation(np.array([3, 4])) assert (np.all(t_a.h_matrix == t_b.h_matrix))
def test_5d_translation(): t_vec = np.ones(5) Translation(t_vec)
def manual_no_op_chain_test(): points = PointCloud(np.random.random([10, 2])) t = Translation([3, 4]) chain = TransformChain([t, t.pseudoinverse]) points_applied = chain.apply(points) assert(np.allclose(points_applied.points, points.points))
def noisy_alignment_similarity_transform(source, target, noise_type='uniform',
                                         noise_percentage=0.1,
                                         allow_alignment_rotation=False):
    r"""
    Constructs and perturbs the optimal similarity transform between the
    source and target shapes by adding noise to its parameters.

    Parameters
    ----------
    source : `menpo.shape.PointCloud`
        The source pointcloud instance used in the alignment
    target : `menpo.shape.PointCloud`
        The target pointcloud instance used in the alignment
    noise_type : ``{'uniform', 'gaussian'}``, optional
        The type of noise to be added.
    noise_percentage : `float` in ``(0, 1)`` or `list` of `len` `3`, optional
        The standard percentage of noise to be added. If `float`, then the
        same amount of noise is applied to the scale, rotation and
        translation parameters of the optimal similarity transform. If
        `list` of `float` it must have length 3, where the first, second
        and third elements denote the amount of noise to be applied to the
        scale, rotation and translation parameters, respectively.
    allow_alignment_rotation : `bool`, optional
        If ``False``, then the rotation is not considered when computing the
        optimal similarity transform between source and target.

    Returns
    -------
    noisy_alignment_similarity_transform : `menpo.transform.Similarity`
        The noisy Similarity Transform between source and target.

    Raises
    ------
    ValueError
        If ``noise_type`` is neither ``'gaussian'`` nor ``'uniform'``.
    """
    if isinstance(noise_percentage, float):
        noise_percentage = [noise_percentage] * 3
    elif len(noise_percentage) == 1:
        noise_percentage *= 3

    similarity = AlignmentSimilarity(source, target,
                                     rotation=allow_alignment_rotation)

    # BUGFIX: the original compared strings with `is`, which tests object
    # identity and is not guaranteed to work for equal string values.
    # np.asscalar was removed from NumPy (>=1.23); ndarray.item() is the
    # documented replacement.
    if noise_type == 'gaussian':
        s = noise_percentage[0] * (0.5 / 3) * np.random.randn(1).item()
        r = noise_percentage[1] * (180 / 3) * np.random.randn(1).item()
        t = noise_percentage[2] * (target.range() / 3) * np.random.randn(2)

        s = scale_about_centre(target, 1 + s)
        r = rotate_ccw_about_centre(target, r)
        t = Translation(t, source.n_dims)
    elif noise_type == 'uniform':
        # BUGFIX: the uniform branch drew the scale noise from randn
        # (gaussian); rand matches the documented 'uniform' behaviour and
        # the 2*x - 1 mapping to [-1, 1) used for r and t below.
        s = noise_percentage[0] * 0.5 * (2 * np.random.rand(1).item() - 1)
        r = noise_percentage[1] * 180 * (2 * np.random.rand(1).item() - 1)
        t = noise_percentage[2] * target.range() * (2 * np.random.rand(2) - 1)

        s = scale_about_centre(target, 1. + s)
        r = rotate_ccw_about_centre(target, r)
        t = Translation(t, source.n_dims)
    else:
        raise ValueError('Unexpected noise type. '
                         'Supported values are {gaussian, uniform}')

    return similarity.compose_after(t.compose_after(s.compose_after(r)))
def non_rigid_icp(source, target, eps=1e-3, stiffness_values=None,
                  verbose=False, landmarks=None, lm_weight=None):
    r"""
    Deforms the source trimesh to optimally align with the target.

    Parameters
    ----------
    source : `TriMesh`
        The template mesh that will be deformed.
    target : `TriMesh`
        The mesh the template is deformed towards.
    eps : `float`, optional
        Convergence threshold on the normalized change in the solution X.
    stiffness_values : `list` of `float`, optional
        One stiffness (alpha) per outer iteration. Defaults to values tuned
        for rigidly-aligned facial meshes.
    verbose : `bool`, optional
        If ``True``, print progress information.
    landmarks : `str`, optional
        Name of a landmark group present on both meshes, used as a soft
        constraint in the solve.
    lm_weight : `list` of `float`, optional
        One landmark weight (beta) per outer iteration.

    Returns
    -------
    `dict`
        With keys ``deformed_source``, ``matched_target``,
        ``matched_tri_indices``, ``info`` and (when landmarks are used)
        ``source_lm_index``.
    """
    # Scale factors completely change the behavior of the algorithm - always
    # rescale the source down to a sensible size (so it fits inside box of
    # diagonal 1) and is centred on the origin. We'll undo this after the fit
    # so the user can use whatever scale they prefer.
    tr = Translation(-1 * source.centre())
    sc = UniformScale(1.0 / np.sqrt(np.sum(source.range() ** 2)), 3)
    prepare = tr.compose_before(sc)

    source = prepare.apply(source)
    target = prepare.apply(target)

    # store how to undo the similarity transform
    restore = prepare.pseudoinverse()

    n_dims = source.n_dims
    # Homogeneous dimension (1 extra for translation effects)
    h_dims = n_dims + 1
    points, trilist = source.points, source.trilist
    n = points.shape[0]  # record number of points

    edge_tris = source.boundary_tri_index()

    M_s = node_arc_incidence_matrix(source)

    # weight matrix
    G = np.identity(n_dims + 1)

    M_kron_G_s = sp.kron(M_s, G)

    # build octree for finding closest points on target.
    target_vtk = trimesh_to_vtk(target)
    closest_points_on_target = VTKClosestPointLocator(target_vtk)

    # save out the target normals. We need them for the weight matrix.
    target_tri_normals = target.tri_normals()

    # init transformation
    X_prev = np.tile(np.zeros((n_dims, h_dims)), n).T
    v_i = points

    if stiffness_values is not None:
        stiffness = stiffness_values
        if verbose:
            print('using user defined stiffness values: '
                  '{}'.format(stiffness))
    else:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        stiffness = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]
        if verbose:
            print('using default stiffness values: {}'.format(stiffness))

    if lm_weight is not None:
        lm_weight = lm_weight
        if verbose:
            print('using user defined lm_weight values: '
                  '{}'.format(lm_weight))
    else:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        lm_weight = [5, 2, .5, 0, 0, 0, 0, 0]
        if verbose:
            print('using default lm_weight values: {}'.format(lm_weight))

    # to store per iteration information
    info = []

    # we need to prepare some indices for efficient construction of the D
    # sparse matrix.
    row = np.hstack((np.repeat(np.arange(n)[:, None], n_dims, axis=1).ravel(),
                     np.arange(n)))
    x = np.arange(n * h_dims).reshape((n, h_dims))
    col = np.hstack((x[:, :n_dims].ravel(), x[:, n_dims]))

    if landmarks is not None:
        if verbose:
            print(
                "'{}' landmarks will be used as a landmark constraint.".format(
                    landmarks))
        source_lm_index = source.distance_to(
            source.landmarks[landmarks].lms).argmin(axis=0)
        target_lms = target.landmarks[landmarks].lms
        U_L = target_lms.points
        n_landmarks = target_lms.n_points
        # NOTE: np.isin replaces the deprecated np.in1d (identical result
        # for 1-D inputs).
        lm_mask = np.isin(row, source_lm_index)
        col_lm = col[lm_mask]
        # pull out the rows for the lms - but the values are all wrong!
        # need to map them back to the order of the landmarks
        row_lm_to_fix = row[lm_mask]
        source_lm_index_l = list(source_lm_index)
        row_lm = np.array([source_lm_index_l.index(r) for r in row_lm_to_fix])

    o = np.ones(n)

    for alpha, beta in zip(stiffness, lm_weight):
        alpha_M_kron_G_s = alpha * M_kron_G_s  # get the term for stiffness

        j = 0
        while True:  # iterate until convergence
            # find nearest neighbour and the normals
            U, tri_indices = closest_points_on_target(v_i)

            # ---- WEIGHTS ----
            # 1. Edges
            # Are any of the corresponding tris on the edge of the target?
            # Where they are we return a false weight (we *don't* want to
            # include these points in the solve)
            w_i_e = np.isin(tri_indices, edge_tris, invert=True)

            # 2. Normals
            # Calculate the normals of the current v_i
            v_i_tm = TriMesh(v_i, trilist=trilist, copy=False)
            v_i_n = v_i_tm.vertex_normals()
            # Extract the corresponding normals from the target
            u_i_n = target_tri_normals[tri_indices]
            # If the dot of the normals is lt 0.9 don't contrib to deformation
            w_i_n = (u_i_n * v_i_n).sum(axis=1) > 0.9

            # 3. Self-intersection
            # This adds approximately 12% to the running cost and doesn't seem
            # to be very critical in helping mesh fitting performance so for
            # now it's removed. Revisit later.
            # # Build an intersector for the current deformed target
            # intersect = build_intersector(to_vtk(v_i_tm))
            # # budge the source points 1% closer to the target
            # source = v_i + ((U - v_i) * 0.5)
            # # if the vector from source to target intersects the deformed
            # # template we don't want to include it in the optimisation.
            # problematic = [i for i, (s, t) in enumerate(zip(source, U))
            #                if len(intersect(s, t)[0]) > 0]
            # print(len(problematic) * 1.0 / n)
            # w_i_i = np.ones(v_i_tm.n_points, dtype=bool)
            # w_i_i[problematic] = False

            # Form the overall w_i from the normals, edge case
            w_i = np.logical_and(w_i_n, w_i_e)
            # we could add self intersection at a later date too...
            # w_i = np.logical_and(np.logical_and(w_i_n, w_i_e), w_i_i)

            prop_w_i = (n - w_i.sum() * 1.0) / n
            prop_w_i_n = (n - w_i_n.sum() * 1.0) / n
            prop_w_i_e = (n - w_i_e.sum() * 1.0) / n
            j = j + 1

            # Build the sparse diagonal weight matrix.
            # NOTE: np.float was removed from NumPy (>=1.24); the builtin
            # float is the documented equivalent (float64).
            W_s = sp.diags(w_i.astype(float)[None, :], [0])

            data = np.hstack((v_i.ravel(), o))
            D_s = sp.coo_matrix((data, (row, col)))

            # nullify the masked U values
            U[~w_i] = 0

            to_stack_A = [alpha_M_kron_G_s, W_s.dot(D_s)]
            to_stack_B = [np.zeros((alpha_M_kron_G_s.shape[0], n_dims)), U]

            # use `is not None` for consistency with the other landmark
            # guards in this function
            if landmarks is not None:
                D_L = sp.coo_matrix((data[lm_mask], (row_lm, col_lm)),
                                    shape=(n_landmarks, D_s.shape[1]))
                to_stack_A.append(beta * D_L)
                to_stack_B.append(beta * U_L)

            A_s = sp.vstack(to_stack_A).tocsr()
            B_s = sp.vstack(to_stack_B).tocsr()
            X = spsolve(A_s, B_s)

            # deform template
            v_i = D_s.dot(X)

            err = np.linalg.norm(X_prev - X, ord='fro')

            if landmarks is not None:
                src_lms = v_i[source_lm_index]
                lm_err = np.sqrt((src_lms - U_L) ** 2).sum(axis=1).mean()

            if verbose:
                v_str = ('a: {}, ({}) - total : {:.0%} norms: {:.0%} '
                         'edges: {:.0%}'.format(alpha, j, prop_w_i,
                                                prop_w_i_n, prop_w_i_e))
                if landmarks is not None:
                    v_str += ' beta: {}, lm_err: {:.5f}'.format(beta, lm_err)
                print(v_str)

            info_dict = {
                'alpha': alpha,
                'iteration': j + 1,
                'prop_omitted': prop_w_i,
                'prop_omitted_norms': prop_w_i_n,
                'prop_omitted_edges': prop_w_i_e,
                'delta': err
            }
            if landmarks is not None:
                info_dict['beta'] = beta
                info_dict['lm_err'] = lm_err
            info.append(info_dict)
            X_prev = X

            if err / np.sqrt(np.size(X_prev)) < eps:
                break

    # final result if we choose closest points
    point_corr = closest_points_on_target(v_i)[0]

    result = {
        'deformed_source': restore.apply(v_i),
        'matched_target': restore.apply(point_corr),
        'matched_tri_indices': tri_indices,
        'info': info
    }

    if landmarks is not None:
        result['source_lm_index'] = source_lm_index

    return result
def test_init_from_pointcloud_return_transform(): correct_tr = Translation([5, 5]) pc = correct_tr.apply(PointCloud.init_2d_grid((10, 10))) im, tr = Image.init_from_pointcloud(pc, return_transform=True) assert im.shape == (9, 9) assert_allclose(tr.as_vector(), -correct_tr.as_vector())
def normalize(gt): from menpo.transform import Translation, NonUniformScale t = Translation(gt.centre()).pseudoinverse() s = NonUniformScale(gt.range()).pseudoinverse() return t.compose_before(s)
def align_mesh_to_template(source, target, scale_corrective=1.2): scale = Scale((target.norm() / source.norm()) * scale_corrective, n_dims=target.n_dims) translation = Translation(target.centre() - source.centre()) rotation = Rotation.init_from_3d_ccw_angle_around_x(-45) return rotation.compose_before(translation.compose_before(scale))
def solve_pnp(
    points_2d,
    points_3d,
    intrinsic_matrix,
    distortion_coefficients=None,
    pnp_method=SOLVEPNP_ITERATIVE,
    n_iterations=100,
    reprojection_error=8.0,
    initial_transform=None,
):
    """
    Use OpenCV to solve the Perspective-N-Point problem (PnP). Uses Ransac
    PnP as this typically provides better results.

    Note the intrinsic matrix (if given) must be in "OpenCV" space and thus
    has the "x" and "y" axes flipped w.r.t the menpo norm. E.g. the
    intrinsic matrix is defined as follows:

        [fx,  0, cx, 0]
        [ 0, fy, cy, 0]
        [ 0,  0,  1, 0]
        [ 0,  0,  0, 1]

    Parameters
    ----------
    points_2d : :map`Pointcloud` or subclass
        The 2D points in the image to solve the PnP problem with.
    points_3d : :map`Pointcloud` or subclass
        The 3D points to solve the PnP problem with
    intrinsic_matrix : :map`Homogeneous`
        The intrinsic matrix - if the intrinsic matrix is unknown please see
        usage of pinhole_intrinsic_matrix()
    distortion_coefficients : ``(D,)`` `ndarray`
        The distortion coefficients (if not given assumes 0 coefficients).
        See the OpenCV documentation for the distortion coefficient types
        that are supported.
    pnp_method : int
        The OpenCV PNP method e.g. cv2.SOLVEPNP_ITERATIVE or otherwise
    n_iterations : int
        The number of iterations to perform
    reprojection_error : float
        The maximum reprojection error to allow for a point to be
        considered an inlier.
    initial_transform : :map`Homogeneous`
        The initialization for the cv2.SOLVEPNP_ITERATIVE method. Compatible
        with the returned model transformation returned by this method.

    Returns
    -------
    model_view_t : :map`Homogeneous`
        The combined ModelView transform. Can be used to place the 3D
        points in "eye space".
    proj_t : :map`Homogeneous`
        A transform that can be used to project the input 3D points back
        into the image

    Raises
    ------
    ValueError
        If an initial transform is given with a method other than
        SOLVEPNP_ITERATIVE, or if cv2.solvePnPRansac fails to converge.
    """
    import cv2

    if distortion_coefficients is None:
        # Zero distortion (4-coefficient model) by default.
        distortion_coefficients = np.zeros(4)

    # Only SOLVEPNP_ITERATIVE supports an extrinsic guess; decompose the
    # given homogeneous transform into OpenCV's rotation/translation vectors.
    r_vec = t_vec = None
    if initial_transform is not None:
        if pnp_method != cv2.SOLVEPNP_ITERATIVE:
            raise ValueError(
                "Initial estimates can only be given to SOLVEPNP_ITERATIVE")
        else:
            r_vec = cv2.Rodrigues(initial_transform.h_matrix[:3, :3])[0]
            t_vec = initial_transform.h_matrix[:3, -1].ravel()

    # Note the [:, ::-1] - menpo images index (y, x), OpenCV expects (x, y).
    converged, r_vec, t_vec, _ = cv2.solvePnPRansac(
        points_3d.points,
        points_2d.points[:, ::-1],
        intrinsic_matrix.h_matrix[:3, :3],
        distortion_coefficients,
        flags=pnp_method,
        iterationsCount=n_iterations,
        reprojectionError=reprojection_error,
        useExtrinsicGuess=r_vec is not None,
        rvec=r_vec,
        tvec=t_vec,
    )

    if not converged:
        raise ValueError("cv2.solvePnPRansac failed to converge")

    # Rebuild menpo transforms from OpenCV's Rodrigues rotation vector and
    # translation vector.
    rotation = Rotation(cv2.Rodrigues(r_vec)[0])
    translation = Translation(t_vec.ravel())
    model_view_t = rotation.compose_before(translation)

    # Projection: intrinsics, then flip x/y back to menpo convention, then
    # drop the homogeneous coordinate.
    proj_t = intrinsic_matrix.compose_before(
        flip_xy_yx()).compose_before(_drop_h)
    return model_view_t, proj_t
def test_5d_translation(): t_vec = np.ones(5) with raises(ValueError): Translation(t_vec)
def test_translation_compose_after_homog(): # can't do this inplace - so should just give transform chain homog = Homogeneous(np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])) t = Translation([3, 4]) res = t.compose_after(homog) assert type(res) == Homogeneous
def test_1d_translation(): t_vec = np.array([1]) with raises(ValueError): Translation(t_vec)
def test_uniformscale_compose_after_translation(): t = Translation([3, 4]) s = UniformScale(2, 2) res = s.compose_after(t) assert type(res) == Similarity
def test_1d_translation(): t_vec = np.array([1]) Translation(t_vec)
def test_translation_set_h_matrix_raises_notimplementederror(): t = Translation([3, 4]) t.set_h_matrix(t.h_matrix)
def test_translation_3d_as_vector(): params = np.array([1, 2, 3]) vec = Translation(params).as_vector() assert_allclose(vec, params)
def mesh_in_unit_sphere(mesh): scale = UniformScale(1 / mesh.norm(), mesh.n_dims) translation = Translation(-scale.apply(mesh).centre()) return translation.compose_after(scale)
def test_translation_2d_n_parameters(): trans = np.array([1, 2]) t = Translation(trans) assert (t.n_parameters == 2)
def fit(imagepath):
    """Fit a 3D morphable model to a face image and export the results.

    Pipeline: import image -> detect face -> 2D AAM fit for an initial
    shape -> 3D fit -> rasterize the fitted mesh and export both a render
    and a textured mesh (texture sampled via a cylindrically unwrapped
    template). Relies on module-level globals (``detect``, ``aam_fitter``,
    ``fitter``, ``shape_model``, ``mirrored_parts_68`` etc.) and writes
    output files next to the input image.
    """
    image = mio.import_image(imagepath, normalize=False)
    # Expand greyscale inputs to 3 channels (menpo images are C x H x W).
    if len(image.pixels.shape) == 2:
        image.pixels = np.stack([image.pixels, image.pixels, image.pixels])
    if image.pixels.shape[0] == 1:
        image.pixels = np.concatenate(
            [image.pixels, image.pixels, image.pixels], axis=0)
    print(image.pixels_with_channels_at_back().shape)
    # Use the first detected face only.
    bb = detect(image.pixels_with_channels_at_back())[0]
    initial_shape = aam_fitter.fit_from_bb(image, bb).final_shape
    result = fitter.fit_from_shape(image, initial_shape,
                                   max_iters=40,
                                   camera_update=True,
                                   focal_length_update=False,
                                   reconstruction_weight=1,
                                   shape_prior_weight=.4e8,
                                   texture_prior_weight=1.,
                                   landmarks_prior_weight=1e5,
                                   return_costs=True,
                                   init_shape_params_from_lms=False)

    mesh = ColouredTriMesh(result.final_mesh.points,
                           result.final_mesh.trilist)

    def transform(mesh):
        # Map the fitted mesh into image space via the final camera and
        # affine transforms of the fit.
        return result._affine_transforms[-1].apply(
            result.camera_transforms[-1].apply(mesh))

    mesh_in_img = transform(lambertian_shading(mesh))
    expr_dir = image.path.parent
    p = image.path.stem
    raster = rasterize_mesh(mesh_in_img, image.shape)

    uv_shape = (600, 1000)
    template = shape_model.mean()
    unwrapped_template = optimal_cylindrical_unwrap(template).apply(template)
    # Shift the unwrapped template so its minimum bound sits at the origin.
    minimum = unwrapped_template.bounds(boundary=0)[0]
    unwrapped_template = Translation(-minimum).apply(unwrapped_template)
    # Swap axes and flip the first axis to match the UV image orientation.
    unwrapped_template.points = unwrapped_template.points[:, [1, 0]]
    unwrapped_template.points[:, 0] = unwrapped_template.points[:, 0].max(
    ) - unwrapped_template.points[:, 0]
    # Scale into the UV image. NOTE(review): the [.40, .31] factors look
    # empirically tuned for this template - confirm before reuse.
    unwrapped_template.points *= np.array([.40, .31])
    unwrapped_template.points *= np.array([uv_shape])

    bcoords_img, tri_index_img = rasterize_barycentric_coordinate_images(
        unwrapped_template, uv_shape)
    TI = tri_index_img.as_vector()
    BC = bcoords_img.as_vector(keep_channels=True).T

    def masked_texture(mesh_in_image, background):
        # Sample the background image at the barycentric positions of the
        # mesh to build the UV texture.
        sample_points_3d = mesh_in_image.project_barycentric_coordinates(
            BC, TI)
        texture = bcoords_img.from_vector(
            background.sample(sample_points_3d.points[:, :2]))
        return texture

    uv = masked_texture(mesh_in_img, image)

    t = TexturedTriMesh(
        result.final_mesh.points,
        image_coords_to_tcoords(uv.shape).apply(unwrapped_template).points,
        uv,
        mesh_in_img.trilist)

    # Export the textured mesh and the rendered overlay beside the input.
    m3io.export_textured_mesh(
        t, str(expr_dir / Path(p).with_suffix('.mesh.obj')), overwrite=True)
    mio.export_image(
        raster, str(expr_dir / Path(p).with_suffix('.render.jpg')),
        overwrite=True)