def load_mesh(path):
    dat = np.load(path)
    verts = dat["verts"]
    faces = dat["faces"]
    return pyrender.Mesh(
        [pyrender.Primitive(positions=verts, indices=faces)])
def render_depth_map_mesh(
    self, K, R, t, height, width, znear=0.05, zfar=1500,
):
    scene = pyrender.Scene()
    mesh = pyrender.Mesh(
        primitives=[
            pyrender.Primitive(
                positions=self.verts,
                normals=self.normals,
                color_0=self.colors,
                indices=self.faces,
                mode=pyrender.GLTF.TRIANGLES,
            )
        ],
        is_visible=True,
    )
    mesh_node = pyrender.Node(mesh=mesh, matrix=np.eye(4))
    scene.add_node(mesh_node)

    cam = pyrender.IntrinsicsCamera(
        fx=K[0, 0], fy=K[1, 1], cx=K[0, 2], cy=K[1, 2],
        znear=znear, zfar=zfar,
    )
    # Camera pose: invert the world-to-camera extrinsics (R, t), then flip
    # from the OpenCV convention (y down, z forward) to OpenGL (y up, z backward).
    T = np.eye(4)
    T[:3, :3] = R.T
    T[:3, 3] = (-R.T @ t.reshape(3, 1)).ravel()
    cv2gl = np.array([
        [1, 0, 0, 0],
        [0, -1, 0, 0],
        [0, 0, -1, 0],
        [0, 0, 0, 1],
    ])
    T = T @ cv2gl
    cam_node = pyrender.Node(camera=cam, matrix=T)
    scene.add_node(cam_node)

    light = pyrender.DirectionalLight(color=np.ones(3), intensity=3)
    light_node = pyrender.Node(light=light, matrix=np.eye(4))
    scene.add_node(light_node, parent_node=cam_node)

    render = pyrender.OffscreenRenderer(width, height)
    color, depth = render.render(scene)
    render.delete()  # free the offscreen GL context

    # if self.vis:
    #     depth[depth <= 0] = np.NaN
    #     depth = co.plt.image_colorcode(depth)
    #     imwrite(dm_path.with_suffix(".jpg"), depth)
    return depth
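# A minimal standalone sketch (not from the original code) checking the
# OpenCV -> OpenGL camera handling used in the method above: with
# world-to-camera extrinsics (R, t), a world point in front of the OpenCV
# camera (positive z in camera coordinates) must end up at negative z in the
# OpenGL camera frame that pyrender expects. R, t, and x_world below are
# hypothetical placeholder values.
import numpy as np

R = np.eye(3)                       # assumed extrinsics: camera at the origin,
t = np.zeros(3)                     # looking along +z in the OpenCV convention
x_world = np.array([0.0, 0.0, 2.0, 1.0])  # point 2 m in front of the camera

T = np.eye(4)                       # camera-to-world pose, built as above
T[:3, :3] = R.T
T[:3, 3] = (-R.T @ t.reshape(3, 1)).ravel()
cv2gl = np.diag([1.0, -1.0, -1.0, 1.0])
T = T @ cv2gl

x_glcam = np.linalg.inv(T) @ x_world
assert x_glcam[2] < 0               # OpenGL cameras look down the -z axis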
def _generate_mesh(self, camera_pose, render_flags: BoxRenderFlags = None):
    positions, indices = self._box_geometry
    colors = BoxRenderer._generate_labels_normals_colormap(
        box_model=self._box_model,
        camera_pose=camera_pose,
        camera_transform=self._camera_transform,
        box_pose=self._box_model_pose,
        render_flags=render_flags)
    # mode=4 corresponds to GLTF.TRIANGLES
    prim = pyrender.Primitive(
        positions=positions, indices=indices, mode=4, color_0=colors)
    mesh = pyrender.Mesh([prim])
    self._box_mesh = mesh
def create_edges(p0_pts, p1_pts,
                 p0_color=np.array([1.0, 0.0, 0.0]),
                 p1_color=np.array([0.0, 1.0, 0.0]),
                 line_color=np.array((0.0, 0.0, 1.0)),
                 no_vex=False):
    # p0_pts, p1_pts: (N, 3) endpoints of the N edges to draw
    if p1_pts.size <= 0:
        return None
    assert p0_pts.shape[1] == 3, p0_pts.shape
    assert p1_pts.shape[1] == 3, p1_pts.shape
    num = p0_pts.shape[0]

    # Interleave the endpoints so consecutive pairs form GL_LINES segments.
    line_pts = []
    for i in range(num):
        line_pts.append(p0_pts[i])
        line_pts.append(p1_pts[i])
    line_pts = np.array(line_pts)

    plist = []
    plist.append(
        pyrender.Primitive(positions=line_pts, color_0=line_color,
                           mode=pyrender.constants.GLTF.LINES))
    if not no_vex:
        plist.append(
            pyrender.Primitive(positions=p0_pts, color_0=p0_color,
                               mode=pyrender.constants.GLTF.POINTS))
        plist.append(
            pyrender.Primitive(positions=p1_pts, color_0=p1_color,
                               mode=pyrender.constants.GLTF.POINTS))
    vecs_mesh = pyrender.Mesh(primitives=plist, is_visible=True)
    return vecs_mesh
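# Hypothetical usage sketch for create_edges (not part of the original code):
# visualize N correspondences between two point sets as red/green endpoints
# connected by blue line segments. The arrays p0 and p1 are made-up data.
import numpy as np
import pyrender

p0 = np.random.rand(50, 3).astype(np.float32)               # assumed source points
p1 = p0 + 0.1 * np.random.randn(50, 3).astype(np.float32)   # assumed target points

scene = pyrender.Scene()
edges_mesh = create_edges(p0, p1)
if edges_mesh is not None:
    scene.add(edges_mesh)
pyrender.Viewer(scene, use_raymond_lighting=True, point_size=5)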
def deodr_mesh_to_pyrender(deodr_mesh):
    # trimesh and pyrender do not support texture face indices that differ
    # from the face indices of the 3D vertices, so we duplicate vertices
    # until both index buffers agree.
    faces, mask_v, mask_vt = trimesh.visual.texture.unmerge_faces(
        deodr_mesh.faces, deodr_mesh.faces_uv)

    vertices = deodr_mesh.vertices[mask_v]
    deodr_mesh.compute_vertex_normals()
    vertex_normals = deodr_mesh.vertex_normals[mask_v]
    uv = deodr_mesh.uv[mask_vt]
    # Convert pixel UVs to normalized, vertically flipped texture coordinates.
    pyrender_uv = np.column_stack((
        uv[:, 0] / deodr_mesh.texture.shape[0],
        1 - uv[:, 1] / deodr_mesh.texture.shape[1],
    ))

    material = None
    poses = None
    color_0 = None
    if material is None:
        base_color_texture = pyrender.texture.Texture(
            source=deodr_mesh.texture, source_channels="RGB")
        material = pyrender.MetallicRoughnessMaterial(
            alphaMode="BLEND",
            baseColorFactor=[1, 1, 1, 1.0],
            metallicFactor=0,
            roughnessFactor=1,
            baseColorTexture=base_color_texture,
        )
        material.wireframe = False

    primitive = pyrender.Primitive(
        positions=vertices,
        normals=vertex_normals,
        texcoord_0=pyrender_uv,
        color_0=color_0,
        indices=faces,
        material=material,
        mode=pyrender.constants.GLTF.TRIANGLES,
        poses=poses,
    )
    return pyrender.Mesh(primitives=[primitive])
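# Toy sketch (made-up data, not from the original code) of the vertex
# duplication performed above: two triangles that share geometric vertices but
# reference independent UV indices are passed to
# trimesh.visual.texture.unmerge_faces, which rebuilds a single index buffer
# and returns masks that gather matching vertex and UV rows.
import numpy as np
import trimesh

faces = np.array([[0, 1, 2], [0, 2, 3]])       # vertex indices per triangle
faces_uv = np.array([[0, 1, 2], [3, 4, 5]])    # independent UV indices

new_faces, mask_v, mask_vt = trimesh.visual.texture.unmerge_faces(faces, faces_uv)
# mask_v picks rows from the vertex array, mask_vt from the UV array; applying
# them as in deodr_mesh_to_pyrender yields aligned per-vertex attributes.
print(new_faces.shape, mask_v.shape, mask_vt.shape)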
def visualise_hypothesis_estimation(
    self,
    point_cloud,
    neighbourhood_cloud_indices,
    hypothesised_point_index,
    sampled_three_points_indices,
    ground_truth_normal_index,
):
    color_values = {
        "point_cloud": [255, 255, 255],
        "neighbourhood_cloud": [235, 189, 52],
        "hypothesised_point": [182, 0, 214],
        "sampled_three_points": [232, 0, 0],
        "estimated_normal": [86, 0, 214],
    }
    hypothesised_point = point_cloud[hypothesised_point_index]
    hypothesised_normal = self.get_normal_from_three_points(
        *point_cloud[sampled_three_points_indices])
    true_normal = self.ground_truth_normals[ground_truth_normal_index]

    colors = (np.ones((len(point_cloud), 3))
              * np.array(color_values["point_cloud"]) / 255)
    colors[neighbourhood_cloud_indices] = (
        np.array(color_values["neighbourhood_cloud"]) / 255)
    colors[hypothesised_point_index] = (
        np.array(color_values["hypothesised_point"]) / 255)
    colors[sampled_three_points_indices] = (
        np.array(color_values["sampled_three_points"]) / 255)

    normal_colors = (np.ones((2, 4))
                     * (color_values["estimated_normal"] + [255]) / 255)
    triangle_colors = (np.ones((2, 4))
                       * (color_values["sampled_three_points"] + [255]) / 255)
    [a, b, c] = point_cloud[sampled_three_points_indices]

    run_gui_pyrmesh(
        [
            pyrender.Mesh.from_points(point_cloud, colors=colors),
            pyrender.Mesh([
                pyrender.Primitive(
                    [
                        hypothesised_point,
                        hypothesised_point + true_normal * 0.12,
                    ],
                    mode=3,
                    color_0=normal_colors,
                ),
                pyrender.Primitive(
                    [
                        hypothesised_point,
                        hypothesised_point + hypothesised_normal * 0.12,
                    ],
                    mode=3,
                    color_0=triangle_colors,
                ),
                pyrender.Primitive([a, b], mode=3, color_0=triangle_colors),
                pyrender.Primitive([b, c], mode=3, color_0=triangle_colors),
                pyrender.Primitive([c, a], mode=3, color_0=triangle_colors),
            ]),
        ],
        point_size=10,
    )
def get_warp(canonical: trimesh.base.Trimesh,
             goal: trimesh.base.Trimesh,
             camera_transform: np.array,
             h: int,
             w: int,
             camera_angle_x: float,
             debug: bool = False) -> np.array:
    """
    Calculate warp vectors pointing from the goal SMPL to the canonical SMPL
    for the closest ray intersection (wrt. the camera origin) and return a
    warp for each ray. If a ray does not intersect the goal SMPL, a warp of
    zero is returned for that ray (pixel).

    Parameters
    ----------
    canonical : trimesh.base.Trimesh
        Canonical SMPL.
    goal : trimesh.base.Trimesh
        Goal SMPL.
    camera_transform : np.array (4, 4)
        Camera transformation matrix.
    h : int
        Height of camera.
    w : int
        Width of camera.
    camera_angle_x : float
        FOV of camera.
    debug : bool
        If True, a 3D and 2D plot of the image will be created and shown.

    Returns
    -------
    warp_img : np.array (h, w, 3)
        Warp vectors (3D) pointing from goal SMPL to canonical SMPL
        intersections.
    depth : np.array (h, w)
        Distance from the camera origin to the closest goal intersection per
        ray (zero where the ray misses).
    """
    f = .5 * w / np.tan(.5 * camera_angle_x)
    rays_translation, rays_direction = get_rays(h, w, f, camera_transform)
    camera_origin = rays_translation[0][0]

    # Calculate intersections of the rays with the goal SMPL
    intersector = RayMeshIntersector(goal)
    goal_intersections = intersector.intersects_location(
        rays_translation.reshape(-1, 3), rays_direction.reshape(-1, 3))
    goal_intersections_points = goal_intersections[0]  # (N_intersects, 3)
    goal_intersections_face_indices = goal_intersections[2]  # (N_intersects, )
    goal_intersections_ray_indices = goal_intersections[1]  # (N_intersects, )

    # Find rays with multiple intersections and use only the closest one
    unique_goal_intersect_points = []
    unique_goal_intersect_face_indices = []
    unique_goal_intersect_ray_indices = []
    depth = np.zeros((w * h))
    intersect_indices = np.arange(len(goal_intersections_points))
    for ray in np.unique(goal_intersections_ray_indices):
        ray_mask = goal_intersections_ray_indices == ray
        indices_ray = intersect_indices[ray_mask]
        ray_intersects = goal_intersections_points[ray_mask]
        distances_camera = np.linalg.norm(ray_intersects - camera_origin,
                                          axis=1)
        closest_intersect_index = indices_ray[np.argmin(distances_camera)]
        unique_goal_intersect_points.append(
            goal_intersections_points[closest_intersect_index])
        unique_goal_intersect_face_indices.append(
            goal_intersections_face_indices[closest_intersect_index])
        unique_goal_intersect_ray_indices.append(
            goal_intersections_ray_indices[closest_intersect_index])
        depth[ray] = np.min(distances_camera)
    assert (len(unique_goal_intersect_points) ==
            len(unique_goal_intersect_face_indices) ==
            len(unique_goal_intersect_ray_indices))
    assert ((np.unique(goal_intersections_ray_indices) ==
             unique_goal_intersect_ray_indices).all())
    goal_intersections_points = np.array(unique_goal_intersect_points)
    goal_intersections_face_indices = np.array(
        unique_goal_intersect_face_indices)
    goal_intersections_ray_indices = np.array(
        unique_goal_intersect_ray_indices)

    # For each intersection on the goal SMPL, calculate the corresponding
    # intersection on the canonical SMPL: express the intersection point as a
    # linear combination of the goal triangle's vertices and apply the same
    # coefficients to the canonical triangle's vertices.
    canonical_intersections = []
    for i, face_idx in enumerate(goal_intersections_face_indices):
        vertex_indices = goal.faces[face_idx]
        goal_vertices = goal.vertices[vertex_indices]
        canonical_vertices = canonical.vertices[vertex_indices]
        lin_coeffs_vertices = np.linalg.solve(goal_vertices.T,
                                              goal_intersections_points[i])
        canonical_intersection = canonical_vertices.T.dot(lin_coeffs_vertices)
        canonical_intersections.append(canonical_intersection)
    canonical_intersections = np.array(canonical_intersections)

    # Calculate the actual warp for the intersections
    warp = canonical_intersections - goal_intersections_points

    # Set each pixel corresponding to a ray index to that ray's warp
    warp_img_flat = np.zeros((h * w, 3))
    warp_img_flat[goal_intersections_ray_indices] = warp
    warp_img = warp_img_flat.reshape((h, w, 3))

    warp_min = -1  # np.min(warp_img, axis=(0, 1))
    warp_max = 1  # np.max(warp_img, axis=(0, 1))
    warp_normalized = (warp_img - warp_min) / (warp_max - warp_min)
    if debug:
        plt.imshow(warp_normalized)
        plt.show()
        scene = pyrender.Scene()
        lines_warp = np.hstack(
            (goal_intersections_points,
             goal_intersections_points + warp)).reshape(-1, 3)
        primitive = [pyrender.Primitive(lines_warp, mode=1)]
        primitive_mesh = pyrender.Mesh(primitive)
        scene.add(primitive_mesh)
        pyrender.Viewer(scene, use_raymond_lighting=True)

    return warp_img, depth.reshape((h, w))
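# Standalone sketch (hypothetical data, not from the original code) of the
# per-face correspondence step inside get_warp: a point on a goal triangle is
# written as a linear combination of that triangle's vertices, and the same
# coefficients transfer it onto the canonical triangle. For this translated
# face the resulting warp equals the translation.
import numpy as np

goal_tri = np.array([[1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0],
                     [0.0, 0.0, 1.0]])                 # assumed goal face vertices
canonical_tri = goal_tri + np.array([0.5, 0.0, 0.0])   # assumed canonical face

point_on_goal = goal_tri.mean(axis=0)                  # centroid lies on the face
coeffs = np.linalg.solve(goal_tri.T, point_on_goal)
point_on_canonical = canonical_tri.T @ coeffs

warp = point_on_canonical - point_on_goal
np.testing.assert_allclose(warp, [0.5, 0.0, 0.0])      # matches the translation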
def render_depth_maps_mesh(
    dm_dir,
    mesh_path,
    Ks,
    Rs,
    ts,
    height,
    width,
    znear=0.05,
    zfar=1500,
    write_vis=True,
):
    print(f"render depth maps to {dm_dir}")
    dm_dir.mkdir(parents=True, exist_ok=True)

    mesh = o3d.io.read_triangle_mesh(str(mesh_path))
    mesh.compute_vertex_normals()
    mesh.paint_uniform_color((0.7, 0.7, 0.7))
    verts = np.asarray(mesh.vertices).astype(np.float32)
    faces = np.asarray(mesh.triangles).astype(np.int32)
    colors = np.asarray(mesh.vertex_colors).astype(np.float32)
    normals = np.asarray(mesh.vertex_normals).astype(np.float32)

    dm_paths = []
    for view_idx, K, R, t in zip(itertools.count(), tqdm(Ks), Rs, ts):
        dm_path = dm_dir / f"dm_{view_idx:08d}.npy"
        dm_paths.append(dm_path)
        if dm_path.exists():
            continue

        scene = pyrender.Scene()
        mesh = pyrender.Mesh(
            primitives=[
                pyrender.Primitive(
                    positions=verts,
                    normals=normals,
                    color_0=colors,
                    indices=faces,
                    mode=pyrender.GLTF.TRIANGLES,
                )
            ],
            is_visible=True,
        )
        mesh_node = pyrender.Node(mesh=mesh, matrix=np.eye(4))
        scene.add_node(mesh_node)

        cam = pyrender.IntrinsicsCamera(
            fx=K[0, 0], fy=K[1, 1], cx=K[0, 2], cy=K[1, 2],
            znear=znear, zfar=zfar,
        )
        # Invert the world-to-camera extrinsics and convert from the OpenCV to
        # the OpenGL camera convention (flip y and z axes).
        T = np.eye(4)
        T[:3, :3] = R.T
        T[:3, 3] = (-R.T @ t.reshape(3, 1)).ravel()
        cv2gl = np.array([
            [1, 0, 0, 0],
            [0, -1, 0, 0],
            [0, 0, -1, 0],
            [0, 0, 0, 1],
        ])
        T = T @ cv2gl
        cam_node = pyrender.Node(camera=cam, matrix=T)
        scene.add_node(cam_node)

        light = pyrender.DirectionalLight(color=np.ones(3), intensity=3)
        light_node = pyrender.Node(light=light, matrix=np.eye(4))
        scene.add_node(light_node, parent_node=cam_node)

        render = pyrender.OffscreenRenderer(width, height)
        color, depth = render.render(scene)
        render.delete()  # free the offscreen GL context before the next view
        np.save(dm_path, depth)

        if write_vis:
            depth[depth <= 0] = np.NaN
            depth = co.plt.image_colorcode(depth)
            imwrite(dm_path.with_suffix(".jpg"), depth)
    return dm_paths
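# Hypothetical driver for render_depth_maps_mesh (the paths, intrinsics, and
# poses below are placeholders, not from the original project): render one
# depth map per camera view of a reconstructed mesh.
from pathlib import Path
import numpy as np

height, width = 480, 640
K = np.array([[500.0, 0.0, width / 2],
              [0.0, 500.0, height / 2],
              [0.0, 0.0, 1.0]])
R = np.eye(3)                      # world-to-camera rotation (OpenCV convention)
t = np.array([0.0, 0.0, 2.0])      # world-to-camera translation

dm_paths = render_depth_maps_mesh(
    dm_dir=Path("out/depth_maps"),
    mesh_path=Path("out/mesh.ply"),
    Ks=[K], Rs=[R], ts=[t],
    height=height, width=width,
    write_vis=False,               # skip the colorcoded .jpg previews
)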