def get_smpl(pkl_data, json_data):
    """
    Build SMPL body geometry for a single fitted result.

    Runs the SMPL model twice (posed and zero-pose) from parameters in
    ``pkl_data``, prints rough body-size / weight diagnostics, and builds two
    open3d meshes (the posed body, and a copy shifted +1.5m in x) plus one
    colored marker sphere per ground-truth 2D joint lifted to 3D through the
    fitted camera.

    :param pkl_data: dict of SMPL fit results ('betas', 'body_pose', 'transl',
        'global_orient', camera parameters, 'gt_joints').
    :param json_data: OpenPose-style keypoint file; only gender/height/weight
        metadata of person 0 is read.
    :return: (vertices, faces, body_mesh, shifted_body_mesh, marker_spheres)
    """
    gender = json_data['people'][0]['gender_gt']
    print('Target height {}, weight {}'.format(json_data['people'][0]['height'], json_data['people'][0]['weight']))
    # SMPL expects batched tensors, hence the unsqueeze(0) on every parameter.
    betas = torch.Tensor(pkl_data['betas']).unsqueeze(0)
    pose = torch.Tensor(pkl_data['body_pose']).unsqueeze(0)
    transl = torch.Tensor(pkl_data['transl']).unsqueeze(0)
    global_orient = torch.Tensor(pkl_data['global_orient']).unsqueeze(0)
    model = smplx.create('models', model_type='smpl', gender=gender)
    output = model(betas=betas, body_pose=pose, transl=transl, global_orient=global_orient, return_verts=True)
    smpl_vertices = output.vertices.detach().cpu().numpy().squeeze()
    smpl_joints = output.joints.detach().cpu().numpy().squeeze()
    # Zero body pose (pose * 0) gives the canonical T-pose-like shape, used for
    # measuring body extents independent of the fitted pose.
    output_unposed = model(betas=betas, body_pose=pose * 0, transl=transl, global_orient=global_orient, return_verts=True)
    smpl_vertices_unposed = output_unposed.vertices.detach().cpu().numpy().squeeze()
    # Axis-aligned extents of the unposed body; axis order presumably maps
    # x/y/z to wingspan/height/thickness — TODO confirm model frame.
    for i, lbl in enumerate(['Wingspan', 'Height', 'Thickness']):
        print('Actual', lbl, smpl_vertices_unposed[:, i].max() - smpl_vertices_unposed[:, i].min(), end=' ')
    print()
    smpl_trimesh = trimesh.Trimesh(vertices=np.asarray(smpl_vertices_unposed), faces=model.faces)
    # volume * 1.03 * 1000: presumably m^3 times a ~1.03 g/cm^3 body density
    # converted to kg — TODO confirm units.
    print('Est weight from volume', smpl_trimesh.volume * 1.03 * 1000)
    # print('Pose embedding', pkl_data['pose_embedding'])
    # print('Body pose', np.array2string(pkl_data['body_pose'], separator=', '))
    smpl_o3d = o3d.TriangleMesh()
    smpl_o3d.triangles = o3d.Vector3iVector(model.faces)
    smpl_o3d.vertices = o3d.Vector3dVector(smpl_vertices)
    smpl_o3d.compute_vertex_normals()
    # smpl_o3d.paint_uniform_color([0.3, 0.3, 0.3])
    # Second copy shifted sideways so both bodies are visible at once.
    smpl_o3d_2 = o3d.TriangleMesh()
    smpl_o3d_2.triangles = o3d.Vector3iVector(model.faces)
    smpl_o3d_2.vertices = o3d.Vector3dVector(smpl_vertices + np.array([1.5, 0, 0]))
    smpl_o3d_2.compute_vertex_normals()
    smpl_o3d_2.paint_uniform_color([0.7, 0.3, 0.3])
    # Visualize SMPL joints - Patrick
    # Rebuild the fitting camera so 2D ground-truth joints can be unprojected.
    camera = PerspectiveCamera(rotation=torch.tensor(pkl_data['camera_rotation']).unsqueeze(0),
                               translation=torch.tensor(pkl_data['camera_translation']).unsqueeze(0),
                               center=torch.tensor(pkl_data['camera_center']),
                               focal_length_x=torch.tensor(pkl_data['camera_focal_length_x']),
                               focal_length_y=torch.tensor(pkl_data['camera_focal_length_y']))
    # 1.8 is the assumed depth passed to the inverse projection — TODO confirm meaning.
    gt_pos_3d = camera.inverse_camera_tform(torch.tensor(pkl_data['gt_joints']).unsqueeze(0), 1.8).detach().squeeze(0).cpu().numpy()
    all_markers = []
    # One jet-colormap sphere per joint (25 joints, OpenPose BODY_25 layout presumably).
    for i in range(25):
        color = cm.jet(i / 25.0)[:3]
        # smpl_marker = get_o3d_sphere(color=color, pos=smpl_joints[i, :])
        # all_markers.append(smpl_marker)
        pred_marker = get_o3d_sphere(color=color, pos=gt_pos_3d[i, :], radius=0.03)
        all_markers.append(pred_marker)
    return smpl_vertices, model.faces, smpl_o3d, smpl_o3d_2, all_markers
def stl2ply(stl_mesh):
    """
    Convert a numpy-stl style mesh into an indexed open3d TriangleMesh.

    STL stores three explicit corner coordinates per facet; this rebuilds a
    shared (deduplicated) vertex table and per-facet index triples.

    :param stl_mesh: mesh exposing ``v0``/``v1``/``v2`` (per-facet corner
        coordinate arrays), ``points`` (facets as 9-column rows) and
        ``normals`` (per-facet normals).
    :return: TriangleMesh with vertices, triangles and triangle normals set.
    """
    # Stack all three corner sets and deduplicate to get the unique vertex list.
    # pd.concat replaces the DataFrame.append chain, which was deprecated in
    # pandas 1.4 and removed in pandas 2.0.
    points = pd.concat([pd.DataFrame(stl_mesh.v0),
                        pd.DataFrame(stl_mesh.v1),
                        pd.DataFrame(stl_mesh.v2)])
    points = points.drop_duplicates()
    points = points.reset_index(drop=True)
    points = pd.DataFrame(points.values, columns=['x', 'y', 'z'])
    # Remember each unique vertex's row number so corners can be mapped back to it.
    points['index'] = points.index
    # Each STL facet row holds its three corners' coordinates side by side.
    edge = pd.DataFrame(
        stl_mesh.points,
        columns=['x0', 'y0', 'z0', 'x1', 'y1', 'z1', 'x2', 'y2', 'z2'])
    # Exact-coordinate joins map every corner to its deduplicated vertex id.
    edge['id0'] = edge.merge(points, how='left', left_on=['x0', 'y0', 'z0'], right_on=['x', 'y', 'z'])['index']
    edge['id1'] = edge.merge(points, how='left', left_on=['x1', 'y1', 'z1'], right_on=['x', 'y', 'z'])['index']
    edge['id2'] = edge.merge(points, how='left', left_on=['x2', 'y2', 'z2'], right_on=['x', 'y', 'z'])['index']
    edge = edge[['id0', 'id1', 'id2']].values
    points = points[['x', 'y', 'z']].values
    ply_mesh = pn.TriangleMesh()
    ply_mesh.vertices = pn.Vector3dVector(points)
    ply_mesh.triangles = pn.Vector3iVector(edge)
    ply_mesh.triangle_normals = pn.Vector3dVector(stl_mesh.normals)
    return ply_mesh
def get_all_smpl(pkl_data, json_data):
    """
    Build one SMPL body mesh (plus text labels of its loss values) for every
    intermediate optimization result in ``pkl_data['all_results']``.

    Bodies are laid out on a grid (offset 4m in x, 3m in y per result) so they
    can be inspected side by side.

    :param pkl_data: dict with an 'all_results' list; each entry holds SMPL
        parameters ('betas', 'body_pose', 'transl', 'global_orient') and a
        'loss_dict' of named scalar losses.
    :param json_data: keypoint metadata; only person 0's gender_gt is read.
    :return: flat list of open3d geometries (text labels and body meshes).
    """
    gender = json_data['people'][0]['gender_gt']
    all_meshes = []
    trans = np.array([4, 0, 0])
    # The model depends only on gender, which is constant here, so build it
    # once instead of re-loading the SMPL model file on every loop iteration.
    model = smplx.create('models', model_type='smpl', gender=gender)
    for i, result in enumerate(pkl_data['all_results']):
        t = trans + [0, i * 3, 0]
        # SMPL expects batched tensors, hence unsqueeze(0).
        betas = torch.Tensor(result['betas']).unsqueeze(0)
        pose = torch.Tensor(result['body_pose']).unsqueeze(0)
        transl = torch.Tensor(result['transl']).unsqueeze(0)
        global_orient = torch.Tensor(result['global_orient']).unsqueeze(0)
        output = model(betas=betas, body_pose=pose, transl=transl, global_orient=global_orient, return_verts=True)
        smpl_vertices = output.vertices.detach().cpu().numpy().squeeze()
        smpl_o3d = o3d.TriangleMesh()
        smpl_o3d.triangles = o3d.Vector3iVector(model.faces)
        smpl_o3d.vertices = o3d.Vector3dVector(smpl_vertices)
        smpl_o3d.compute_vertex_normals()
        smpl_o3d.translate(t)
        # One floating 3D text label per loss term, stacked vertically beside the body.
        for idx, key in enumerate(result['loss_dict'].keys()):
            lbl = '{} {:.2f}'.format(key, float(result['loss_dict'][key]))
            all_meshes.append(text_3d(lbl, t + [1, idx * 0.2 - 1, 2], direction=(0.01, 0, -1), degree=-90, font_size=150, density=0.2))
        all_meshes.append(smpl_o3d)
    return all_meshes
def decorated(*args, **kwargs):
    """
    Main interactive loop: a pygame control panel driving an open3d window
    that shows a human mesh predicted by a pretrained network.

    NOTE(review): `Setting`, `Attribute`, `Import`, `Export`, `Model`,
    `con_rect_cir`, `con_text`, `check_events`, `update_screen` and
    `obj_to_mesh` are project helpers not visible here; the descriptions
    below are inferred from usage — confirm against their definitions.
    Runs forever (`while True` with no break); *args/**kwargs are unused.
    """
    vis = open3d.Visualizer()
    vis.create_window(window_name='Human', width=570, height=700, left=720, top=180, visible=True)
    pygame.init()
    setting = Setting()
    screen = pygame.display.set_mode(setting.win_size)
    pygame.display.set_caption('roll')
    # Three placeholder meshes filled in later by obj_to_mesh.
    mesh = [
        open3d.TriangleMesh(),
        open3d.TriangleMesh(),
        open3d.TriangleMesh()
    ]
    attr = Attribute()
    # UI element containers for the pygame panel.
    line_group, cir_group, text_group = [], {}, list()
    con_rect_cir(screen, setting, line_group, cir_group)
    con_text(setting, line_group, text_group)
    im = Import(screen, setting)
    ex = Export(screen, setting, im)
    HOLD_ON = pygame.USEREVENT + 1  # custom event
    # Fire the custom event every 60 ms to drive continuous updates.
    pygame.time.set_timer(HOLD_ON, 60)
    model = Model(30, 32, 42, 54, 66, 78, 90, 100)
    model.load_state_dict(torch.load('human_net.pkl'))
    while True:
        check_events(screen, setting, cir_group, im, ex, HOLD_ON, attr, model, mesh, vis)
        update_screen(screen, setting, line_group, cir_group, text_group, im, ex)
        # suc_imp == 1 presumably signals "import just succeeded"; handle it
        # once, then mark it consumed with suc_imp = 2 — TODO confirm protocol.
        if setting.suc_imp == 1:
            obj_to_mesh(''.join(im.s), mesh)
            vis.add_geometry(mesh[0])
            mesh[0].compute_vertex_normals()
            ctr = vis.get_view_control()
            ctr.rotate(0.0, -500.0)
            attr_reflict_cir(setting, cir_group, attr)
            setting.suc_imp = 2
        vis.update_geometry()
        vis.poll_events()
def obj2open3dmesh(obj):
    """
    Convert a loaded OBJ object into an open3d TriangleMesh.

    :param obj: object exposing ``points`` (vertex coordinates), ``faces``
        (triangle index triples) and ``colors`` (per-vertex colors).
    :return: TriangleMesh with vertex normals computed.
    """
    mesh = open3d.TriangleMesh()
    mesh.vertices = open3d.Vector3dVector(obj.points)
    mesh.triangles = open3d.Vector3iVector(obj.faces)
    mesh.vertex_colors = open3d.Vector3dVector(obj.colors)
    # The original wrapped this in `if mesh.compute_vertex_normals() is False:`
    # and called it a second time — but compute_vertex_normals never returns
    # the literal False, so that retry branch was dead code. One call suffices.
    mesh.compute_vertex_normals()
    return mesh
def convert_to_IFS(self):
    """
    Build an indexed-face-set representation of this object as an open3d
    TriangleMesh, carrying over vertex colors when any are present.
    """
    mesh = open3d.TriangleMesh()
    mesh.vertices = self.vertex_data
    # Vertex colors are optional; copy them only when the list is non-empty.
    if len(self.vertex_colors) != 0:
        mesh.vertex_colors = self.vertex_colors
    face_index_triples = [self.__get_face_indices(face) for face in self.faces]
    mesh.triangles = open3d.Vector3iVector(face_index_triples)
    return mesh
def plot_flow(x, t, phi, grid_size, t_sample): """ Plot the ground truth points, and the reconstructed patch by meshing the domain [0, 1]^2 and lifting the mesh to R^3 :param x: The ground truth points we are trying to fit :param t: The sample positions used to fit the mesh :param phi: The fitted neural network :param grid_size: The number of sample positions per axis in the meshgrid :return: None """ # I'm doing the input here so you don't crash if you never use this function and don't have OpenGL import open3d with torch.no_grad(): mesh_samples = embed_3d( torch.from_numpy(meshgrid_vertices(grid_size)).to(x), t[0, 2]) mesh_faces = meshgrid_face_indices(grid_size) mesh_vertices = phi(mesh_samples)[:, 0:3] recon_vertices = phi(t)[:, 0:3] gt_color = np.array([0.1, 0.7, 0.1]) recon_color = np.array([0.7, 0.1, 0.1]) mesh_color = np.array([0.1, 0.1, 0.7]) curve_color = np.array([0.2, 0.2, 0.5]) pcloud_gt = open3d.PointCloud() pcloud_gt.points = open3d.Vector3dVector(phi.invert(x).cpu().numpy()) pcloud_gt.paint_uniform_color(gt_color) pcloud_recon = open3d.PointCloud() pcloud_recon.points = open3d.Vector3dVector( recon_vertices.cpu().numpy()) pcloud_recon.paint_uniform_color(recon_color) mesh_recon = open3d.TriangleMesh() mesh_recon.vertices = open3d.Vector3dVector( mesh_vertices.cpu().numpy()) mesh_recon.triangles = open3d.Vector3iVector(mesh_faces) mesh_recon.compute_vertex_normals() mesh_recon.paint_uniform_color(mesh_color) pc_initial = open3d.PointCloud() pc_initial.points = open3d.Vector3dVector(t.cpu().numpy()) pc_initial.paint_uniform_color(curve_color) flow_ode = open3d.LineSet() # print(x.shape) # print(t.shape) flow = get_Lines(phi, t[::t_sample, :], 15) flow_ode.points, flow_ode.lines = open3d.Vector3dVector(flow[0]), \ open3d.Vector2iVector(flow[1]) # flow_ode.colors = open3d.Vector3dVector(curve_color) open3d.draw_geometries( [pcloud_gt, pcloud_recon, mesh_recon, pc_initial, flow_ode])
def show_mesh(vertices, triangle, color=(0, 0, 0), only_genmesh=False):
    """
    Build (and optionally display) a uniformly colored open3d mesh together
    with a coordinate frame placed at the mesh centroid.

    :param vertices: (N, 3) vertex coordinates.
    :param triangle: (M, 3) triangle vertex indices.
    :param color: RGB color in [0, 1] for the whole mesh. The default is now
        an immutable tuple — the original mutable-list default ([0, 0, 0])
        was the classic shared-default-argument pitfall.
    :param only_genmesh: when True, skip the blocking draw_geometries call
        and just return the mesh.
    :return: the constructed TriangleMesh.
    """
    mesh = open3d.TriangleMesh()
    mesh.vertices = open3d.Vector3dVector(vertices)
    mesh.triangles = open3d.Vector3iVector(triangle)
    # list(color) keeps accepting both list and tuple arguments.
    mesh.paint_uniform_color(list(color))
    centroid = np.mean(vertices, 0)
    mesh_frame = open3d.create_mesh_coordinate_frame(size=1.6, origin=centroid)
    if not only_genmesh:
        # Blocking visualization window.
        open3d.draw_geometries([mesh, mesh_frame])
    return mesh
def __enter__(self):
    """
    Context-manager entry: zero the step counter and, when visualization is
    enabled, set up the chosen viewer (open3d window or MeshViewer).
    """
    self.steps = 0
    if not self.visualize:
        return self
    if self.viz_mode == 'o3d':
        # Open3D path: a window plus empty geometry placeholders to fill later.
        self.vis_o3d = o3d.Visualizer()
        self.vis_o3d.create_window()
        self.body_o3d = o3d.TriangleMesh()
        self.scan = o3d.PointCloud()
    else:
        self.mv = MeshViewer(body_color=self.body_color)
    return self
def create_plane(center, normal, plane_size, axis_size=0.03):
    """
    Build a square, double-sided plane mesh oriented to ``normal`` and
    centered at ``center``, plus a small cylinder marking the plane's axis.

    :param center: (3,) plane center in world coordinates.
    :param normal: (3,) plane normal direction.
    :param plane_size: edge length of the square plane.
    :param axis_size: height of the axis-marker cylinder.
    :return: (plane_mesh, axis_cylinder) open3d geometries.
    """
    mesh = o3d.TriangleMesh()
    # Build an orthonormal frame from the normal: two in-plane axes via
    # repeated cross products, sign-fixed for a consistent orientation.
    # (The original also computed an unused quaternion `quat`; removed.)
    e = np.array([0, 1, 0])
    cross_vec_1 = np.cross(normal, e)
    if (cross_vec_1[0] < 0):
        cross_vec_1 *= -1
    cross_vec_1 /= np.linalg.norm(cross_vec_1)
    cross_vec_2 = np.cross(normal, cross_vec_1)
    if (cross_vec_2[2] < 0):
        cross_vec_2 *= -1
    cross_vec_2 /= np.linalg.norm(cross_vec_2)
    vec_set_basis = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    vec_set_normal = np.array([normal, cross_vec_1, cross_vec_2])
    # NOTE(review): Rot.match_vectors / as_dcm are old scipy names (renamed to
    # align_vectors / as_matrix and later removed); left unchanged to match the
    # pinned environment — confirm before upgrading scipy.
    rr = Rot.match_vectors(vec_set_basis, vec_set_normal)[0]
    transform_mat = np.eye(4)
    transform_mat[:3, :3] = rr.as_dcm().T
    transform_mat[:3, 3] = center
    # Square in the local x/z plane (y is the local normal before transform).
    p1 = np.array([-plane_size * 0.5, 0, -plane_size * 0.5])
    p2 = np.array([plane_size * 0.5, 0, -plane_size * 0.5])
    p3 = np.array([plane_size * 0.5, 0, plane_size * 0.5])
    p4 = np.array([-plane_size * 0.5, 0, plane_size * 0.5])
    # dtype=float: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement (same dtype).
    _mesh_points = np.array([p1, p2, p3, p4], dtype=float)
    mesh.vertices = o3d.Vector3dVector(_mesh_points)
    _mesh_triangles = np.array([[0, 2, 1], [2, 0, 3]])
    # Duplicate each triangle with reversed winding so the plane is visible
    # from both sides regardless of backface culling.
    _mesh_triangles = np.concatenate(
        [_mesh_triangles, _mesh_triangles[:, ::-1]], axis=0)
    mesh.triangles = o3d.Vector3iVector(_mesh_triangles)
    mesh.paint_uniform_color([1, 0.706, 0])
    mesh.compute_vertex_normals()
    mesh.transform(transform_mat)
    mesh_cylinder = o3d.create_mesh_cylinder(radius=axis_size / 50, height=axis_size)
    mesh_cylinder.paint_uniform_color([0, 1.0, 0])
    # Swap the cylinder's y/z axes and lift it half its height so it stands on the plane.
    transform_yaw = np.array([[1, 0, 0, 0], [0, 0, 1, axis_size / 2], [0, 1, 0, 0], [0, 0, 0, 1]])
    mesh_cylinder.transform(transform_yaw)
    mesh_cylinder.transform(transform_mat)
    return mesh, mesh_cylinder
def trimesh_to_open3d(src):
    """
    Convert a trimesh object (or a list of them) into the corresponding
    open3d type.

    Supported inputs: ``trimesh.Trimesh`` -> TriangleMesh (face colors
    averaged into vertex colors), ``trimesh.PointCloud`` -> PointCloud,
    ``trimesh.scene.Camera`` -> PinholeCameraIntrinsic,
    ``trimesh.path.Path3D`` -> LineSet, ``list`` -> list (recursive).

    :raises ValueError: for any other input type.
    """
    if isinstance(src, trimesh.Trimesh):
        dst = open3d.TriangleMesh()
        dst.vertices = open3d.Vector3dVector(src.vertices)
        dst.triangles = open3d.Vector3iVector(src.faces)
        # Average each vertex's incident face colors: accumulate per face,
        # then divide by how many faces touch each vertex.
        vertex_colors = np.zeros((len(src.vertices), 3), dtype=float)
        for face, face_color in zip(src.faces, src.visual.face_colors):
            vertex_colors[face] += face_color[:3] / 255.0  # uint8 -> float
        indices, counts = np.unique(src.faces.flatten(), return_counts=True)
        vertex_colors[indices] /= counts[:, None]
        dst.vertex_colors = open3d.Vector3dVector(vertex_colors)
        dst.compute_vertex_normals()
    elif isinstance(src, trimesh.PointCloud):
        dst = open3d.PointCloud()
        dst.points = open3d.Vector3dVector(src.vertices)
        # len() instead of truthiness: `if src.colors:` raises
        # "truth value of an array is ambiguous" for multi-row color arrays.
        if len(src.colors):
            colors = src.colors
            colors = (colors[:, :3] / 255.0).astype(float)
            dst.colors = open3d.Vector3dVector(colors)
    elif isinstance(src, trimesh.scene.Camera):
        dst = open3d.PinholeCameraIntrinsic(
            width=src.resolution[0],
            height=src.resolution[1],
            fx=src.K[0, 0],
            fy=src.K[1, 1],
            cx=src.K[0, 2],
            cy=src.K[1, 2],
        )
    elif isinstance(src, trimesh.path.Path3D):
        # Break every path entity into consecutive point-index pairs.
        lines = []
        for entity in src.entities:
            for i, j in zip(entity.points[:-1], entity.points[1:]):
                lines.append((i, j))
        lines = np.vstack(lines)
        points = src.vertices
        dst = open3d.LineSet()
        dst.lines = open3d.Vector2iVector(lines)
        dst.points = open3d.Vector3dVector(points)
    elif isinstance(src, list):
        dst = [trimesh_to_open3d(x) for x in src]
    else:
        raise ValueError("Unsupported type of src: {}".format(type(src)))
    return dst
def __init__(self): self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # ++++++++++++++ set publishers and subscribers +++++++++++++++ #pose_cnn_param_file = rospy.get_param('/object_3d_pose/pose_cnn_param_file') print(pose_cnn_param_file) # +++++++++++++++++++++++++++++ load nets +++++++++++++++++++++++++++++ self.pose_detector = PosePredictor(pose_cnn_param_file, self.device) # +++++++++++++++++++++++++++++ load nets +++++++++++++++++++++++++++++ self.bridge = CvBridge() self.last_image_msg = None self.last_cloud_msg = None self.imag_sub = rospy.Subscriber(rospy.get_param(namespace + "object_3d_pose/image_topic"), Image, self.image_cb, queue_size=1) self.clou_sub = rospy.Subscriber(rospy.get_param(namespace + "object_3d_pose/cloud_topic"), PointCloud2, self.cloud_cb, queue_size=1) self.bbox_sub = rospy.Subscriber(rospy.get_param(namespace + "object_3d_pose/bbox_topic"), BoundingBoxes, self.bbox_cb, queue_size=1) self.pose3d_pub = rospy.Publisher(namespace + 'object_3d_pose/tool_pose', PoseStamped, queue_size=1) self.crop_pub = rospy.Publisher(namespace + 'object_3d_pose/crop_img', Image, queue_size=1) self.pcl_crop_pub = rospy.Publisher(namespace + 'object_3d_pose/debug_pcl', PointCloud2, queue_size=1) self.print_mesh = CMeshPublisher(rospy.get_param(namespace + "object_3d_pose/mesh_file"),"mesh","camera_rgb_optical_frame") v, f = readOff.read_off(cad_file) # CAD V = np.asarray(v) F = np.asarray(f) self.V = V self.F = F self.mesh = o3d.TriangleMesh() self.mesh.vertices = o3d.Vector3dVector(self.V) self.mesh.triangles = o3d.Vector3iVector(self.F) self.mesh.compute_vertex_normals() self.vis = o3d.visualization.Visualizer() self.vis.create_window() self.vis.add_geometry(self.mesh) self.vis.run() return
def plot_reconstruction(x, t, phi, grid_size):
    """
    Show the fitted patch against the ground-truth points.

    Meshes the domain [0, 1]^2 at `grid_size` samples per axis, lifts the grid
    to R^3 through the network, and renders three overlaid geometries: the
    ground-truth points (green), the reconstruction of the sample positions
    (red), and the lifted surface mesh (blue).

    :param x: The ground truth points we are trying to fit
    :param t: The sample positions used to fit the mesh
    :param phi: The fitted neural network
    :param grid_size: The number of sample positions per axis in the meshgrid
    :return: None
    """
    # Imported here so merely importing this module never requires OpenGL.
    import open3d

    with torch.no_grad():
        grid_samples = torch.from_numpy(meshgrid_vertices(grid_size)).to(x)
        grid_faces = meshgrid_face_indices(grid_size)
        lifted_grid = phi(grid_samples)
        lifted_samples = phi(t)

        green = np.array([0.1, 0.7, 0.1])
        red = np.array([0.7, 0.1, 0.1])
        blue = np.array([0.1, 0.1, 0.7])

        cloud_gt = open3d.PointCloud()
        cloud_gt.points = open3d.Vector3dVector(x.cpu().numpy())
        cloud_gt.paint_uniform_color(green)

        cloud_recon = open3d.PointCloud()
        cloud_recon.points = open3d.Vector3dVector(lifted_samples.cpu().numpy())
        cloud_recon.paint_uniform_color(red)

        surface = open3d.TriangleMesh()
        surface.vertices = open3d.Vector3dVector(lifted_grid.cpu().numpy())
        surface.triangles = open3d.Vector3iVector(grid_faces)
        surface.compute_vertex_normals()
        surface.paint_uniform_color(blue)

        open3d.draw_geometries([cloud_gt, cloud_recon, surface])
def get_smpl(joints, axes, amounts, translation=(0, 0, 0)):
    """
    Build a male SMPL body mesh with selected joint angles set.

    For each (joint, axis, amount) triple, the corresponding axis-angle pose
    entry is set to mean + var * (2*amount - 1), i.e. `amount` in [0, 1] maps
    linearly onto [mean - var, mean + var].

    :param joints: joint indices into the 23-joint SMPL body pose.
    :param axes: axis index (0..2) per joint.
    :param amounts: normalized amounts in [0, 1], one per joint.
    :param translation: offset added to every vertex of the returned mesh.
    :return: open3d TriangleMesh, tinted by amounts[0].
    """
    model = smplx.create('models', model_type='smpl', gender='male')
    body_pose = torch.zeros([1, 69])
    # Walk the parallel sequences together instead of indexing by position.
    for joint, axis, amount in zip(joints, axes, amounts):
        pose_index = int(joint * 3 + axis)
        body_pose[0, pose_index] = axang_mean[pose_index] + axang_var[pose_index] * (amount * 2 - 1)
    output = model(body_pose=torch.Tensor(body_pose), return_verts=True)
    smpl_vertices = output.vertices.detach().cpu().numpy().squeeze()
    smpl_o3d = o3d.TriangleMesh()
    smpl_o3d.triangles = o3d.Vector3iVector(model.faces)
    smpl_o3d.vertices = o3d.Vector3dVector(smpl_vertices + np.array(translation))
    smpl_o3d.compute_vertex_normals()
    # Red channel encodes the first amount so variants are distinguishable.
    smpl_o3d.paint_uniform_color([amounts[0] / 2 + 0.5, 0.3, 0.3])
    return smpl_o3d
def __enter__(self):
    """
    Context-manager entry: reset the step counter and prepare visualization
    state (open3d window with body/scan/label geometry and 25 joint-marker
    sphere pairs, or a MeshViewer fallback).
    """
    self.steps = 0
    if not self.visualize:
        return self
    if self.viz_mode == 'o3d':
        self.vis_o3d = o3d.Visualizer()
        self.vis_o3d.create_window()
        self.body_o3d = o3d.TriangleMesh()
        self.scan = o3d.PointCloud()
        self.lbl_stage = o3d.PointCloud()
        self.joints_opt = []
        self.joints_gt = []
        # One jet-colored sphere pair per joint: optimized (default radius)
        # and ground truth (larger, 0.03).
        for joint_idx in range(25):
            joint_color = cm.jet(joint_idx / 25.0)[:3]
            self.joints_opt.append(self.get_o3d_sphere(color=joint_color))
            self.joints_gt.append(self.get_o3d_sphere(color=joint_color, radius=0.03))
    else:
        self.mv = MeshViewer(body_color=self.body_color)
    return self
def main(model_folder, model_type='smplx', ext='npz', gender='neutral',
         plot_joints=False, plotting_module='pyrender',
         use_face_contour=False):
    """
    Demo: build an SMPL-X (or SMPL/SMPL-H) body from random shape/expression
    parameters and display it with one of three plotting backends.

    :param model_folder: path to the body model files.
    :param model_type: body model family passed to smplx.create.
    :param ext: model file extension ('npz' or 'pkl').
    :param gender: model gender to load.
    :param plot_joints: also render the joint locations.
    :param plotting_module: 'pyrender', 'matplotlib' or 'open3d'.
    :param use_face_contour: include face contour landmarks in the model.
    :raises ValueError: for an unknown plotting_module.
    """
    model = smplx.create(model_folder, model_type=model_type,
                         gender=gender, use_face_contour=use_face_contour,
                         ext=ext)
    print(model)
    # Random shape and expression; pose stays at the model default.
    betas = torch.randn([1, 10], dtype=torch.float32)
    expression = torch.randn([1, 10], dtype=torch.float32)
    output = model(betas=betas, expression=expression, return_verts=True)
    vertices = output.vertices.detach().cpu().numpy().squeeze()
    joints = output.joints.detach().cpu().numpy().squeeze()
    print('Vertices shape =', vertices.shape)
    print('Joints shape =', joints.shape)
    # Backend-specific imports stay inside each branch so only the chosen
    # plotting library needs to be installed.
    if plotting_module == 'pyrender':
        import pyrender
        import trimesh
        vertex_colors = np.ones([vertices.shape[0], 4]) * [0.3, 0.3, 0.3, 0.8]
        tri_mesh = trimesh.Trimesh(vertices, model.faces,
                                   vertex_colors=vertex_colors)
        mesh = pyrender.Mesh.from_trimesh(tri_mesh)
        scene = pyrender.Scene()
        scene.add(mesh)
        if plot_joints:
            # One small red sphere instanced at every joint via pose matrices.
            sm = trimesh.creation.uv_sphere(radius=0.005)
            sm.visual.vertex_colors = [0.9, 0.1, 0.1, 1.0]
            tfs = np.tile(np.eye(4), (len(joints), 1, 1))
            tfs[:, :3, 3] = joints
            joints_pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs)
            scene.add(joints_pcl)
        pyrender.Viewer(scene, use_raymond_lighting=True)
    elif plotting_module == 'matplotlib':
        from matplotlib import pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        from mpl_toolkits.mplot3d.art3d import Poly3DCollection
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        mesh = Poly3DCollection(vertices[model.faces], alpha=0.1)
        face_color = (1.0, 1.0, 0.9)
        edge_color = (0, 0, 0)
        mesh.set_edgecolor(edge_color)
        mesh.set_facecolor(face_color)
        ax.add_collection3d(mesh)
        ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], color='r')
        if plot_joints:
            ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], alpha=0.1)
        plt.show()
    elif plotting_module == 'open3d':
        import open3d as o3d
        # Legacy (pre-0.8) open3d API: geometry types live at the top level.
        mesh = o3d.TriangleMesh()
        mesh.vertices = o3d.Vector3dVector(vertices)
        mesh.triangles = o3d.Vector3iVector(model.faces)
        mesh.compute_vertex_normals()
        mesh.paint_uniform_color([0.3, 0.3, 0.3])
        o3d.visualization.draw_geometries([mesh])
    else:
        raise ValueError('Unknown plotting_module: {}'.format(plotting_module))
def moving_points(self):
    """
    Return the moving mesh's vertices as a numpy array when the fixed
    geometry is a TriangleMesh; otherwise return None (made explicit here —
    the original fell off the end of the function).
    """
    # isinstance replaces the original `type(self.fixed) is type(open3d.TriangleMesh())`,
    # which constructed a throwaway mesh just to compare exact types.
    if isinstance(self.fixed, open3d.TriangleMesh):
        return np.asarray(self.moving.vertices)
    return None
def main(args):
    """
    Replay PROX-style SMPL-X fitting results frame by frame: load each
    frame's fitted parameters, pose the body, transform it into the scene,
    and show the scene mesh + body in open3d next to the mirrored color
    frame in an OpenCV window.

    :param args: namespace with fitting_dir, base_dir, model_folder,
        num_pca_comps, start and step attributes.
    """
    fitting_dir = args.fitting_dir
    # Recording folder name encodes scene and subject: <scene>_<subjectid>_...
    recording_name = os.path.abspath(fitting_dir).split("/")[-1]
    fitting_dir = osp.join(fitting_dir, 'results')
    scene_name = recording_name.split("_")[0]
    base_dir = args.base_dir
    cam2world_dir = osp.join(base_dir, 'cam2world')
    scene_dir = osp.join(base_dir, 'scenes')
    recording_dir = osp.join(base_dir, 'recordings', recording_name)
    color_dir = os.path.join(recording_dir, 'Color')
    # Hard-coded subject ids known to be female; everyone else is male.
    female_subjects_ids = [162, 3452, 159, 3403]
    subject_id = int(recording_name.split('_')[1])
    if subject_id in female_subjects_ids:
        gender = 'female'
    else:
        gender = 'male'
    cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
    vis = o3d.Visualizer()
    vis.create_window()
    scene = o3d.io.read_triangle_mesh(osp.join(scene_dir, scene_name + '.ply'))
    # Camera-to-world 4x4 transform for this scene.
    with open(os.path.join(cam2world_dir, scene_name + '.json'), 'r') as f:
        trans = np.array(json.load(f))
    vis.add_geometry(scene)
    model = smplx.create(args.model_folder, model_type='smplx',
                         gender=gender, ext='npz',
                         num_pca_comps=args.num_pca_comps,
                         create_global_orient=True,
                         create_body_pose=True,
                         create_betas=True,
                         create_left_hand_pose=True,
                         create_right_hand_pose=True,
                         create_expression=True,
                         create_jaw_pose=True,
                         create_leye_pose=True,
                         create_reye_pose=True,
                         create_transl=True
                         )
    count = 0
    for img_name in sorted(os.listdir(fitting_dir))[args.start::args.step]:
        print('viz frame {}'.format(img_name))
        # NOTE(review): pickle.load on result files — only replay trusted data.
        with open(osp.join(fitting_dir, img_name, '000.pkl'), 'rb') as f:
            param = pickle.load(f)
        # Camera parameters and the pose embedding are not model inputs.
        torch_param = {}
        for key in param.keys():
            if key in ['pose_embedding', 'camera_rotation', 'camera_translation']:
                continue
            else:
                torch_param[key] = torch.tensor(param[key])
        output = model(return_verts=True, **torch_param)
        vertices = output.vertices.detach().cpu().numpy().squeeze()
        # Create and register the body geometry once; later frames only
        # update it in place.
        if count == 0:
            body = o3d.TriangleMesh()
            vis.add_geometry(body)
        body.vertices = o3d.Vector3dVector(vertices)
        body.triangles = o3d.Vector3iVector(model.faces)
        # Clearing cached normals forces a recompute for the new vertices.
        body.vertex_normals = o3d.Vector3dVector([])
        body.triangle_normals = o3d.Vector3dVector([])
        body.compute_vertex_normals()
        body.transform(trans)
        color_img = cv2.imread(os.path.join(color_dir, img_name + '.jpg'))
        # Mirror horizontally to match the scene orientation.
        color_img = cv2.flip(color_img, 1)
        vis.update_geometry()
        # Blocks on each frame until ESC (27) is pressed.
        while True:
            cv2.imshow('frame', color_img)
            vis.poll_events()
            vis.update_renderer()
            key = cv2.waitKey(30)
            if key == 27:
                break
        count += 1
def tr_mesh2pn_mesh(tr_mesh):
    """Convert a trimesh mesh into an open3d TriangleMesh (vertices + faces only)."""
    converted = pn.TriangleMesh()
    converted.vertices = pn.Vector3dVector(np.array(tr_mesh.vertices))
    converted.triangles = pn.Vector3iVector(np.array(tr_mesh.faces))
    return converted
# read obj file filename = os.path.join(dirs.inpdir, 'subject_01/Model/frontal1/obj/110920150452_new.obj') obj = pw.Wavefront(filename) print('number of points', obj.number_of_points) print('number of faces', obj.number_of_faces) # read model face filename = models.bfm2017face12nomouth model = FaceModel(filename=filename) points = model.shape.points faces = model.shape.cells colors = model.color.colors print(model) # compute metrics metrics = MovingToFixedPointSetMetrics(moving=points, fixed=obj.points, registration=True) print(metrics) # compute mesh to mesh metrics fixed = obj2open3dmesh(obj) moving = open3d.TriangleMesh() moving.vertices = open3d.Vector3dVector(points) moving.triangles = open3d.Vector3iVector(faces) moving.vertex_colors = open3d.Vector3dVector(colors) moving.compute_vertex_normals() metrics = MeshToMeshMetrics(moving=moving, fixed=fixed, registration=1) print(metrics)
def pym_mesh2pn_mesh(pym_mesh):
    """Convert a pymesh-style mesh into an open3d TriangleMesh."""
    result = pn.TriangleMesh()
    result.vertices = pn.Vector3dVector(pym_mesh.vertices)
    result.triangles = pn.Vector3iVector(pym_mesh.faces)
    return result
def visualize_plane(annos, args, eps=0.9):
    """visualize plane

    Build one open3d mesh per annotated plane (Structured3D-style annotation
    dict) and display them all, colored either by dominant normal direction
    (args.color == 'normal') or by Manhattan-world group
    (args.color == 'manhattan'). Room ceilings are skipped.

    :param annos: annotation dict with 'junctions', 'planes', 'semantics',
        'planeLineMatrix', 'lineJunctionMatrix' and (optionally) 'manhattan'.
    :param args: namespace with a `color` attribute ('normal' or 'manhattan').
    :param eps: dot-product threshold for classifying a normal as axis-aligned.
    """
    colormap = np.array(colormap_255) / 255
    junctions = [item['coordinate'] for item in annos['junctions']]
    # planeID -> manhattan group ID, needed only for manhattan coloring.
    if args.color == 'manhattan':
        manhattan = dict()
        for planes in annos['manhattan']:
            for planeID in planes['planeID']:
                manhattan[planeID] = planes['ID']
    # extract hole vertices
    # Junctions belonging to window/door openings; these get clipped out of
    # their host planes below.
    lines_holes = []
    for semantic in annos['semantics']:
        if semantic['type'] in ['window', 'door']:
            for planeID in semantic['planeID']:
                lines_holes.extend(
                    np.where(np.array(
                        annos['planeLineMatrix'][planeID]))[0].tolist())
    lines_holes = np.unique(lines_holes)
    _, vertices_holes = np.where(
        np.array(annos['lineJunctionMatrix'])[lines_holes])
    vertices_holes = np.unique(vertices_holes)
    # load polygons
    # For each plane: recover its boundary polygon from line/junction
    # incidence matrices, then triangulate with holes removed.
    polygons = []
    for semantic in annos['semantics']:
        for planeID in semantic['planeID']:
            plane_anno = annos['planes'][planeID]
            lineIDs = np.where(np.array(
                annos['planeLineMatrix'][planeID]))[0].tolist()
            junction_pairs = [
                np.where(np.array(
                    annos['lineJunctionMatrix'][lineID]))[0].tolist()
                for lineID in lineIDs
            ]
            polygon = convert_lines_to_vertices(junction_pairs)
            vertices, faces = clip_polygon(polygon, vertices_holes, junctions, plane_anno)
            polygons.append([
                vertices, faces, planeID, plane_anno['normal'],
                plane_anno['type'], semantic['type']
            ])
    plane_set = []
    for i, (vertices, faces, planeID, normal, plane_type,
            semantic_type) in enumerate(polygons):
        # ignore the room ceiling
        if plane_type == 'ceiling' and semantic_type not in ['door', 'window']:
            continue
        plane_vis = open3d.TriangleMesh()
        plane_vis.vertices = open3d.Vector3dVector(vertices)
        plane_vis.triangles = open3d.Vector3iVector(faces)
        if args.color == 'normal':
            # Pick a color by which signed axis the normal is (nearly) aligned
            # with; colormap[6] is the fallback for oblique planes.
            if np.dot(normal, [1, 0, 0]) > eps:
                plane_vis.paint_uniform_color(colormap[0])
            elif np.dot(normal, [-1, 0, 0]) > eps:
                plane_vis.paint_uniform_color(colormap[1])
            elif np.dot(normal, [0, 1, 0]) > eps:
                plane_vis.paint_uniform_color(colormap[2])
            elif np.dot(normal, [0, -1, 0]) > eps:
                plane_vis.paint_uniform_color(colormap[3])
            elif np.dot(normal, [0, 0, 1]) > eps:
                plane_vis.paint_uniform_color(colormap[4])
            elif np.dot(normal, [0, 0, -1]) > eps:
                plane_vis.paint_uniform_color(colormap[5])
            else:
                plane_vis.paint_uniform_color(colormap[6])
        elif args.color == 'manhattan':
            # paint each plane with manhattan world
            if planeID not in manhattan.keys():
                plane_vis.paint_uniform_color(colormap[6])
            else:
                plane_vis.paint_uniform_color(colormap[manhattan[planeID]])
        plane_set.append(plane_vis)
    draw_geometries_with_back_face(plane_set)
def get_mesh(self):
    """Assemble this object's vertices, faces and colors into an open3d TriangleMesh."""
    assembled = open3d.TriangleMesh()
    assembled.vertices = open3d.Vector3dVector(self.vertices)
    assembled.triangles = open3d.Vector3iVector(self.faces)
    assembled.vertex_colors = open3d.Vector3dVector(self.colors)
    return assembled
def animate_flow(x, t, phi, grid_size, t_sample, mesh0=None):
    """
    Interactive animation of a learned flow: precompute the mesh at a series
    of time steps, then step through frames with ',' (advance) and '.'
    (rewind) in an open3d window.

    NOTE(review): relies on module-level globals `i` (current frame), `ts`
    (time values) and `Nframes` (frame count) that are not defined in this
    block — confirm they are set by the caller. `embed_3d`, `meshgrid_*` and
    `get_Lines` are project helpers; behavior inferred from usage.

    :param x: ground truth points being fit
    :param t: sample positions used to fit the mesh
    :param phi: fitted network exposing __call__, and flow(t0, t1, pts)
    :param grid_size: samples per axis for the generated mesh grid
    :param t_sample: stride for subsampling t when drawing flow lines
    :param mesh0: optional (vertices, faces) pair overriding the generated grid
    :return: None
    """
    import open3d
    with torch.no_grad():
        if mesh0 == None:
            # Generate a regular grid, lifted into 3D at time t[0, 2].
            mesh_samples = embed_3d(
                torch.from_numpy(meshgrid_vertices(grid_size)).to(x), t[0, 2])
            print(mesh_samples.shape)
            mesh_faces = meshgrid_face_indices(grid_size)
            mesh_vertices = phi(mesh_samples)[:, 0:3]
        else:
            # Use the caller-supplied mesh instead of a generated grid.
            mesh_samples = torch.from_numpy(mesh0[0]).to(x)
            print("sample", mesh_samples.shape)
            mesh_faces = mesh0[1][:, 0:3]
            mesh_vertices = phi(mesh_samples)[:, 0:3]
        recon_vertices = phi(t)[:, 0:3]
        gt_color = np.array([0.1, 0.7, 0.1])
        recon_color = np.array([0.7, 0.1, 0.1])
        mesh_color = np.array([0.1, 0.1, 0.7])
        curve_color = np.array([0.2, 0.2, 0.5])
        # Static geometries: ground truth (green), reconstruction (red),
        # surface mesh (blue, animated below), raw samples, and flow lines.
        pcloud_gt = open3d.PointCloud()
        pcloud_gt.points = open3d.Vector3dVector(x.cpu().numpy())
        pcloud_gt.paint_uniform_color(gt_color)
        pcloud_recon = open3d.PointCloud()
        pcloud_recon.points = open3d.Vector3dVector(
            recon_vertices.cpu().numpy())
        pcloud_recon.paint_uniform_color(recon_color)
        mesh_recon = open3d.TriangleMesh()
        mesh_recon.vertices = open3d.Vector3dVector(
            mesh_vertices.cpu().numpy())
        mesh_recon.triangles = open3d.Vector3iVector(mesh_faces)
        mesh_recon.compute_vertex_normals()
        mesh_recon.paint_uniform_color(mesh_color)
        pc_initial = open3d.PointCloud()
        pc_initial.points = open3d.Vector3dVector(t.cpu().numpy())
        pc_initial.paint_uniform_color(curve_color)
        flow_ode = open3d.LineSet()
        # print(x.shape)
        # print(t.shape)
        flow = get_Lines(phi, t[::t_sample, :], 30)
        print(len(flow[0]))
        flow_ode.points, flow_ode.lines = open3d.Vector3dVector(flow[0]), \
            open3d.Vector2iVector(flow[1])
        # flow_ode.colors = open3d.Vector3dVector(curve_color)
        # vis = open3d.Visualizer()
        # vis.create_window()
        # vis.remove_geometry(flow_ode)
        # for geom in [pcloud_gt, pcloud_recon, mesh_recon, pc_initial, flow_ode]:
        #     vis.add_geometry(geometry=geom)
        #     # for i in range(10):
        #     # vis.remove_geometry(flow_ode)
        #     # vis.remove_geometry(flow_ode)
        #     vis.update_geometry()
        #     vis.update_renderer()
        #     vis.poll_events()
        # Precompute every animation frame by integrating the flow from ts[j]
        # to ts[j+1]. NOTE(review): the [:, 0:3] slice feeds a 3-column array
        # back into phi.flow next iteration — confirm flow accepts that shape.
        store_frames = []
        temp = mesh_samples
        store_frames.append(temp)
        for j in range(0, Nframes - 1):
            print(j)
            tj = ts[j]
            tnext = ts[j + 1]
            temp = phi.flow(tj, tnext, temp)[:, 0:3]
            store_frames.append(temp)

    def next_frame(vis):
        """Key callback (','): advance one precomputed frame; True stops the loop."""
        # ctr = vis.get_view_control()
        # ctr.rotate(10.0, 0.0)
        # global i, ts
        global i, ts, mesh0, Nframes
        if i >= Nframes:
            return True
        print(i)
        # if mesh0 == None:
        #     mesh_faces = meshgrid_face_indices(grid_size)
        # else:
        #     mesh_faces =mesh0[1]
        mesh_vertices = store_frames[i]
        i += 1
        mesh_recon.vertices = open3d.Vector3dVector(
            mesh_vertices.cpu().numpy())
        mesh_recon.triangles = open3d.Vector3iVector(mesh_faces)
        mesh_recon.compute_vertex_normals()
        mesh_recon.paint_uniform_color(mesh_color)
        vis.update_geometry()
        vis.update_renderer()
        return False

    def revert(vis):
        """Key callback ('.'): rewind the animation to frame 0."""
        global i
        i = 0
        return False

    key_to_callback = {}
    key_to_callback[ord(",")] = next_frame
    key_to_callback[ord(".")] = revert
    open3d.draw_geometries_with_key_callbacks(
        [pcloud_gt, pcloud_recon, mesh_recon, pc_initial, flow_ode],
        key_to_callback)
def update(self, edges, grid_triangles, color='red'):
    """
    Updating only the edges (assuming points don't change).

    Builds a LineSet from `edges` (colored per-edge, defaulting to `color`)
    and a TriangleMesh from `grid_triangles`, adds both to the visualizer,
    rotates the view slightly, and refreshes the window.

    :param edges: edge objects exposing p1/p2 endpoints (with .id) and .color.
    :param grid_triangles: triples of point objects defining mesh faces.
    :param color: 'red', 'green' or anything else (-> blue) for uncolored edges.
    :return: None.
    """
    if color == 'red':
        c = [1, 0, 0]
    elif color == 'green':
        c = [0, 1, 0]
    else:
        c = [0, 0, 1]
    lines = [[edge.p1.id, edge.p2.id] for edge in edges]
    for edge in edges:
        if edge.color == []:
            edge.color = c
        # NOTE(review): the original condition was
        # `p1.id == 2 and p2.id == 2 or p2.id == 2 and p1.id == 2` — the two
        # disjuncts are identical, so it reduces to the single test below.
        # If the intent was the undirected edge (1, 2), this needs a real fix
        # — confirm against the caller.
        if edge.p1.id == 2 and edge.p2.id == 2:
            edge.color = [0, 0, 1]
    colors = [edge.color for edge in edges]
    line_set = o3d.geometry.LineSet()
    points = np.array([(point.x, point.y, point.z) for point in self.points])
    line_set.points = o3d.Vector3dVector(points)
    line_set.lines = o3d.utility.Vector2iVector(lines)
    line_set.colors = o3d.utility.Vector3dVector(colors)
    # Translate each triangle's point objects into indices into self.points.
    facets = []
    for triangle in grid_triangles:
        index_1 = self.points.index(triangle[0])
        index_2 = self.points.index(triangle[1])
        index_3 = self.points.index(triangle[2])
        facets.append([index_1, index_2, index_3])
    facets = np.asarray(facets).astype(np.int32)
    points_triangles = np.array([(point.x, point.y, point.z) for point in self.points])
    mesh = o3d.TriangleMesh()
    mesh.vertices = o3d.Vector3dVector(points_triangles)
    mesh.triangles = o3d.Vector3iVector(facets)
    # Manual fix since i don't define the vertices of a triangle clockwise. If they are anti-clockwise, open3d
    # won't render their mesh.
    mesh.compute_triangle_normals()
    for i, n in enumerate(np.asarray(mesh.triangle_normals)):
        t = mesh.triangles[i]
        p_index = t[0]
        p = self.points[p_index]
        # Flip winding when the face normal opposes the stored vertex normal.
        if np.dot(n, p.normal) < 0:
            mesh.triangles[i] = np.flip(t)
    self.visualizer.get_render_option().point_size = 3.5
    self.visualizer.add_geometry(line_set)
    self.visualizer.add_geometry(mesh)
    # Rotate the object.
    ctr = self.visualizer.get_view_control()
    self.rotation_angle += 4
    ctr.rotate(x=self.rotation_angle, y=0)
    self.visualizer.update_geometry()
    self.visualizer.poll_events()
    self.visualizer.update_renderer()