def get_smpl(pkl_data, json_data):
    """Reconstruct one SMPL body from a saved fit and prepare open3d geometry.

    Prints shape diagnostics (axis-aligned extents of the unposed body and a
    volume-based weight estimate) alongside the subject's ground-truth
    height/weight taken from the JSON metadata.

    :param pkl_data: dict of saved fit data (betas, body_pose, transl,
        global_orient, camera parameters, gt_joints)
    :param json_data: OpenPose-style metadata; people[0] supplies
        gender_gt / height / weight
    :return: (posed vertices, model faces, posed o3d mesh, x-offset copy of
        the posed mesh, list of sphere markers at back-projected GT joints)
    """
    gender = json_data['people'][0]['gender_gt']
    print('Target height {}, weight {}'.format(json_data['people'][0]['height'], json_data['people'][0]['weight']))
    betas = torch.Tensor(pkl_data['betas']).unsqueeze(0)
    pose = torch.Tensor(pkl_data['body_pose']).unsqueeze(0)
    transl = torch.Tensor(pkl_data['transl']).unsqueeze(0)
    global_orient = torch.Tensor(pkl_data['global_orient']).unsqueeze(0)
    model = smplx.create('models', model_type='smpl', gender=gender)
    output = model(betas=betas, body_pose=pose, transl=transl, global_orient=global_orient, return_verts=True)
    smpl_vertices = output.vertices.detach().cpu().numpy().squeeze()
    smpl_joints = output.joints.detach().cpu().numpy().squeeze()  # NOTE(review): unused below
    # Zeroed body pose (T-pose) isolates body shape, so the per-axis extents
    # below can be read as wingspan / height / thickness.
    output_unposed = model(betas=betas, body_pose=pose * 0, transl=transl, global_orient=global_orient, return_verts=True)
    smpl_vertices_unposed = output_unposed.vertices.detach().cpu().numpy().squeeze()
    for i, lbl in enumerate(['Wingspan', 'Height', 'Thickness']):
        print('Actual', lbl, smpl_vertices_unposed[:, i].max() - smpl_vertices_unposed[:, i].min(), end=' ')
    print()
    # Weight estimate: mesh volume * 1.03 * 1000 — presumably m^3 * body
    # density (g/cm^3) scaled to kg; TODO confirm units.
    smpl_trimesh = trimesh.Trimesh(vertices=np.asarray(smpl_vertices_unposed), faces=model.faces)
    print('Est weight from volume', smpl_trimesh.volume * 1.03 * 1000)
    # print('Pose embedding', pkl_data['pose_embedding'])
    # print('Body pose', np.array2string(pkl_data['body_pose'], separator=', '))
    smpl_o3d = o3d.TriangleMesh()
    smpl_o3d.triangles = o3d.Vector3iVector(model.faces)
    smpl_o3d.vertices = o3d.Vector3dVector(smpl_vertices)
    smpl_o3d.compute_vertex_normals()
    # smpl_o3d.paint_uniform_color([0.3, 0.3, 0.3])
    # Second copy shifted +1.5 along x for side-by-side comparison.
    smpl_o3d_2 = o3d.TriangleMesh()
    smpl_o3d_2.triangles = o3d.Vector3iVector(model.faces)
    smpl_o3d_2.vertices = o3d.Vector3dVector(smpl_vertices + np.array([1.5, 0, 0]))
    smpl_o3d_2.compute_vertex_normals()
    smpl_o3d_2.paint_uniform_color([0.7, 0.3, 0.3])
    # Visualize SMPL joints - Patrick
    camera = PerspectiveCamera(rotation=torch.tensor(pkl_data['camera_rotation']).unsqueeze(0),
                               translation=torch.tensor(pkl_data['camera_translation']).unsqueeze(0),
                               center=torch.tensor(pkl_data['camera_center']),
                               focal_length_x=torch.tensor(pkl_data['camera_focal_length_x']),
                               focal_length_y=torch.tensor(pkl_data['camera_focal_length_y']))
    # Back-project 2D GT joints to 3D; second argument 1.8 is presumably an
    # assumed depth in meters — TODO confirm inverse_camera_tform semantics.
    gt_pos_3d = camera.inverse_camera_tform(torch.tensor(pkl_data['gt_joints']).unsqueeze(0), 1.8).detach().squeeze(0).cpu().numpy()
    all_markers = []
    for i in range(25):
        color = cm.jet(i / 25.0)[:3]  # distinct color per joint index
        # smpl_marker = get_o3d_sphere(color=color, pos=smpl_joints[i, :])
        # all_markers.append(smpl_marker)
        pred_marker = get_o3d_sphere(color=color, pos=gt_pos_3d[i, :], radius=0.03)
        all_markers.append(pred_marker)
    return smpl_vertices, model.faces, smpl_o3d, smpl_o3d_2, all_markers
def stl2ply(stl_mesh):
    """Convert a numpy-stl mesh (independent per-face corner arrays) into an
    indexed ``pn.TriangleMesh`` by de-duplicating shared corner coordinates.

    :param stl_mesh: numpy-stl Mesh exposing .v0/.v1/.v2, .points, .normals
    :return: pn.TriangleMesh with shared vertices, triangles and face normals
    """
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0; pd.concat produces the identical stacked frame.
    points = pd.concat([pd.DataFrame(stl_mesh.v0),
                        pd.DataFrame(stl_mesh.v1),
                        pd.DataFrame(stl_mesh.v2)])
    points = points.drop_duplicates()
    points = points.reset_index(drop=True)
    points = pd.DataFrame(points.values, columns=['x', 'y', 'z'])
    points['index'] = points.index
    # Each STL face row carries its three corner coordinates; map every corner
    # back to its de-duplicated vertex id via an exact-coordinate left join.
    # NOTE(review): this relies on bit-identical float coordinates between
    # v0/v1/v2 and .points — true for numpy-stl's views, but worth confirming.
    edge = pd.DataFrame(
        stl_mesh.points,
        columns=['x0', 'y0', 'z0', 'x1', 'y1', 'z1', 'x2', 'y2', 'z2'])
    edge['id0'] = edge.merge(points, how='left', left_on=['x0', 'y0', 'z0'],
                             right_on=['x', 'y', 'z'])['index']
    edge['id1'] = edge.merge(points, how='left', left_on=['x1', 'y1', 'z1'],
                             right_on=['x', 'y', 'z'])['index']
    edge['id2'] = edge.merge(points, how='left', left_on=['x2', 'y2', 'z2'],
                             right_on=['x', 'y', 'z'])['index']
    edge = edge[['id0', 'id1', 'id2']].values
    points = points[['x', 'y', 'z']].values
    ply_mesh = pn.TriangleMesh()
    ply_mesh.vertices = pn.Vector3dVector(points)
    ply_mesh.triangles = pn.Vector3iVector(edge)
    ply_mesh.triangle_normals = pn.Vector3dVector(stl_mesh.normals)
    return ply_mesh
def get_all_smpl(pkl_data, json_data):
    """Build one SMPL mesh (plus per-loss 3D text labels) for every entry in
    ``pkl_data['all_results']``, laid out on a grid for side-by-side viewing.

    :param pkl_data: dict with 'all_results'; each entry holds betas /
        body_pose / transl / global_orient arrays and a 'loss_dict'
    :param json_data: OpenPose-style metadata; people[0]['gender_gt'] selects
        the SMPL model variant
    :return: flat list of open3d geometries (text labels then body meshes)
    """
    gender = json_data['people'][0]['gender_gt']
    all_meshes = []
    trans = np.array([4, 0, 0])
    # PERF: the SMPL model depends only on gender, so create it once — the
    # original re-loaded the model file on every loop iteration.
    model = smplx.create('models', model_type='smpl', gender=gender)
    for i, result in enumerate(pkl_data['all_results']):
        t = trans + [0, i * 3, 0]  # grid offset for this result
        betas = torch.Tensor(result['betas']).unsqueeze(0)
        pose = torch.Tensor(result['body_pose']).unsqueeze(0)
        transl = torch.Tensor(result['transl']).unsqueeze(0)
        global_orient = torch.Tensor(result['global_orient']).unsqueeze(0)
        output = model(betas=betas, body_pose=pose, transl=transl,
                       global_orient=global_orient, return_verts=True)
        smpl_vertices = output.vertices.detach().cpu().numpy().squeeze()
        smpl_o3d = o3d.TriangleMesh()
        smpl_o3d.triangles = o3d.Vector3iVector(model.faces)
        smpl_o3d.vertices = o3d.Vector3dVector(smpl_vertices)
        smpl_o3d.compute_vertex_normals()
        smpl_o3d.translate(t)
        # One floating text line per loss term, stacked beside the body.
        for idx, key in enumerate(result['loss_dict'].keys()):
            lbl = '{} {:.2f}'.format(key, float(result['loss_dict'][key]))
            all_meshes.append(text_3d(lbl, t + [1, idx * 0.2 - 1, 2],
                                      direction=(0.01, 0, -1), degree=-90,
                                      font_size=150, density=0.2))
        all_meshes.append(smpl_o3d)
    return all_meshes
def next_frame(vis):
    # Open3D key-callback: advance the animation by one pre-computed frame.
    # Returns True (tell the viewer to stop invoking us) once every frame has
    # been shown, False otherwise.
    # Relies on module-level state: frame counter `i`, times `ts`, cached
    # per-frame vertex tensors `store_frames`, the displayed `mesh_recon`,
    # and its fixed `mesh_faces` / `mesh_color` — TODO confirm these are all
    # initialized before the viewer starts.
    # ctr = vis.get_view_control()
    # ctr.rotate(10.0, 0.0)
    # global i, ts
    global i, ts, mesh0, Nframes
    if i >= Nframes:
        return True  # animation exhausted
    print(i)
    # if mesh0 == None:
    #     mesh_faces = meshgrid_face_indices(grid_size)
    # else:
    #     mesh_faces =mesh0[1]
    mesh_vertices = store_frames[i]
    i += 1
    # Push this frame's vertex positions into the existing mesh and refresh
    # normals/color so shading stays correct after deformation.
    mesh_recon.vertices = open3d.Vector3dVector(
        mesh_vertices.cpu().numpy())
    mesh_recon.triangles = open3d.Vector3iVector(mesh_faces)
    mesh_recon.compute_vertex_normals()
    mesh_recon.paint_uniform_color(mesh_color)
    vis.update_geometry()
    vis.update_renderer()
    return False  # keep the callback active
def test_benchmark():
    """Benchmark numpy <-> open3d vector conversions for the three vector
    container types, asserting the values survive a round trip.

    BUG FIX: the original printed the direction labels swapped (the
    numpy->open3d construction was labelled "open3d -> numpy" and vice
    versa), and printed the *previous* array's shape in the Vector2iVector
    header because the header was printed before ``x`` was reassigned.
    """
    vector_size = int(2e6)

    def _roundtrip(name, ctor, x):
        # Time numpy -> open3d construction and open3d -> numpy view, then
        # verify the round-tripped values match the input.
        print("\n%s:" % name, x.shape)
        start_time = time.time()
        y = ctor(x)
        print("numpy -> open3d: %.6fs" % (time.time() - start_time))
        start_time = time.time()
        z = np.asarray(y)
        print("open3d -> numpy: %.6fs" % (time.time() - start_time))
        np.testing.assert_allclose(x, z)

    x = np.random.randint(10, size=(vector_size, 3)).astype(np.float64)
    _roundtrip("open3d.Vector3dVector", open3d.Vector3dVector, x)
    x = np.random.randint(10, size=(vector_size, 3)).astype(np.int32)
    _roundtrip("open3d.Vector3iVector", open3d.Vector3iVector, x)
    x = np.random.randint(10, size=(vector_size, 2)).astype(np.int32)
    _roundtrip("open3d.Vector2iVector", open3d.Vector2iVector, x)
def convert_to_IFS(self):
    """Build an open3d TriangleMesh (indexed face set) from this object's
    parsed vertex, color, and face data."""
    mesh = open3d.TriangleMesh()
    mesh.vertices = self.vertex_data
    # Only attach colors when some were actually parsed.
    if len(self.vertex_colors) != 0:
        mesh.vertex_colors = self.vertex_colors
    mesh.triangles = open3d.Vector3iVector(
        [self.__get_face_indices(face) for face in self.faces])
    return mesh
def obj2open3dmesh(obj):
    """Convert a parsed OBJ object (``.points``, ``.faces``, ``.colors``)
    into an open3d TriangleMesh with vertex normals computed.

    :param obj: object exposing points (Nx3), faces (Mx3), colors (Nx3)
    :return: open3d.TriangleMesh
    """
    mesh = open3d.TriangleMesh()
    mesh.vertices = open3d.Vector3dVector(obj.points)
    mesh.triangles = open3d.Vector3iVector(obj.faces)
    mesh.vertex_colors = open3d.Vector3dVector(obj.colors)
    # FIX: the original checked `compute_vertex_normals() is False` and, if
    # so, called it again — retrying the identical call on the same data can
    # only repeat the same result, so a single call is equivalent.
    mesh.compute_vertex_normals()
    return mesh
def plot_flow(x, t, phi, grid_size, t_sample): """ Plot the ground truth points, and the reconstructed patch by meshing the domain [0, 1]^2 and lifting the mesh to R^3 :param x: The ground truth points we are trying to fit :param t: The sample positions used to fit the mesh :param phi: The fitted neural network :param grid_size: The number of sample positions per axis in the meshgrid :return: None """ # I'm doing the input here so you don't crash if you never use this function and don't have OpenGL import open3d with torch.no_grad(): mesh_samples = embed_3d( torch.from_numpy(meshgrid_vertices(grid_size)).to(x), t[0, 2]) mesh_faces = meshgrid_face_indices(grid_size) mesh_vertices = phi(mesh_samples)[:, 0:3] recon_vertices = phi(t)[:, 0:3] gt_color = np.array([0.1, 0.7, 0.1]) recon_color = np.array([0.7, 0.1, 0.1]) mesh_color = np.array([0.1, 0.1, 0.7]) curve_color = np.array([0.2, 0.2, 0.5]) pcloud_gt = open3d.PointCloud() pcloud_gt.points = open3d.Vector3dVector(phi.invert(x).cpu().numpy()) pcloud_gt.paint_uniform_color(gt_color) pcloud_recon = open3d.PointCloud() pcloud_recon.points = open3d.Vector3dVector( recon_vertices.cpu().numpy()) pcloud_recon.paint_uniform_color(recon_color) mesh_recon = open3d.TriangleMesh() mesh_recon.vertices = open3d.Vector3dVector( mesh_vertices.cpu().numpy()) mesh_recon.triangles = open3d.Vector3iVector(mesh_faces) mesh_recon.compute_vertex_normals() mesh_recon.paint_uniform_color(mesh_color) pc_initial = open3d.PointCloud() pc_initial.points = open3d.Vector3dVector(t.cpu().numpy()) pc_initial.paint_uniform_color(curve_color) flow_ode = open3d.LineSet() # print(x.shape) # print(t.shape) flow = get_Lines(phi, t[::t_sample, :], 15) flow_ode.points, flow_ode.lines = open3d.Vector3dVector(flow[0]), \ open3d.Vector2iVector(flow[1]) # flow_ode.colors = open3d.Vector3dVector(curve_color) open3d.draw_geometries( [pcloud_gt, pcloud_recon, mesh_recon, pc_initial, flow_ode])
def show_mesh(vertices, triangle, color=(0, 0, 0), only_genmesh=False):
    """Wrap vertex/face arrays in an open3d TriangleMesh, optionally display
    it together with a coordinate frame at the vertex centroid, and return it.

    :param vertices: (N, 3) vertex positions
    :param triangle: (M, 3) triangle vertex indices
    :param color: uniform RGB color for the mesh (0-1 range)
    :param only_genmesh: when True, build the mesh without opening a viewer
    :return: the constructed open3d.TriangleMesh
    """
    # FIX: the default was a mutable list ([0, 0, 0]) shared across calls —
    # a classic Python pitfall; an immutable tuple is the safe equivalent.
    mesh = open3d.TriangleMesh()
    mesh.vertices = open3d.Vector3dVector(vertices)
    mesh.triangles = open3d.Vector3iVector(triangle)
    mesh.paint_uniform_color(color)
    centroid = np.mean(vertices, 0)
    mesh_frame = open3d.create_mesh_coordinate_frame(size=1.6, origin=centroid)
    if not only_genmesh:
        open3d.draw_geometries([mesh, mesh_frame])
    return mesh
def visualize_mesh_v(mesh, vertices_colors):
    """Display ``mesh`` (dict with 'vertices', 'faces', 'n_vertices') in an
    open3d window, using the given per-vertex colors or random ones when
    ``vertices_colors`` is None."""
    if vertices_colors is None:
        colors = np.random.uniform(0, 1, size=(mesh['n_vertices'], 3))
    else:
        colors = vertices_colors
    viz_mesh = open3d.geometry.TriangleMesh()
    viz_mesh.vertices = open3d.Vector3dVector(mesh['vertices'])
    viz_mesh.triangles = open3d.Vector3iVector(mesh['faces'])
    viz_mesh.vertex_colors = open3d.Vector3dVector(colors)
    open3d.draw_geometries([viz_mesh])
def trimesh_to_open3d(src):
    """Recursively convert trimesh objects (mesh, point cloud, camera, 3D
    path, or a list of these) into their open3d counterparts.

    :param src: a trimesh.Trimesh / PointCloud / scene.Camera / path.Path3D,
        or a list of any of those
    :return: the matching open3d object (or list of them)
    :raises ValueError: for unsupported input types
    """
    if isinstance(src, trimesh.Trimesh):
        dst = open3d.TriangleMesh()
        dst.vertices = open3d.Vector3dVector(src.vertices)
        dst.triangles = open3d.Vector3iVector(src.faces)
        # Open3D has no per-face colors: average each face's color into its
        # vertices, normalizing by how many faces share each vertex.
        vertex_colors = np.zeros((len(src.vertices), 3), dtype=float)
        for face, face_color in zip(src.faces, src.visual.face_colors):
            vertex_colors[face] += face_color[:3] / 255.0  # uint8 -> float
        indices, counts = np.unique(src.faces.flatten(), return_counts=True)
        vertex_colors[indices] /= counts[:, None]
        dst.vertex_colors = open3d.Vector3dVector(vertex_colors)
        dst.compute_vertex_normals()
    elif isinstance(src, trimesh.PointCloud):
        dst = open3d.PointCloud()
        dst.points = open3d.Vector3dVector(src.vertices)
        # BUG FIX: src.colors is an (N, 4) array; `if src.colors:` raises
        # "truth value of an array is ambiguous" whenever N > 1. Test its
        # length instead.
        if len(src.colors) > 0:
            colors = src.colors
            colors = (colors[:, :3] / 255.0).astype(float)
            dst.colors = open3d.Vector3dVector(colors)
    elif isinstance(src, trimesh.scene.Camera):
        dst = open3d.PinholeCameraIntrinsic(
            width=src.resolution[0],
            height=src.resolution[1],
            fx=src.K[0, 0],
            fy=src.K[1, 1],
            cx=src.K[0, 2],
            cy=src.K[1, 2],
        )
    elif isinstance(src, trimesh.path.Path3D):
        # Flatten each path entity into consecutive point-index segments.
        lines = []
        for entity in src.entities:
            for i, j in zip(entity.points[:-1], entity.points[1:]):
                lines.append((i, j))
        lines = np.vstack(lines)
        points = src.vertices
        dst = open3d.LineSet()
        dst.lines = open3d.Vector2iVector(lines)
        dst.points = open3d.Vector3dVector(points)
    elif isinstance(src, list):
        dst = [trimesh_to_open3d(x) for x in src]
    else:
        raise ValueError("Unsupported type of src: {}".format(type(src)))
    return dst
def create_plane(center, normal, plane_size, axis_size=0.03):
    """Build a square plane mesh centered at ``center`` oriented by
    ``normal``, plus a small cylinder marking the plane's normal axis.

    :param center: 3-vector world position of the plane center
    :param normal: 3-vector plane normal
    :param plane_size: side length of the square plane
    :param axis_size: height of the normal-axis cylinder marker
    :return: (plane TriangleMesh, axis-marker cylinder TriangleMesh)
    """
    mesh = o3d.TriangleMesh()
    # Build an orthonormal frame {normal, cross_vec_1, cross_vec_2}; the sign
    # flips keep the in-plane axes pointing in a consistent direction.
    # (The original also built an unused quaternion `quat`; removed.)
    e = np.array([0, 1, 0])
    cross_vec_1 = np.cross(normal, e)
    if cross_vec_1[0] < 0:
        cross_vec_1 *= -1
    cross_vec_1 /= np.linalg.norm(cross_vec_1)
    cross_vec_2 = np.cross(normal, cross_vec_1)
    if cross_vec_2[2] < 0:
        cross_vec_2 *= -1
    cross_vec_2 /= np.linalg.norm(cross_vec_2)
    vec_set_basis = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    vec_set_normal = np.array([normal, cross_vec_1, cross_vec_2])
    # Rotation best aligning the canonical basis to the plane frame.
    # NOTE(review): Rot.match_vectors / as_dcm are old scipy (<1.6) names;
    # kept for compatibility with the pinned environment (align_vectors /
    # as_matrix in modern scipy).
    rr = Rot.match_vectors(vec_set_basis, vec_set_normal)[0]
    transform_mat = np.eye(4)
    transform_mat[:3, :3] = rr.as_dcm().T
    transform_mat[:3, 3] = center
    # Square in the local y=0 plane.
    p1 = np.array([-plane_size * 0.5, 0, -plane_size * 0.5])
    p2 = np.array([plane_size * 0.5, 0, -plane_size * 0.5])
    p3 = np.array([plane_size * 0.5, 0, plane_size * 0.5])
    p4 = np.array([-plane_size * 0.5, 0, plane_size * 0.5])
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; it
    # was an alias for the builtin float, which is used here instead.
    _mesh_points = np.array([p1, p2, p3, p4], dtype=float)
    mesh.vertices = o3d.Vector3dVector(_mesh_points)
    # Duplicate the two triangles with reversed winding so the plane renders
    # from both sides.
    _mesh_triangles = np.array([[0, 2, 1], [2, 0, 3]])
    _mesh_triangles = np.concatenate(
        [_mesh_triangles, _mesh_triangles[:, ::-1]], axis=0)
    mesh.triangles = o3d.Vector3iVector(_mesh_triangles)
    mesh.paint_uniform_color([1, 0.706, 0])
    mesh.compute_vertex_normals()
    mesh.transform(transform_mat)
    # Thin cylinder along the plane normal: rotate from z-up to the local y
    # axis and lift by half its height so it sits on the plane.
    mesh_cylinder = o3d.create_mesh_cylinder(radius=axis_size / 50,
                                             height=axis_size)
    mesh_cylinder.paint_uniform_color([0, 1.0, 0])
    transform_yaw = np.array([[1, 0, 0, 0], [0, 0, 1, axis_size / 2],
                              [0, 1, 0, 0], [0, 0, 0, 1]])
    mesh_cylinder.transform(transform_yaw)
    mesh_cylinder.transform(transform_mat)
    return mesh, mesh_cylinder
def __init__(self): self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # ++++++++++++++ set publishers and subscribers +++++++++++++++ #pose_cnn_param_file = rospy.get_param('/object_3d_pose/pose_cnn_param_file') print(pose_cnn_param_file) # +++++++++++++++++++++++++++++ load nets +++++++++++++++++++++++++++++ self.pose_detector = PosePredictor(pose_cnn_param_file, self.device) # +++++++++++++++++++++++++++++ load nets +++++++++++++++++++++++++++++ self.bridge = CvBridge() self.last_image_msg = None self.last_cloud_msg = None self.imag_sub = rospy.Subscriber(rospy.get_param(namespace + "object_3d_pose/image_topic"), Image, self.image_cb, queue_size=1) self.clou_sub = rospy.Subscriber(rospy.get_param(namespace + "object_3d_pose/cloud_topic"), PointCloud2, self.cloud_cb, queue_size=1) self.bbox_sub = rospy.Subscriber(rospy.get_param(namespace + "object_3d_pose/bbox_topic"), BoundingBoxes, self.bbox_cb, queue_size=1) self.pose3d_pub = rospy.Publisher(namespace + 'object_3d_pose/tool_pose', PoseStamped, queue_size=1) self.crop_pub = rospy.Publisher(namespace + 'object_3d_pose/crop_img', Image, queue_size=1) self.pcl_crop_pub = rospy.Publisher(namespace + 'object_3d_pose/debug_pcl', PointCloud2, queue_size=1) self.print_mesh = CMeshPublisher(rospy.get_param(namespace + "object_3d_pose/mesh_file"),"mesh","camera_rgb_optical_frame") v, f = readOff.read_off(cad_file) # CAD V = np.asarray(v) F = np.asarray(f) self.V = V self.F = F self.mesh = o3d.TriangleMesh() self.mesh.vertices = o3d.Vector3dVector(self.V) self.mesh.triangles = o3d.Vector3iVector(self.F) self.mesh.compute_vertex_normals() self.vis = o3d.visualization.Visualizer() self.vis.create_window() self.vis.add_geometry(self.mesh) self.vis.run() return
def plot_reconstruction(x, t, phi, grid_size):
    """
    Plot the ground truth points, and the reconstructed patch by meshing the
    domain [0, 1]^2 and lifting the mesh to R^3

    :param x: The ground truth points we are trying to fit
    :param t: The sample positions used to fit the mesh
    :param phi: The fitted neural network
    :param grid_size: The number of sample positions per axis in the meshgrid
    :return: None
    """
    # Imported lazily so modules without OpenGL can still import this file.
    import open3d
    with torch.no_grad():
        grid = torch.from_numpy(meshgrid_vertices(grid_size)).to(x)
        faces = meshgrid_face_indices(grid_size)
        lifted_grid = phi(grid)
        lifted_samples = phi(t)

        def _colored_cloud(pts, rgb):
            # Small helper: wrap a numpy array in a uniformly colored cloud.
            cloud = open3d.PointCloud()
            cloud.points = open3d.Vector3dVector(pts)
            cloud.paint_uniform_color(rgb)
            return cloud

        gt_cloud = _colored_cloud(x.cpu().numpy(),
                                  np.array([0.1, 0.7, 0.1]))
        recon_cloud = _colored_cloud(lifted_samples.cpu().numpy(),
                                     np.array([0.7, 0.1, 0.1]))

        surface = open3d.TriangleMesh()
        surface.vertices = open3d.Vector3dVector(lifted_grid.cpu().numpy())
        surface.triangles = open3d.Vector3iVector(faces)
        surface.compute_vertex_normals()
        surface.paint_uniform_color(np.array([0.1, 0.1, 0.7]))

        open3d.draw_geometries([gt_cloud, recon_cloud, surface])
def get_smpl(joints, axes, amounts, translation=(0, 0, 0)):
    """Pose a male SMPL body by setting selected joint-axis rotations and
    return it as an open3d mesh.

    Each (joint, axis, amount) triple sets one axis-angle component to
    mean + var * (2*amount - 1), i.e. ``amount`` in [0, 1] sweeps the
    component across its modelled range. The mesh is shifted by
    ``translation`` and tinted red in proportion to the first amount.
    """
    model = smplx.create('models', model_type='smpl', gender='male')
    body_pose = torch.zeros([1, 69])
    for joint, axis, amount in zip(joints, axes, amounts):
        component = int(joint * 3 + axis)
        body_pose[0, component] = (axang_mean[component]
                                   + axang_var[component] * (amount * 2 - 1))
    output = model(body_pose=torch.Tensor(body_pose), return_verts=True)
    verts = output.vertices.detach().cpu().numpy().squeeze()
    body_mesh = o3d.TriangleMesh()
    body_mesh.triangles = o3d.Vector3iVector(model.faces)
    body_mesh.vertices = o3d.Vector3dVector(verts + np.array(translation))
    body_mesh.compute_vertex_normals()
    body_mesh.paint_uniform_color([amounts[0] / 2 + 0.5, 0.3, 0.3])
    return body_mesh
def obj_to_mesh(file, mesh):
    """Load vertices and faces from a Wavefront OBJ file, assign the faces to
    an existing trio of meshes, and compute per-vertex displacement
    diagnostics between mesh[0]'s current vertices and the file's vertices.

    :param file: path to the .obj file
    :param mesh: indexable triple of open3d-style meshes; all three receive
        the OBJ triangles, mesh[0]'s vertices are compared to the file's
    """
    vertices, triangles = [], []
    # BUG FIX: the original tested line[0] == 'v' / 'f', which also matched
    # 'vt'/'vn' records (silently appending texture/normal data as vertices)
    # and raised IndexError on blank lines. Compare the whole first token.
    with open(file, 'r') as f:  # the with-block closes the file; no f.close()
        for line in f:
            tokens = line.split()
            if not tokens:
                continue
            if tokens[0] == 'v':
                vertices.append([float(c) for c in tokens[1:]])
            elif tokens[0] == 'f':
                # Faces may be 'f 1 2 3' or 'f 1/t/n ...'; keep only the
                # 1-based vertex index and convert to 0-based.
                triangles.append([int(tok.split('/')[0]) - 1
                                  for tok in tokens[1:]])
    mesh[0].triangles = open3d.Vector3iVector(np.array(triangles))
    mesh[1].triangles = mesh[0].triangles
    mesh[2].triangles = mesh[0].triangles
    new = np.array(mesh[0].vertices)
    old = np.array(vertices)
    diff = new - old
    # Per-vertex displacement magnitude and original radius — diagnostics
    # kept for parity with the original (vectorized instead of a sqrt loop);
    # currently unused by any caller.
    temp = np.linalg.norm(diff, axis=1).tolist()
    temp1 = np.linalg.norm(old, axis=1).tolist()
def bbox_cb(self,msg):
    # Bounding-box callback: crop the last camera image to the detected
    # object, run the pose CNN on the crop, and show the CAD mesh rotated by
    # the predicted orientation in the open3d viewer.
    if self.last_cloud_msg is None or self.last_image_msg is None:
        print("no image rcvd")
        return
    img = self.bridge.imgmsg_to_cv2(self.last_image_msg)#[:,:,::-1] # ros msg to img
    name = 'lipton_lemon'#namespace[1:-1]
    # NOTE(review): if no box matches `name`, bbox_ stays unbound and the
    # line below raises NameError — confirm a matching detection is
    # guaranteed upstream.
    for bb in msg.bounding_boxes:
        if bb.Class == name:
            print([bb.xmin,bb.xmax,bb.ymin,bb.ymax])
            bbox_ = bb
            break
    img_crop = img[bbox_.ymin:bbox_.ymax, bbox_.xmin:bbox_.xmax] #[bbox[1]:bbox[3], bbox[0]:bbox[2]]
    self.crop_pub.publish(self.bridge.cv2_to_imgmsg(img_crop))
    bbox = [bbox_.xmin, bbox_.ymin, bbox_.xmax, bbox_.ymax]
    # NOTE(review): bbox2 aliases bbox (no copy), so the padding below also
    # mutates bbox — confirm that is intended.
    bbox2 = bbox
    bbox2[0] = bbox2[0] - 10
    bbox2[1] = bbox2[1] - 10
    bbox2[2] = bbox2[2] + 10
    bbox2[3] = bbox2[3] + 10
    # +++++++++++++++++++++++++++++ pose cnn +++++++++++++++++++++++++++++
    start = time.time()
    R = self.pose_detector.predict(img_crop)
    elapsed_time = time.time() - start
    #print("poseCNN forward time is: " + str(elapsed_time) + "sec" + "\n")
    # +++++++++++++++++++++++++++++ pose cnn +++++++++++++++++++++++++++++
    # +++++++++++++++++++++++++++++ ICP +++++++++++++++++++++++++++++
    Rc = np.asarray([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) # this rotation matrix aligns camera coords with depth coords
    # NOTE(review): Rc and principalAxis_yellow are computed but unused here
    # (presumably for the ICP step that is not wired up yet); several
    # hand-recorded debug rotation matrices from earlier experiments were
    # removed from the comments.
    start = time.time()
    principalAxis_yellow = np.asarray([0,0,1]) # principal axis = z axis, which is used in ICP
    R_init = R
    # Rotate the CAD vertices by the predicted orientation and redraw.
    x = np.dot(R_init, self.V.transpose()).transpose()
    #mesh = o3d.TriangleMesh()
    self.mesh.vertices = o3d.Vector3dVector(x)
    self.mesh.triangles = o3d.Vector3iVector(self.F)
    self.mesh.compute_vertex_normals()
    self.vis.update_geometry()
    self.vis.poll_events()
    self.vis.update_renderer()
    #self.vis.add_geometry(mesh)
    # NOTE(review): run() blocks and destroy_window() tears the viewer down,
    # so this callback can only complete once — confirm intended.
    self.vis.run()
    self.vis.destroy_window()
def animate_flow(x, t, phi, grid_size, t_sample, mesh0=None):
    # Interactive animation of the learned flow: precompute the mesh at every
    # time step in `ts` (module global), then open an open3d window where ','
    # steps through frames and '.' rewinds to the start.
    # NOTE(review): depends on module globals `i`, `ts`, `Nframes` being set
    # before this is called — confirm.
    # Imported lazily so modules without OpenGL can still import this file.
    import open3d
    with torch.no_grad():
        if mesh0 == None:
            # Lift a regular 2D grid to 3D at time t[0, 2] and map it through
            # the network; only the first 3 output channels are spatial.
            mesh_samples = embed_3d(
                torch.from_numpy(meshgrid_vertices(grid_size)).to(x), t[0, 2])
            print(mesh_samples.shape)
            mesh_faces = meshgrid_face_indices(grid_size)
            mesh_vertices = phi(mesh_samples)[:, 0:3]
        else:
            # Use caller-provided vertices/faces as the starting mesh instead.
            mesh_samples = torch.from_numpy(mesh0[0]).to(x)
            print("sample", mesh_samples.shape)
            mesh_faces = mesh0[1][:, 0:3]
            mesh_vertices = phi(mesh_samples)[:, 0:3]
        recon_vertices = phi(t)[:, 0:3]
        gt_color = np.array([0.1, 0.7, 0.1])
        recon_color = np.array([0.7, 0.1, 0.1])
        mesh_color = np.array([0.1, 0.1, 0.7])
        curve_color = np.array([0.2, 0.2, 0.5])
        # Ground-truth points (green).
        pcloud_gt = open3d.PointCloud()
        pcloud_gt.points = open3d.Vector3dVector(x.cpu().numpy())
        pcloud_gt.paint_uniform_color(gt_color)
        # Reconstructed sample positions (red).
        pcloud_recon = open3d.PointCloud()
        pcloud_recon.points = open3d.Vector3dVector(
            recon_vertices.cpu().numpy())
        pcloud_recon.paint_uniform_color(recon_color)
        # Lifted surface mesh (blue) — updated in place by next_frame below.
        mesh_recon = open3d.TriangleMesh()
        mesh_recon.vertices = open3d.Vector3dVector(
            mesh_vertices.cpu().numpy())
        mesh_recon.triangles = open3d.Vector3iVector(mesh_faces)
        mesh_recon.compute_vertex_normals()
        mesh_recon.paint_uniform_color(mesh_color)
        # Raw sample positions.
        pc_initial = open3d.PointCloud()
        pc_initial.points = open3d.Vector3dVector(t.cpu().numpy())
        pc_initial.paint_uniform_color(curve_color)
        # Flow trajectories traced from every t_sample-th sample position.
        flow_ode = open3d.LineSet()
        # print(x.shape)
        # print(t.shape)
        flow = get_Lines(phi, t[::t_sample, :], 30)
        print(len(flow[0]))
        flow_ode.points, flow_ode.lines = open3d.Vector3dVector(flow[0]), \
            open3d.Vector2iVector(flow[1])
        # flow_ode.colors = open3d.Vector3dVector(curve_color)
        # (earlier manual Visualizer experiments removed from comments)
        # Precompute one vertex set per time step by integrating the flow
        # from ts[j] to ts[j+1] starting from the grid samples.
        store_frames = []
        temp = mesh_samples
        store_frames.append(temp)
        for j in range(0, Nframes - 1):
            print(j)
            tj = ts[j]
            tnext = ts[j + 1]
            temp = phi.flow(tj, tnext, temp)[:, 0:3]
            store_frames.append(temp)

        def next_frame(vis):
            # ',' key: show the next precomputed frame; True stops redraws
            # once the animation is exhausted.
            # ctr = vis.get_view_control()
            # ctr.rotate(10.0, 0.0)
            # global i, ts
            global i, ts, mesh0, Nframes
            if i >= Nframes:
                return True
            print(i)
            # if mesh0 == None:
            #     mesh_faces = meshgrid_face_indices(grid_size)
            # else:
            #     mesh_faces =mesh0[1]
            mesh_vertices = store_frames[i]
            i += 1
            mesh_recon.vertices = open3d.Vector3dVector(
                mesh_vertices.cpu().numpy())
            mesh_recon.triangles = open3d.Vector3iVector(mesh_faces)
            mesh_recon.compute_vertex_normals()
            mesh_recon.paint_uniform_color(mesh_color)
            vis.update_geometry()
            vis.update_renderer()
            return False

        def revert(vis):
            # '.' key: rewind the animation to the first frame.
            global i
            i = 0
            return False

        key_to_callback = {}
        key_to_callback[ord(",")] = next_frame
        key_to_callback[ord(".")] = revert
        open3d.draw_geometries_with_key_callbacks(
            [pcloud_gt, pcloud_recon, mesh_recon, pc_initial, flow_ode],
            key_to_callback)
def tr_mesh2pn_mesh(tr_mesh):
    """Copy a trimesh mesh's vertices and faces into a new ``pn``
    TriangleMesh (geometry only; no colors or normals)."""
    converted = pn.TriangleMesh()
    converted.vertices = pn.Vector3dVector(np.array(tr_mesh.vertices))
    converted.triangles = pn.Vector3iVector(np.array(tr_mesh.faces))
    return converted
def fitting_func(backward=True):
    """One optimizer closure step: decode the body pose, run the body model,
    evaluate the fitting loss, optionally backpropagate, and (when enabled)
    refresh the visualization.

    Free variables (optimizer, vposer, pose_embedding, body_model, camera,
    gt_joints, loss, ...) are captured from the enclosing fitting scope.

    :param backward: when True, zero gradients and backprop the total loss
    :return: the scalar total loss tensor
    """
    if backward:
        optimizer.zero_grad()
    # Decode the latent embedding to an axis-angle body pose when VPoser is
    # in use; otherwise the body model uses its own pose parameters.
    body_pose = vposer.decode(pose_embedding, output_type='aa').view(
        pose_embedding.shape[0], -1) if use_vposer else None
    if append_wrists:
        # VPoser's output omits the wrists; pad with zero rotations.
        wrist_pose = torch.zeros([body_pose.shape[0], 6],
                                 dtype=body_pose.dtype,
                                 device=body_pose.device)
        body_pose = torch.cat([body_pose, wrist_pose], dim=1)
    # Actual optimization occurs here
    body_model_output = body_model(return_verts=return_verts,
                                   body_pose=body_pose,
                                   return_full_pose=return_full_pose)
    total_loss = loss(body_model_output, camera=camera, gt_joints=gt_joints,
                      body_model=body_model, body_model_faces=faces_tensor,
                      joints_conf=joints_conf, joint_weights=joint_weights,
                      pose_embedding=pose_embedding, use_vposer=use_vposer,
                      scan_tensor=scan_tensor, visualize=self.visualize,
                      **kwargs)
    if backward:
        total_loss.backward(create_graph=create_graph)
    if self.visualize:
        model_output = body_model(return_verts=True, body_pose=body_pose)
        vertices = model_output.vertices[
            0, :, :].detach().cpu().numpy()
        joints = model_output.joints[0, :, :].detach().cpu().numpy()
        # gt_joints_b0 = gt_joints[0, :, :].unsqueeze(0)
        if self.steps == 0 and self.viz_mode == 'o3d':
            # First step: build all o3d geometry (body mesh, joint spheres,
            # camera marker, scan cloud, text labels) and add it to the
            # viewer; later steps mutate this geometry in place.
            self.body_o3d.vertices = o3d.Vector3dVector(vertices)
            self.body_o3d.triangles = o3d.Vector3iVector(
                body_model.faces)
            # Clearing normals forces a clean recompute below.
            self.body_o3d.vertex_normals = o3d.Vector3dVector([])
            self.body_o3d.triangle_normals = o3d.Vector3dVector([])
            self.body_o3d.compute_vertex_normals()
            self.vis_o3d.add_geometry(self.body_o3d)
            # Back-project 2D GT keypoints to 3D; 1.7 is presumably an
            # assumed depth in meters — TODO confirm.
            joints_gt_3d = camera.inverse_camera_tform(gt_joints, 1.7)
            # Visualize SMPL joints - Patrick
            for i in range(25):
                # Move each optimized-joint sphere so its centroid sits at
                # the current joint estimate (translate is relative).
                mean = np.asarray(
                    self.joints_opt[i].vertices).mean(axis=0)
                self.joints_opt[i].translate(joints[i, :] - mean)
                self.vis_o3d.add_geometry(self.joints_opt[i])
                j = joints_gt_3d[0, i, :].detach().cpu().numpy()
                self.joints_gt[i].translate(j)
                self.vis_o3d.add_geometry(self.joints_gt[i])
            # Visualize camera
            camera_origin = camera.translation[
                0, :].detach().cpu().numpy()
            self.vis_o3d.add_geometry(
                self.get_o3d_sphere([1.0, 0.0, 0.0], pos=camera_origin))
            if scan_tensor is not None:
                # Show the first scan in the batch as a pink point cloud.
                scan_batch = scan_tensor.points_list()[0]
                self.scan.points = o3d.Vector3dVector(
                    scan_batch.detach().cpu().numpy())
                N = scan_batch.shape[0]
                self.scan.colors = o3d.Vector3dVector(
                    np.tile([1.00, 0.75, 0.80], [N, 1]))
                self.vis_o3d.add_geometry(self.scan)
            lbl = 'Subj {} Sample {}'.format(
                global_vars.cur_participant[0], global_vars.cur_sample[0])
            self.vis_o3d.add_geometry(
                utils.text_3d(lbl, (0, -1.5, 2), direction=(0.01, 0, -1),
                              degree=-90, font_size=200, density=0.15))
            self.vis_o3d.add_geometry(self.lbl_stage)
            self.vis_o3d.update_geometry()
            self.vis_o3d.poll_events()
            self.vis_o3d.update_renderer()
        elif self.steps % self.summary_steps == 0:
            if self.viz_mode == 'o3d':
                # Summary steps: update the existing geometry in place.
                self.body_o3d.vertices = o3d.Vector3dVector(vertices)
                self.body_o3d.triangles = o3d.Vector3iVector(
                    body_model.faces)
                self.body_o3d.vertex_normals = o3d.Vector3dVector([])
                self.body_o3d.triangle_normals = o3d.Vector3dVector([])
                self.body_o3d.compute_vertex_normals()
                lbl2 = 'Orient {} Stage {}'.format(
                    global_vars.cur_orientation, global_vars.cur_opt_stage)
                lbl2_pcd = utils.text_3d(lbl2, (0, -1.7, 2),
                                         direction=(0.01, 0, -1),
                                         degree=-90, font_size=200,
                                         density=0.15)
                self.lbl_stage.points = lbl2_pcd.points
                # Visualize SMPL joints - Patrick
                for i in range(25):
                    mean = np.asarray(
                        self.joints_opt[i].vertices).mean(axis=0)
                    self.joints_opt[i].translate(joints[i, :] - mean)
                self.vis_o3d.update_geometry()
                self.vis_o3d.poll_events()
                self.vis_o3d.update_renderer()
            else:
                # Non-o3d mode delegates to the pyrender-style mesh viewer.
                self.mv.update_mesh(vertices.squeeze(), body_model.faces)
        self.steps += 1
    return total_loss
def pym_mesh2pn_mesh(pym_mesh):
    """Wrap a pymesh mesh's vertex and face arrays in a new ``pn``
    TriangleMesh (geometry only)."""
    result = pn.TriangleMesh()
    result.vertices, result.triangles = (
        pn.Vector3dVector(pym_mesh.vertices),
        pn.Vector3iVector(pym_mesh.faces),
    )
    return result
def visualize_plane(annos, args, eps=0.9):
    """Visualize a Structured3D-style room annotation as colored plane
    meshes, colored either by axis-aligned normal direction
    (args.color == 'normal') or by Manhattan-world cluster id
    (args.color == 'manhattan').

    :param annos: annotation dict with junctions, planes, semantics, and the
        plane-line / line-junction incidence matrices
    :param args: options object; only args.color is read here
    :param eps: dot-product threshold for snapping a normal to an axis
    """
    colormap = np.array(colormap_255) / 255
    junctions = [item['coordinate'] for item in annos['junctions']]
    if args.color == 'manhattan':
        # Map each planeID to the id of its Manhattan-world group.
        manhattan = dict()
        for planes in annos['manhattan']:
            for planeID in planes['planeID']:
                manhattan[planeID] = planes['ID']
    # extract hole vertices
    # Junctions belonging to window/door planes are "holes" to be clipped
    # out of the wall polygons below.
    lines_holes = []
    for semantic in annos['semantics']:
        if semantic['type'] in ['window', 'door']:
            for planeID in semantic['planeID']:
                lines_holes.extend(
                    np.where(np.array(
                        annos['planeLineMatrix'][planeID]))[0].tolist())
    lines_holes = np.unique(lines_holes)
    _, vertices_holes = np.where(
        np.array(annos['lineJunctionMatrix'])[lines_holes])
    vertices_holes = np.unique(vertices_holes)
    # load polygons
    # For each plane: recover its boundary lines, turn line-junction pairs
    # into an ordered polygon, and clip out any hole vertices.
    polygons = []
    for semantic in annos['semantics']:
        for planeID in semantic['planeID']:
            plane_anno = annos['planes'][planeID]
            lineIDs = np.where(np.array(
                annos['planeLineMatrix'][planeID]))[0].tolist()
            junction_pairs = [
                np.where(np.array(
                    annos['lineJunctionMatrix'][lineID]))[0].tolist()
                for lineID in lineIDs
            ]
            polygon = convert_lines_to_vertices(junction_pairs)
            vertices, faces = clip_polygon(polygon, vertices_holes,
                                           junctions, plane_anno)
            polygons.append([
                vertices, faces, planeID, plane_anno['normal'],
                plane_anno['type'], semantic['type']
            ])
    plane_set = []
    for i, (vertices, faces, planeID, normal, plane_type,
            semantic_type) in enumerate(polygons):
        # ignore the room ceiling
        if plane_type == 'ceiling' and semantic_type not in ['door', 'window']:
            continue
        plane_vis = open3d.TriangleMesh()
        plane_vis.vertices = open3d.Vector3dVector(vertices)
        plane_vis.triangles = open3d.Vector3iVector(faces)
        if args.color == 'normal':
            # Snap the normal to one of the six axis directions (within eps)
            # and color accordingly; colormap[6] is the fallback color.
            if np.dot(normal, [1, 0, 0]) > eps:
                plane_vis.paint_uniform_color(colormap[0])
            elif np.dot(normal, [-1, 0, 0]) > eps:
                plane_vis.paint_uniform_color(colormap[1])
            elif np.dot(normal, [0, 1, 0]) > eps:
                plane_vis.paint_uniform_color(colormap[2])
            elif np.dot(normal, [0, -1, 0]) > eps:
                plane_vis.paint_uniform_color(colormap[3])
            elif np.dot(normal, [0, 0, 1]) > eps:
                plane_vis.paint_uniform_color(colormap[4])
            elif np.dot(normal, [0, 0, -1]) > eps:
                plane_vis.paint_uniform_color(colormap[5])
            else:
                plane_vis.paint_uniform_color(colormap[6])
        elif args.color == 'manhattan':
            # paint each plane with manhattan world
            if planeID not in manhattan.keys():
                plane_vis.paint_uniform_color(colormap[6])
            else:
                plane_vis.paint_uniform_color(colormap[manhattan[planeID]])
        plane_set.append(plane_vis)
    draw_geometries_with_back_face(plane_set)
def main(args):
    """Replay PROX-style SMPL-X fittings frame by frame, showing the fitted
    body in the scanned scene (Open3D window) next to the color frame (OpenCV
    window). Press ESC in the OpenCV window to advance to the next frame.
    """
    fitting_dir = args.fitting_dir
    # recording name is the last path component, e.g. "<scene>_<subject>_..."
    recording_name = os.path.abspath(fitting_dir).split("/")[-1]
    fitting_dir = osp.join(fitting_dir, 'results')
    scene_name = recording_name.split("_")[0]
    base_dir = args.base_dir
    cam2world_dir = osp.join(base_dir, 'cam2world')
    scene_dir = osp.join(base_dir, 'scenes')
    recording_dir = osp.join(base_dir, 'recordings', recording_name)
    color_dir = os.path.join(recording_dir, 'Color')
    # subject IDs known to be female; everyone else is treated as male
    female_subjects_ids = [162, 3452, 159, 3403]
    subject_id = int(recording_name.split('_')[1])
    if subject_id in female_subjects_ids:
        gender = 'female'
    else:
        gender = 'male'
    cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
    vis = o3d.Visualizer()
    vis.create_window()
    # static scene mesh plus the camera-to-world transform for this scene
    scene = o3d.io.read_triangle_mesh(osp.join(scene_dir, scene_name + '.ply'))
    with open(os.path.join(cam2world_dir, scene_name + '.json'), 'r') as f:
        trans = np.array(json.load(f))
    vis.add_geometry(scene)
    model = smplx.create(args.model_folder, model_type='smplx',
                         gender=gender, ext='npz',
                         num_pca_comps=args.num_pca_comps,
                         create_global_orient=True,
                         create_body_pose=True,
                         create_betas=True,
                         create_left_hand_pose=True,
                         create_right_hand_pose=True,
                         create_expression=True,
                         create_jaw_pose=True,
                         create_leye_pose=True,
                         create_reye_pose=True,
                         create_transl=True
                         )
    count = 0
    for img_name in sorted(os.listdir(fitting_dir))[args.start::args.step]:
        print('viz frame {}'.format(img_name))
        # each frame directory holds a pickled dict of fitted parameters
        with open(osp.join(fitting_dir, img_name, '000.pkl'), 'rb') as f:
            param = pickle.load(f)
        torch_param = {}
        for key in param.keys():
            # these entries are not model inputs, so skip them
            if key in ['pose_embedding', 'camera_rotation', 'camera_translation']:
                continue
            else:
                torch_param[key] = torch.tensor(param[key])
        output = model(return_verts=True, **torch_param)
        vertices = output.vertices.detach().cpu().numpy().squeeze()
        if count == 0:
            # create the body mesh once; later frames mutate it in place so
            # the visualizer keeps tracking the same geometry object
            body = o3d.TriangleMesh()
            vis.add_geometry(body)
        body.vertices = o3d.Vector3dVector(vertices)
        body.triangles = o3d.Vector3iVector(model.faces)
        # clear cached normals before recomputing for the new vertices
        body.vertex_normals = o3d.Vector3dVector([])
        body.triangle_normals = o3d.Vector3dVector([])
        body.compute_vertex_normals()
        # move the body from camera coordinates into the scene's world frame
        body.transform(trans)
        color_img = cv2.imread(os.path.join(color_dir, img_name + '.jpg'))
        color_img = cv2.flip(color_img, 1)
        vis.update_geometry()
        # spin both UIs until ESC (key code 27) is pressed, then next frame
        while True:
            cv2.imshow('frame', color_img)
            vis.poll_events()
            vis.update_renderer()
            key = cv2.waitKey(30)
            if key == 27:
                break
        count += 1
def run_test(input_array): open3d_array = open3d.Vector3iVector(input_array) output_array = np.asarray(open3d_array) np.testing.assert_allclose(input_array, output_array)
def get_mesh(self): mesh = open3d.TriangleMesh() mesh.vertices = open3d.Vector3dVector(self.vertices) mesh.triangles = open3d.Vector3iVector(self.faces) mesh.vertex_colors = open3d.Vector3dVector(self.colors) return mesh
def update(self, edges, grid_triangles, color='red'): """ Updating only the edges (assuming points don't change). :return: None. """ if color == 'red': c = [1, 0, 0] elif color == 'green': c = [0, 1, 0] else: c = [0, 0, 1] lines = [[edge.p1.id, edge.p2.id] for edge in edges] for edge in edges: if edge.color == []: edge.color = c if edge.p1.id == 2 and edge.p2.id == 2 or edge.p2.id == 2 and edge.p1.id == 2: edge.color = [0, 0, 1] colors = [edge.color for edge in edges] line_set = o3d.geometry.LineSet() points = np.array([(point.x, point.y, point.z) for point in self.points]) line_set.points = o3d.Vector3dVector(points) line_set.lines = o3d.utility.Vector2iVector(lines) line_set.colors = o3d.utility.Vector3dVector(colors) facets = [] for triangle in grid_triangles: index_1 = self.points.index(triangle[0]) index_2 = self.points.index(triangle[1]) index_3 = self.points.index(triangle[2]) facets.append([index_1, index_2, index_3]) facets = np.asarray(facets).astype(np.int32) points_triangles = np.array([(point.x, point.y, point.z) for point in self.points]) mesh = o3d.TriangleMesh() mesh.vertices = o3d.Vector3dVector(points_triangles) mesh.triangles = o3d.Vector3iVector(facets) # Manual fix since i don't define the vertices of a triangle clockwise. If they are anti-clockwise, open3d # won't render their mesh. mesh.compute_triangle_normals() for i, n in enumerate(np.asarray(mesh.triangle_normals)): t = mesh.triangles[i] p_index = t[0] p = self.points[p_index] if np.dot(n, p.normal) < 0: mesh.triangles[i] = np.flip(t) self.visualizer.get_render_option().point_size = 3.5 self.visualizer.add_geometry(line_set) self.visualizer.add_geometry(mesh) # Rotate the object. ctr = self.visualizer.get_view_control() self.rotation_angle += 4 ctr.rotate(x=self.rotation_angle, y=0) self.visualizer.update_geometry() self.visualizer.poll_events() self.visualizer.update_renderer()
# read obj file filename = os.path.join(dirs.inpdir, 'subject_01/Model/frontal1/obj/110920150452_new.obj') obj = pw.Wavefront(filename) print('number of points', obj.number_of_points) print('number of faces', obj.number_of_faces) # read model face filename = models.bfm2017face12nomouth model = FaceModel(filename=filename) points = model.shape.points faces = model.shape.cells colors = model.color.colors print(model) # compute metrics metrics = MovingToFixedPointSetMetrics(moving=points, fixed=obj.points, registration=True) print(metrics) # compute mesh to mesh metrics fixed = obj2open3dmesh(obj) moving = open3d.TriangleMesh() moving.vertices = open3d.Vector3dVector(points) moving.triangles = open3d.Vector3iVector(faces) moving.vertex_colors = open3d.Vector3dVector(colors) moving.compute_vertex_normals() metrics = MeshToMeshMetrics(moving=moving, fixed=fixed, registration=1) print(metrics)
def main(model_folder, model_type='smplx', ext='npz',
         gender='neutral', plot_joints=False,
         plotting_module='pyrender',
         use_face_contour=False):
    """Load a body model, run a forward pass with random shape/expression
    parameters, and visualize the resulting mesh with the chosen backend.

    Args:
        model_folder: Directory holding the SMPL/SMPL-X model files.
        model_type: Body model family to load ('smpl', 'smplx', ...).
        ext: Model file extension ('npz' or 'pkl').
        gender: Model gender ('neutral', 'male' or 'female').
        plot_joints: Whether to additionally render the body joints.
        plotting_module: 'pyrender', 'matplotlib' or 'open3d'.
        use_face_contour: Whether the model computes face-contour landmarks.

    Raises:
        ValueError: If plotting_module is not a supported backend.
    """
    model = smplx.create(model_folder, model_type=model_type,
                         gender=gender, use_face_contour=use_face_contour,
                         ext=ext)
    print(model)

    # random shape (betas) and facial expression coefficients
    betas = torch.randn([1, 10], dtype=torch.float32)
    expression = torch.randn([1, 10], dtype=torch.float32)

    output = model(betas=betas, expression=expression, return_verts=True)
    vertices = output.vertices.detach().cpu().numpy().squeeze()
    joints = output.joints.detach().cpu().numpy().squeeze()

    print('Vertices shape =', vertices.shape)
    print('Joints shape =', joints.shape)

    if plotting_module == 'pyrender':
        import pyrender
        import trimesh
        vertex_colors = np.ones([vertices.shape[0], 4]) * [0.3, 0.3, 0.3, 0.8]
        tri_mesh = trimesh.Trimesh(vertices, model.faces,
                                   vertex_colors=vertex_colors)
        mesh = pyrender.Mesh.from_trimesh(tri_mesh)
        scene = pyrender.Scene()
        scene.add(mesh)
        if plot_joints:
            # one small red sphere instanced at every joint location
            sm = trimesh.creation.uv_sphere(radius=0.005)
            sm.visual.vertex_colors = [0.9, 0.1, 0.1, 1.0]
            tfs = np.tile(np.eye(4), (len(joints), 1, 1))
            tfs[:, :3, 3] = joints
            joints_pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs)
            scene.add(joints_pcl)
        pyrender.Viewer(scene, use_raymond_lighting=True)
    elif plotting_module == 'matplotlib':
        from matplotlib import pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers '3d')
        from mpl_toolkits.mplot3d.art3d import Poly3DCollection
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        mesh = Poly3DCollection(vertices[model.faces], alpha=0.1)
        face_color = (1.0, 1.0, 0.9)
        edge_color = (0, 0, 0)
        mesh.set_edgecolor(edge_color)
        mesh.set_facecolor(face_color)
        ax.add_collection3d(mesh)
        # Fix: the original scattered the joints unconditionally and then a
        # second, redundant time when plot_joints was set; honor the flag
        # once, consistently with the pyrender branch.
        if plot_joints:
            ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], color='r')
        plt.show()
    elif plotting_module == 'open3d':
        import open3d as o3d
        mesh = o3d.TriangleMesh()
        mesh.vertices = o3d.Vector3dVector(vertices)
        mesh.triangles = o3d.Vector3iVector(model.faces)
        mesh.compute_vertex_normals()
        mesh.paint_uniform_color([0.3, 0.3, 0.3])
        o3d.visualization.draw_geometries([mesh])
    else:
        raise ValueError('Unknown plotting_module: {}'.format(plotting_module))
def fitting_func(backward=True):
    """One optimizer closure step for SMPL body fitting.

    Runs the body model, evaluates the fitting loss against the 2D/3D
    observations captured from the enclosing scope, optionally backpropagates,
    and updates the live visualization. Returns the total loss tensor.

    Args:
        backward: When True, zero the gradients first and backpropagate the
            loss (standard optimizer-closure behavior); when False, only
            evaluate the loss.

    NOTE(review): this is a closure — optimizer, vposer, pose_embedding,
    use_vposer, append_wrists, body_model, return_verts, return_full_pose,
    loss, camera, gt_joints, faces_tensor, joints_conf, joint_weights,
    scan_tensor, create_graph, self and kwargs all come from the enclosing
    scope, which is outside this view.
    """
    if backward:
        optimizer.zero_grad()

    # decode the latent pose embedding into axis-angle body pose when using
    # VPoser; otherwise the body model falls back to its own pose parameters
    body_pose = vposer.decode(
        pose_embedding, output_type='aa').view(
            1, -1) if use_vposer else None

    if append_wrists:
        # VPoser omits the wrists; pad with zero rotations (2 joints x 3 aa)
        wrist_pose = torch.zeros([body_pose.shape[0], 6],
                                 dtype=body_pose.dtype,
                                 device=body_pose.device)
        body_pose = torch.cat([body_pose, wrist_pose], dim=1)

    body_model_output = body_model(return_verts=return_verts,
                                   body_pose=body_pose,
                                   return_full_pose=return_full_pose)
    total_loss = loss(body_model_output, camera=camera,
                      gt_joints=gt_joints,
                      body_model=body_model,
                      body_model_faces=faces_tensor,
                      joints_conf=joints_conf,
                      joint_weights=joint_weights,
                      pose_embedding=pose_embedding,
                      use_vposer=use_vposer,
                      scan_tensor=scan_tensor,
                      visualize=self.visualize,
                      **kwargs)

    if backward:
        total_loss.backward(create_graph=create_graph)

    if self.visualize:
        model_output = body_model(return_verts=True,
                                  body_pose=body_pose)
        vertices = model_output.vertices.detach().cpu().numpy()

        if self.steps == 0 and self.viz_mode == 'o3d':
            # first step: populate the Open3D body mesh and register it
            # (plus the scan point cloud, if any) with the visualizer
            self.body_o3d.vertices = o3d.Vector3dVector(
                vertices.squeeze())
            self.body_o3d.triangles = o3d.Vector3iVector(
                body_model.faces)
            # clear cached normals before recomputing for the new vertices
            self.body_o3d.vertex_normals = o3d.Vector3dVector([])
            self.body_o3d.triangle_normals = o3d.Vector3dVector([])
            self.body_o3d.compute_vertex_normals()
            self.vis_o3d.add_geometry(self.body_o3d)

            if scan_tensor is not None:
                self.scan.points = o3d.Vector3dVector(
                    scan_tensor.detach().cpu().numpy().squeeze())
                N = np.asarray(self.scan.points).shape[0]
                # paint the scan points a uniform pink
                self.scan.colors = o3d.Vector3dVector(
                    np.tile([1.00, 0.75, 0.80], [N, 1]))
                self.vis_o3d.add_geometry(self.scan)

            self.vis_o3d.update_geometry()
            self.vis_o3d.poll_events()
            self.vis_o3d.update_renderer()
        elif self.steps % self.summary_steps == 0:
            # periodic refresh: update the existing geometry in place
            if self.viz_mode == 'o3d':
                self.body_o3d.vertices = o3d.Vector3dVector(
                    vertices.squeeze())
                self.body_o3d.triangles = o3d.Vector3iVector(
                    body_model.faces)
                self.body_o3d.vertex_normals = o3d.Vector3dVector([])
                self.body_o3d.triangle_normals = o3d.Vector3dVector([])
                self.body_o3d.compute_vertex_normals()
                self.vis_o3d.update_geometry()
                self.vis_o3d.poll_events()
                self.vis_o3d.update_renderer()
            else:
                # non-o3d mode: hand the mesh to the mesh viewer instead
                self.mv.update_mesh(vertices.squeeze(),
                                    body_model.faces)
    self.steps += 1

    return total_loss