def _init_camera_renderer(self):
    """Set up the scene camera and offscreen renderer (no-op in viewer mode)."""
    # A live viewer owns its own camera and GL context; nothing to do here.
    if self._viewer:
        return

    if self._intrinsics in ('kinect_azure', 'realsense'):
        # Real-sensor mode: calibrated pinhole intrinsics at the sensor size.
        cam = pyrender.IntrinsicsCamera(self._fx, self._fy,
                                        self._cx, self._cy,
                                        self._znear, self._zfar)
        self._camera_node = self._scene.add(cam, pose=np.eye(4), name='camera')
        self.renderer = pyrender.OffscreenRenderer(
            viewport_width=self._width,
            viewport_height=self._height,
            point_size=1.0)
    else:
        # Generic mode: perspective camera flipped 180 degrees about x,
        # rendered at a fixed 400x400 viewport.
        cam = pyrender.PerspectiveCamera(
            yfov=self._fov, aspectRatio=1.0, znear=0.001)  # do not change aspect ratio
        self._camera_node = self._scene.add(
            cam, pose=tra.euler_matrix(np.pi, 0, 0), name='camera')
        self.renderer = pyrender.OffscreenRenderer(400, 400)
def __init__(self, callback_fn=None, render_mask=True, shadows=True,
             platform=None, device_id=0):
    """Construct a Renderer.

    Keyword Arguments:
        callback_fn {callable} -- call a function with rendered images
            instead of passing to bullet (default: {None})
        render_mask {bool} -- render segmentation mask or not (default: {True})
        shadows {bool} -- render shadows for all lights (default: {True})
        platform {str} -- PyOpenGL platform ('egl', 'osmesa', etc.)
            (default: {None})
        device_id {int} -- EGL device id if platform is 'egl' (default: {0})
    """
    super().__init__()
    self._render_mask = render_mask
    self._flags = pyr.RenderFlags.NONE
    self._callback_fn = callback_fn
    if shadows:
        self._flags |= pyr.RenderFlags.SHADOWS_DIRECTIONAL
    # The GL platform is chosen from environment variables, so they must be
    # set *before* the offscreen renderer creates its context below.
    if platform is not None:
        os.environ["PYOPENGL_PLATFORM"] = platform
        os.environ["EGL_DEVICE_ID"] = str(device_id)
    # Viewport starts at 0x0; it is resized when a render is requested.
    self._renderer = pyr.OffscreenRenderer(0, 0)
    self._scene = Scene()
def __init__(self, faces, resolution=(224, 224), orig_img=False, wireframe=False):
    """Offscreen renderer for a fixed mesh topology with three preset
    directional lights."""
    self.resolution = resolution
    self.faces = faces
    self.orig_img = orig_img
    self.wireframe = wireframe
    self.renderer = pyrender.OffscreenRenderer(
        viewport_width=self.resolution[0],
        viewport_height=self.resolution[1],
        point_size=1.0)
    # Scene with mild ambient light and a transparent black background.
    self.scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0],
                                ambient_light=(0.3, 0.3, 0.3))
    # Same directional light added at three positions around the subject.
    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=2)
    for position in ([0, -1, 1], [0, 1, 1], [1, 1, 2]):
        pose = np.eye(4)
        pose[:3, 3] = position
        self.scene.add(light, pose=pose)
def get_image_depth_and_mask(scene: pyrender.Scene,
                             scene_setup_loader: DiceScene.SceneSetupLoader,
                             width: int, height: int,
                             keep_nodes_in_scene: bool):
    """Renders an image given a scene and setup, along with the depth and
    segmentation mask labelling each die.

    Returns (color, depth, labels_mask) where labels_mask is -1 for
    background and the die index elsewhere.
    """
    r = pyrender.OffscreenRenderer(width, height)
    # Background pass: the scene with no dice in it.
    color_bg, depth_bg = r.render(scene)
    # One pass per die to record its individual depth buffer.
    depth_nodes = []
    for node in scene_setup_loader.dice_nodes:
        scene.add_node(node)
        _, depth_node = r.render(scene)
        depth_nodes.append(depth_node)
        scene.remove_node(node)
    # Final pass with every die present.
    scene_setup_loader.add_loaded_to_scene(scene)
    color_final, depth_final = r.render(scene)
    if not keep_nodes_in_scene:
        scene_setup_loader.remove_nodes_from_scene(scene)
    # Release the offscreen GL context (it was previously leaked).
    r.delete()
    # Initialize labels of pixels to -1 (for background).
    # NOTE: int8 limits the number of distinguishable dice to 127.
    labels_mask = np.full((height, width), -1, dtype=np.int8)
    for index, depth_for_node in enumerate(depth_nodes):
        # A pixel belongs to die `index` where its solo depth differs from
        # the empty background and matches the fully-populated scene depth
        # (i.e. the die is visible and not occluded there).
        depth_not_background = np.not_equal(depth_bg, depth_for_node)
        depth_at_foreground = np.equal(depth_final, depth_for_node)
        depth_at_dice = np.logical_and(depth_not_background, depth_at_foreground)
        labels_mask[depth_at_dice] = index
    return color_final, depth_final, labels_mask
def render_mesh(mesh, height, width):
    """Render `mesh` (vertices in mm, scaled to meters) with an orthographic
    camera framing a width x height canvas.

    Returns the color image with channels reversed (RGB -> BGR for OpenCV).
    """
    scene = pyrender.Scene(ambient_light=[.3, .3, .3], bg_color=[255, 255, 255])
    # Uniform light-blue vertex color.
    rgb_per_v = np.zeros_like(mesh.v)
    rgb_per_v[:, 0] = 0.53
    rgb_per_v[:, 1] = 0.81
    rgb_per_v[:, 2] = 0.98
    tri_mesh = trimesh.Trimesh(vertices=0.001 * mesh.v, faces=mesh.f,
                               vertex_colors=rgb_per_v)
    render_mesh = pyrender.Mesh.from_trimesh(tri_mesh, smooth=True)
    scene.add(render_mesh, pose=np.eye(4))
    # Orthographic camera whose magnification covers the full canvas
    # (0.001 converts pixel units to meters, matching the mesh scaling).
    camera = pyrender.camera.OrthographicCamera(xmag=0.001 * 0.5 * width,
                                                ymag=0.001 * 0.5 * height,
                                                znear=0.01, zfar=10)
    camera_pose = np.eye(4)
    camera_pose[:3, 3] = np.array([0.001 * 0.5 * width, 0.001 * 0.5 * height, 1.0])
    scene.add(camera, pose=camera_pose)
    # Two point lights; the pose array is reused, so add copies.
    light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1.0)
    light_pose = np.eye(4)
    light_pose[:3, 3] = np.array([1.0, 1.0, 1.0])
    scene.add(light, pose=light_pose.copy())
    light_pose[:3, 3] = np.array([0.0, 1.0, 1.0])
    scene.add(light, pose=light_pose.copy())
    r = pyrender.OffscreenRenderer(viewport_width=width, viewport_height=height)
    color, _ = r.render(scene)
    r.delete()  # free the GL context (it was previously leaked)
    return color[..., ::-1].copy()
def _init_pyrender(self):
    """Build the two pyrender scenes and the offscreen renderer, then capture
    the baseline (background) color and depth images."""
    # Scene mirrored from the pybullet simulation.
    self.scene = pyrender.Scene()
    # Separate scene used when rendering from a given depth image.
    self.scene_depth = pyrender.Scene()
    # objects format: {obj_name: pyrender node}
    self.object_nodes = {}
    self.current_object_nodes = {}
    self.current_light_nodes = []
    self.cam_light_ids = []
    self._init_gel()
    self._init_camera()
    self._init_light()
    self.r = pyrender.OffscreenRenderer(self.width, self.height)
    # Reference render with no objects, no noise, no calibration applied.
    colors, depths = self.render(object_poses=None, noise=False,
                                 calibration=False)
    self.depth0 = depths
    self._background_sim = colors
def render_write(self, show_points=False):
    """Render the scene once, save the depth (TIFF) and color (PNG) images,
    and write the positive grasp points to a "cpos.txt" file.

    show_points -- if True, overlay the first four grasp points on the
        saved color image.
    """
    output_file = self.get_output_filename()
    r = pyrender.OffscreenRenderer(self.img_w, self.img_h)
    # Render once (the scene was previously rendered twice redundantly).
    color, depth = r.render(self.scene)
    r.delete()  # free the GL context (it was previously leaked)
    # write depth
    cv2.imwrite(output_file + "d.tiff", depth)
    # write color
    plt.imshow(color)
    # add grasp points
    if show_points:
        for p in self.grasp_points[0:4]:
            plt.scatter(p[0], p[1])
    plt.savefig(output_file + "r.png")
    plt.clf()
    # write positive grasp txt file
    with open(output_file + "cpos.txt", "w") as f:
        for p in self.grasp_points:
            f.write("%.3f" % p[0])
            f.write(" ")
            f.write("%.3f" % p[1])
            f.write("\n")
def _render_pyrender(self, T_camera2world, fovy, height, width):
    """Render the scene with pyrender from the given camera pose.

    Currently disabled: the raise below makes everything after it dead code,
    kept as a reference implementation.
    """
    # FIXME: pyrender and pybullet images are not perfectly aligned
    raise NotImplementedError
    import pyrender
    scene = self.scene
    # Camera placed via an OpenGL-convention transform of the world pose.
    node_camera = scene.add(
        obj=pyrender.PerspectiveCamera(yfov=np.deg2rad(fovy),
                                       aspectRatio=width / height),
        pose=morefusion.extra.trimesh.to_opengl_transform(T_camera2world),
    )
    # Four randomly oriented directional lights parented to the camera.
    for _ in range(4):
        direction = self._random_state.uniform(-1, 1, (3, ))
        direction /= np.linalg.norm(direction)
        scene.add(
            obj=pyrender.DirectionalLight(
                intensity=self._random_state.uniform(0.5, 5),
            ),
            pose=trimesh.transformations.rotation_matrix(
                angle=np.deg2rad(self._random_state.uniform(0, 45)),
                direction=direction,
            ),
            parent_node=node_camera,
        )
    renderer = pyrender.OffscreenRenderer(viewport_width=width,
                                          viewport_height=height)
    rgb, depth = renderer.render(scene)
    # Remove the temporary camera so repeated calls do not accumulate nodes.
    scene.remove_node(node_camera)
    return rgb, depth
def render(self, camera_pose : np.array, camera_intrinsics : np.array, znear : float = 1.0, zfar : float = 100.0, render_flags : BoxRenderFlags = None):
    """Render the box scene from the given camera.

    camera_pose: numpy 4x4 array of camera pose in global coordinate system
    camera_intrinsics: [fx, fy, cx, cy]: list of 4 floating point values for
        camera intrinsics (fx, fy, cx, cy in pixels)
    znear: near clipping plane - not relevant to intrinsics - defines the
        clipping of the depth values
    zfar: far clipping plane - not relevant to intrinsics - defines the
        clipping of the depth values

    Returns (color, depth, normals, labels); normals are expressed in the
    camera coordinate system, labels come from the alpha channel.
    """
    # Lazily create the offscreen renderer on first use.
    if self._initialize_offscreen_renderer:
        self._renderer = pyrender.OffscreenRenderer(self._canvas_width,
                                                    self._canvas_height)
        self._initialize_offscreen_renderer = False
    # Only one camera may live in the scene at a time.
    if len(self._scene.camera_nodes) > 0:
        self._scene.remove_node(next(iter(self._scene.camera_nodes)))
    cam = pyrender.IntrinsicsCamera(fx=camera_intrinsics[0],
                                    fy=camera_intrinsics[1],
                                    cx=camera_intrinsics[2],
                                    cy=camera_intrinsics[3],
                                    znear=znear, zfar=zfar)
    final_camera_pose = np.dot(camera_pose, self._camera_transform)
    self._scene.bg_color = self._background_color
    self._scene.add(cam, pose=final_camera_pose)
    self._generate_mesh(camera_pose, render_flags)
    self._add_box_mesh_to_scene()
    color, depth = self._renderer.render(
        self._scene,
        flags=(pyrender.RenderFlags.DISABLE_MULTISAMPLING
               | pyrender.RenderFlags.RGBA))
    # Undo the normal-to-color encoding: rgb in [0, 255] -> normal in [-1, 1].
    normals = (2.0 * color[:, :, :3]) / 255.0 - 1
    labels = color[:, :, 3]
    # Rotate normals from global into the camera coordinate system.
    inv_camera_transform = np.linalg.inv(self._camera_transform)
    rot = inv_camera_transform[:3, :3]
    cam_normals = np.dot(rot, normals.reshape((-1, 3)).T)
    normals_reshaped = np.reshape(cam_normals.T, normals.shape)
    return color, depth, normals_reshaped, labels
def render_depth(path, override=None):
    """Load a mesh, normalize its vertices into [-0.5, 0.5]^3, and render a
    200x200 depth image from the camera pose given by getCameraPose(override).

    Returns the depth image normalized by its maximum value.
    """
    # Load the trimesh and put it in a scene
    fuze_trimesh = trimesh.load(path)
    # Normalize vertices into the unit cube centered at the origin
    # (epsilon avoids division by zero on degenerate axes).
    v_min = np.amin(fuze_trimesh.vertices, axis=0)
    v_max = np.amax(fuze_trimesh.vertices, axis=0)
    fuze_trimesh.vertices = (fuze_trimesh.vertices - v_min) / (v_max - v_min + 0.001) - 0.5
    mesh = pyrender.Mesh.from_trimesh(fuze_trimesh)
    scene = pyrender.Scene()
    scene.add(mesh)
    # Set up the camera -- z-axis away from the scene, x-axis right, y-axis up
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 5.0, znear=0.15)
    camera_pose = getCameraPose(override)
    scene.add(camera, pose=camera_pose)
    # Set up the light -- a single spot light in the same spot as the camera
    light = pyrender.SpotLight(color=np.ones(3), intensity=3.0,
                               innerConeAngle=np.pi / 16.0)
    scene.add(light, pose=camera_pose)
    # Render the scene (depth only), then free the GL context (was leaked).
    r = pyrender.OffscreenRenderer(200, 200)
    depth = r.render(scene, flags=pyrender.RenderFlags.DEPTH_ONLY)
    r.delete()
    depth = depth / (np.max(depth) + 0.0001)
    return depth
def render_pred_coeff(pred_coeff):
    """Render predicted SMPL coefficients to an image.

    pred_coeff: 85-vector [translation(3), global_rot(3), body_pose(69),
        betas(10)] -- TODO confirm against the producing model.

    Returns the rendered color image.
    """
    pred_coeff = np.asarray(pred_coeff).tolist()
    # make scene
    scene = pyrender.Scene()
    # Camera whose vertical FOV matches the assumed focal length / image size.
    f_len = 366.474487
    rend_size = 256.
    camera = pyrender.PerspectiveCamera(
        yfov=np.arctan(rend_size * 0.5 / f_len) * 2, aspectRatio=1)
    camera_pose = np.array([[1.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 1.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 1.0]])
    scene.add(camera, pose=camera_pose)
    # Three spot lights positioned around the subject.
    light_posi1 = np.array([[1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, -1.0],
                            [0.0, 1.0, 0.0, -2.0], [0.0, 0.0, 0.0, 1.0]])
    light_posi2 = np.array([[1.0, 0.0, 0.0, -1.0], [0.0, 0.0, 1.0, -1.0],
                            [0.0, 1.0, 0.0, -2.0], [0.0, 0.0, 0.0, 1.0]])
    light_posi3 = np.array([[1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0],
                            [0.0, 1.0, 0.0, -2.0], [0.0, 0.0, 0.0, 1.0]])
    light = pyrender.SpotLight(
        color=np.array([0.65098039, 0.74117647, 0.85882353]),
        intensity=100, innerConeAngle=np.pi / 16.0, outerConeAngle=np.pi / 6.0)
    scene.add(light, pose=light_posi1)
    scene.add(light, pose=light_posi2)
    scene.add(light, pose=light_posi3)
    # get renderer
    r = pyrender.OffscreenRenderer(viewport_width=rend_size,
                                   viewport_height=rend_size)
    # Get verts from SMPL coefficients; global orient is zeroed here and the
    # rotation is applied explicitly below.
    smpl_op = load_model("./tf_smpl/neutral_smpl_with_cocoplus_reg.pkl")
    smpl_op.pose[:] = np.asarray([0] * 3 + pred_coeff[6:75])
    smpl_op.betas[:] = np.array(pred_coeff[75:85])
    verts = np.array(smpl_op)
    # Apply the global rotation (Rodrigues of coeffs 3:6) and translation 0:3.
    rot_mat = cv2.Rodrigues(np.asarray(pred_coeff[3:6]))[0]
    verts = np.tensordot(verts, rot_mat, axes=([1], [1]))
    verts = verts + pred_coeff[:3]
    # make trimesh
    faces = np.load("./tf_smpl/smpl_faces.npy").astype(np.int32)
    this_trimesh = trimesh.Trimesh(vertices=verts, faces=faces)
    this_mesh = pyrender.Mesh.from_trimesh(this_trimesh)
    scene.add(this_mesh)
    rend_img, _ = r.render(scene)
    r.delete()  # free the GL context (it was previously leaked)
    return rend_img
def _create_scene_and_offscreen_render():
    """We will add (and remove) the meshes later. Unfortunately this requires
    some tuning of the position and rotation.

    Returns (scene, offscreen_renderer).
    """
    scene = pyrender.Scene()
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
    # The position / translation [px, py, pz] and then rotation matrix.
    p = [0.45, -0.45, 0.90]
    theta = -45 * DEG_TO_RAD
    RX = np.array([
        [1.0, 0.0, 0.0],
        [0.0, np.cos(theta), np.sin(theta)],
        [0.0, -np.sin(theta), np.cos(theta)],
    ])
    R = RX
    camera_pose = np.array([
        [R[0, 0], R[0, 1], R[0, 2], p[0]],
        [R[1, 0], R[1, 1], R[1, 2], p[1]],
        [R[2, 0], R[2, 1], R[2, 2], p[2]],
        [0.0, 0.0, 0.0, 1.0],
    ])
    scene.add(camera, pose=camera_pose)
    # BUG FIX: SpotLight color must be an RGB 3-vector; np.ones(2) was invalid.
    light = pyrender.SpotLight(color=np.ones(3), intensity=1.0,
                               innerConeAngle=np.pi / 16.0,
                               outerConeAngle=np.pi / 6.0)
    scene.add(light, pose=camera_pose)
    # Only for debugging
    # v = pyrender.Viewer(scene, use_raymond_lighting=True)
    rend = pyrender.OffscreenRenderer(640, 480)
    return scene, rend
def __init__(self, model_paths, cam_K, H, W):
    """Offscreen renderer for a set of object models.

    model_paths: list of mesh file paths (must be a list).
    cam_K: 3x3 camera intrinsics matrix.
    H, W: output image height and width in pixels.
    """
    if not isinstance(model_paths, list):
        print("model_paths have to be list")
        raise RuntimeError
    self.scene = pyrender.Scene(ambient_light=[1., 1., 1.],
                                bg_color=[0, 0, 0])
    self.camera = pyrender.IntrinsicsCamera(fx=cam_K[0, 0], fy=cam_K[1, 1],
                                            cx=cam_K[0, 2], cy=cam_K[1, 2],
                                            znear=0.1, zfar=2.0)
    self.cam_node = self.scene.add(self.camera, pose=np.eye(4))
    self.mesh_nodes = []
    for model_path in model_paths:
        print('model_path', model_path)
        obj_mesh = trimesh.load(model_path)
        # (removed: unused `colorVisual = obj_mesh.visual.to_color()` local)
        mesh = pyrender.Mesh.from_trimesh(obj_mesh)
        # Object pose parent is cam
        mesh_node = self.scene.add(mesh, pose=np.eye(4),
                                   parent_node=self.cam_node)
        self.mesh_nodes.append(mesh_node)
    self.H = H
    self.W = W
    self.r = pyrender.OffscreenRenderer(self.W, self.H)
    # OpenGL camera looks down -z; OpenCV camera looks down +z.
    self.glcam_in_cvcam = np.array([[1, 0, 0, 0],
                                    [0, -1, 0, 0],
                                    [0, 0, -1, 0],
                                    [0, 0, 0, 1]])
    self.cvcam_in_glcam = np.linalg.inv(self.glcam_in_cvcam)
def set_intrinsics(self, size, K, scale=None, znear=0.1, zfar=10, rvec=None):
    """Configure the pinhole camera from intrinsics K and (re)create the
    offscreen renderer at the (optionally scaled) image size.

    size: dict with 'w' and 'h' entries (pixels) -- TODO confirm keys.
    K: 3x3 intrinsics matrix; a deep copy is kept and scaled if `scale` given.
    scale: optional factor applied to fx/fy/cx/cy and to the viewport size.
    znear, zfar: clipping planes for the camera.
    rvec: optional Rodrigues rotation vector applied to the camera pose.

    NOTE(review): each call builds a fresh OffscreenRenderer without deleting
    the previous one -- repeated calls presumably leak GL contexts; confirm.
    """
    self.K_no_scale = deepcopy(K)
    self.K = deepcopy(K)
    if scale is not None:
        # Scaling the first two rows scales fx, fy, cx, cy together.
        self.K[:2] *= scale
    self.camera = pyrender.IntrinsicsCamera(fx=self.K[0, 0], fy=self.K[1, 1],
                                            cx=self.K[0, 2], cy=self.K[1, 2],
                                            znear=znear, zfar=zfar)
    # Replace any previously-added camera node.
    if self.camera_node is not None:
        self.scene.remove_node(self.camera_node)
    self.T = np.eye(4)
    if rvec is not None:
        rotmtx = cv2.Rodrigues(np.array(rvec))[0]
        self.T[:3, :3] = rotmtx
    self.camera_node = self.scene.add(self.camera, pose=self.T,
                                      name='pc-camera')
    self.size = deepcopy(size)
    if scale is not None:
        for k in self.size:
            self.size[k] *= scale
    self.viewer = pyrender.OffscreenRenderer(self.size['w'], self.size['h'])
def render(obj_path, camera_mat, return_depth=False, im_size=128):
    """Render a mesh or trimesh-scene file from the given camera pose.

    obj_path: path to a mesh file loadable by trimesh.
    camera_mat: 4x4 camera pose (also used for the first spot light).
    return_depth: if True, also return the depth image.
    im_size: square viewport size in pixels.
    """
    fuze_trimesh = trimesh.load(obj_path)
    if type(fuze_trimesh) == trimesh.base.Trimesh:
        m = pyrender.Mesh.from_trimesh(fuze_trimesh)
        scene = pyrender.Scene()
        scene.add_node(pyrender.Node(mesh=m))
    else:
        # Multi-geometry files load as a trimesh Scene.
        assert type(fuze_trimesh) == trimesh.scene.scene.Scene, "Unrecognized file"
        scene = pyrender.Scene.from_trimesh_scene(fuze_trimesh)
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0)
    scene.add(camera, pose=camera_mat)
    # One spot light at the camera, one fixed fill light.
    light = pyrender.SpotLight(color=np.ones(3), intensity=4.0,
                               innerConeAngle=np.pi / 16.0)
    scene.add(light, pose=camera_mat)
    light = pyrender.SpotLight(color=np.ones(3), intensity=6.0,
                               innerConeAngle=0.2 * np.pi)
    light_pose = np.array(
        [[0, 1, 0, 0], [0, 0, 1, 1], [1, 0, 0, 0], [0, 0, 0, 1]],
        dtype=np.float32)
    scene.add(light, pose=light_pose)
    r = pyrender.OffscreenRenderer(im_size, im_size)
    color, depth = r.render(scene)
    r.delete()
    if return_depth:
        return color, depth
    return color
def render_mesh(self, mesh, width, height):
    """Render `mesh` using this object's camera parameters and lights.

    width, height: output viewport size in pixels.
    Returns the rendered color image.
    """
    scene = pyrender.Scene()
    # Adding the mesh into the scene
    scene.add(pyrender.Mesh.from_trimesh(mesh))
    # Adding the camera into the scene
    camera = pyrender.PerspectiveCamera(yfov=self.camera_fov,
                                        aspectRatio=self.aspect_ratio)
    # Rotation matrices about x (pitch), y (roll) and z (yaw) from
    # self.camera_rot = (pitch, roll, yaw) in radians.
    pitch_cos, pitch_sin = np.cos(self.camera_rot[0]), np.sin(self.camera_rot[0])
    roll_cos, roll_sin = np.cos(self.camera_rot[1]), np.sin(self.camera_rot[1])
    yaw_cos, yaw_sin = np.cos(self.camera_rot[2]), np.sin(self.camera_rot[2])
    pitch = np.array([[1.0, 0.0, 0.0, 0.0],
                      [0.0, pitch_cos, -pitch_sin, 0.0],
                      [0.0, pitch_sin, pitch_cos, 0.0],
                      [0.0, 0.0, 0.0, 1.0]])
    roll = np.array([[roll_cos, 0.0, roll_sin, 0.0],
                     [0.0, 1.0, 0.0, 0.0],
                     [-roll_sin, 0.0, roll_cos, 0.0],
                     [0.0, 0.0, 0.0, 1.0]])
    yaw = np.array([[yaw_cos, -yaw_sin, 0.0, 0.0],
                    [yaw_sin, yaw_cos, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0]])
    camera_transform = np.array([[1.0, 0.0, 0.0, self.camera_pos[0]],
                                 [0.0, 1.0, 0.0, self.camera_pos[1]],
                                 [0.0, 0.0, 1.0, self.camera_pos[2]],
                                 [0.0, 0.0, 0.0, 1.0]])
    # Pose = translation applied after the combined yaw*roll*pitch rotation.
    # (np.mat is deprecated; plain arrays with @ give identical results.)
    camera_pose = camera_transform @ (yaw @ roll @ pitch)
    scene.add(camera, pose=camera_pose)
    # Adding lights into the scene
    for light in self.lights:
        scene.add(light[0], pose=light[1])
    # Render the scene and return the resultant image, freeing the GL
    # context afterwards (it was previously leaked).
    r = pyrender.OffscreenRenderer(width, height)
    color, depth = r.render(scene)
    r.delete()
    return color
def rnder_one_scene(args, mesh_pth, obj_pose, camera_pose):
    """Render one mesh placed at obj_pose, viewed from camera_pose.

    args: namespace with h, w (image size) and K (3x3 intrinsics or flat list).
    Returns (color, depth), or None if the mesh fails to load.
    """
    try:
        fuze_trimesh = trimesh.load(mesh_pth)
        mesh = pyrender.Mesh.from_trimesh(fuze_trimesh)
    except Exception:
        print("Error loading from {}".format(mesh_pth))  # fixed typo "loadding"
        return
    scene = pyrender.Scene(ambient_light=[0.9, 0.9, 0.9])
    nm = pyrender.Node(mesh=mesh, matrix=obj_pose)
    scene.add_node(nm)
    h, w = args.h, args.w
    # Accept K either as a flat list or an already-shaped 3x3 array.
    if type(args.K) == list:
        K = np.array(args.K).reshape(3, 3)
    else:
        K = args.K
    camera = pyrender.IntrinsicsCamera(K[0][0], K[1][1], K[0][2], K[1][2])
    scene.add(camera, pose=camera_pose)
    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1.0)
    # light = pyrender.light.Light(color=[1.0, 1.0, 1.0], intensity=5)
    scene.add(light)
    r = pyrender.OffscreenRenderer(w, h)
    color, depth = r.render(scene)
    r.delete()  # free the GL context (it was previously leaked)
    return color, depth
def captureDepth(model, rotations, imageWidth=224, imageHeight=224,
                 cameraZTranslation=2.5, lightIntensity=2.0,
                 depthBegin=1, depthEnd=5):
    """Render the model's parts under each rotation and collect the results.

    model: object with a `parts` list, each part exposing a trimesh `mesh`.
    rotations: iterable of rotations passed to renderSceneWithRotation.
    Returns a list with one render result per rotation.
    """
    # Construct an offline scene
    scene = pyrender.Scene()
    # Add every part mesh (flat shading).
    for part in model.parts:
        scene.add(pyrender.Mesh.from_trimesh(part.mesh, smooth=False))
    # Camera pulled back along +z by cameraZTranslation.
    renderCamera = pyrender.PerspectiveCamera(
        yfov=np.pi / 3.0, aspectRatio=imageWidth / imageHeight)
    cameraNode = pyrender.Node(camera=renderCamera, matrix=np.eye(4))
    cameraNode.translation[2] = cameraZTranslation
    scene.add_node(cameraNode)
    # Directional light at the origin.
    lightNode = pyrender.Node(
        light=pyrender.DirectionalLight(color=[1.0, 1.0, 1.0],
                                        intensity=lightIntensity),
        matrix=np.eye(4))
    scene.add_node(lightNode)
    # Offscreen renderer shared across all rotations.
    offscreenRenderer = pyrender.OffscreenRenderer(
        viewport_width=imageWidth, viewport_height=imageHeight,
        point_size=1.0)
    return [renderSceneWithRotation(offscreenRenderer, scene, rotation,
                                    depthBegin, depthEnd)
            for rotation in rotations]
def __init__(self, size=(512, 512), config=None):
    """Offscreen renderer with a ring of directional lights.

    size: (width, height) viewport in pixels.
    config: optional dict with 'ambient_light' and 'bg_color'.
    """
    if config is None:
        config = {
            'ambient_light': (0.1, 0.1, 0.1),
            'bg_color': (255, 255, 255),
        }
    self.renderer = pyrender.OffscreenRenderer(*size)
    self.scene = pyrender.Scene(bg_color=config['bg_color'],
                                ambient_light=config['ambient_light'])
    cam = pyrender.PerspectiveCamera(yfov=np.pi / 3.0,
                                     aspectRatio=size[0] / size[1],
                                     znear=0.01, zfar=10)
    self.camera_node = self.scene.add(cam, pose=self.make_camera_pose())
    # Ring of n directional lights every 360/n degrees, pitched down 30
    # degrees, with total intensity 10 split evenly.
    n_lights = 10
    self.dir_light_nodes = []
    for i in range(n_lights):
        dir_light = pyrender.DirectionalLight(color=np.ones(3),
                                              intensity=10.0 / n_lights)
        pose = np.eye(4)
        pose[:3, :3] = Rotation.from_euler(
            'zyx', [0, i * 360 / n_lights, -30], degrees=True).as_matrix()
        self.dir_light_nodes.append(self.scene.add(dir_light, pose=pose))
    # Nodes that survive scene resets.
    self.persistent = [self.camera_node] + self.dir_light_nodes
def rendering(R, fuze_trimesh):
    """Render `fuze_trimesh` posed by R, camera 0.4 units along +z.

    Returns the image with swapped channel order for OpenCV use.
    """
    mesh = pyrender.Mesh.from_trimesh(fuze_trimesh, poses=[R])
    scene = pyrender.Scene()
    scene.add(mesh)
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3, aspectRatio=1)
    # Previously computed as identity.dot(translation); the identity factor
    # was a no-op, so the pose is just the translation matrix.
    camera_pose = np.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0.4],
        [0, 0, 0, 1],
    ], dtype='float')
    scene.add(camera, pose=camera_pose)
    light = pyrender.SpotLight(color=np.ones(3), intensity=3.0,
                               innerConeAngle=np.pi / 16.0)
    scene.add(light, pose=camera_pose)
    r = pyrender.OffscreenRenderer(400, 400)
    color, depth = r.render(scene)
    r.delete()  # free the GL context (it was previously leaked)
    # Swap channel order for OpenCV (pyrender outputs RGB).
    color = cv2.cvtColor(color, cv2.COLOR_BGR2RGB)
    return color
def main():
    """Overlay a rendered pose on the real image, then open the 3D viewer."""
    name = 's0_train'
    dataset = get_dataset(name)
    idx = 70
    sample = dataset[idx]
    scene_r = create_scene(sample, dataset.obj_file)
    scene_v = create_scene(sample, dataset.obj_file)
    print('Visualizing pose in camera view using pyrender renderer')
    renderer = pyrender.OffscreenRenderer(viewport_width=dataset.w,
                                          viewport_height=dataset.h)
    im_render, _ = renderer.render(scene_r)
    # cv2 loads BGR; reverse channels to RGB.
    im_real = cv2.imread(sample['color_file'])
    im_real = im_real[:, :, ::-1]
    # Blend real photo (33%) with the rendered overlay (67%).
    blended = (0.33 * im_real.astype(np.float32)
               + 0.67 * im_render.astype(np.float32))
    blended = blended.astype(np.uint8)
    print('Close the window to continue.')
    plt.imshow(blended)
    plt.tight_layout()
    plt.show()
    print('Visualizing pose using pyrender 3D viewer')
    pyrender.Viewer(scene_v)
def save_image_face(facedata, vec, name="face", path=""):
    """Render the face mesh encoded by `vec` and save it as <path><name>.png.

    facedata: object providing vec2meshTrimesh2(vec) -> trimesh mesh.
    """
    plt.clf()
    plt.close()
    # Generate mesh
    predict_trimeshh = facedata.vec2meshTrimesh2(vec)
    trimeshh = pyrender.Mesh.from_trimesh(predict_trimeshh, smooth=False)
    # Create scene for rendering
    scene = pyrender.Scene(ambient_light=[.1, .1, .3], bg_color=[0, 0, 0])
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0)
    light = pyrender.DirectionalLight(color=[1, 1, 1], intensity=2e3)
    scene.add(trimeshh, pose=np.eye(4))
    scene.add(light, pose=np.eye(4))
    # Fixed camera pose tuned for this face model.
    camera_pose = np.array([
        [0.94063, 0.01737, -0.41513, -88.15790],
        [-0.06728, 0.98841, -0.16663, -35.36127],
        [0.33266, 0.15078, 1.14014, 241.71166],
        [0.00000, 0.00000, 0.00000, 1.00000]
    ])
    scene.add(camera, pose=camera_pose)
    # Use this in order to visualize the mesh live:
    # view = pyrender.Viewer(scene)
    # cam = view.get_my_camera_node_viewer()  # Allow access to protected attributes
    # print(view.scene.get_pose(scene.main_camera_node))
    r = pyrender.OffscreenRenderer(512, 512)
    color, _ = r.render(scene)
    r.delete()  # free the GL context (it was previously leaked)
    plt.figure(figsize=(8, 8))
    plt.imshow(color)
    img_name = path + name + ".png"
    plt.savefig(img_name)
    plt.clf()
    plt.close()  # close the figure created above (it was previously leaked)
def mesh_to_png(file_name, mesh, width=640, height=480, z_camera_translation=400):
    """Render `mesh` (vertices/triangles/colors attributes) and save the
    color image to file_name."""
    tri = trimesh.base.Trimesh(vertices=mesh.vertices,
                               faces=mesh.triangles,
                               vertex_colors=mesh.colors)
    py_mesh = pyrender.Mesh.from_trimesh(tri, smooth=True, wireframe=False)
    # compose scene
    scene = pyrender.Scene(ambient_light=np.array([1.7, 1.7, 1.7, 1.0]),
                           bg_color=[255, 255, 255])
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0)
    light = pyrender.DirectionalLight(color=[1, 1, 1], intensity=2e3)
    scene.add(py_mesh, pose=np.eye(4))
    scene.add(light, pose=np.eye(4))
    # Camera translated z_camera_translation in the 0z direction w.r.t. the origin
    scene.add(camera, pose=[[1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, z_camera_translation],
                            [0, 0, 0, 1]])
    # render scene
    r = pyrender.OffscreenRenderer(width, height)
    color, _ = r.render(scene)
    r.delete()  # free the GL context (it was previously leaked)
    imsave(file_name, color)
def store_param_history(self, plot_manager, folder, param_history):
    """Render every parameter snapshot over the point cloud and save each
    frame as <folder>/iteration_<i>.jpg via the plot manager."""
    for i, params in enumerate(param_history):
        # Sample the face mesh with zeroed color coefficients.
        face_mesh = self.bfm.draw_sample(
            shape_coefficients=params.shape_coefficients,
            expression_coefficients=params.expression_coefficients,
            color_coefficients=[0] * self.n_color_coefficients)
        # Shift both the point cloud and the mesh 150 units along -z.
        translation = np.zeros((4, 4))
        translation[2, 3] = -150
        perspective_camera = get_perspective_camera(
            self.intrinsics, self.img_width, self.img_height)
        scene = setup_standard_scene(perspective_camera)
        scene.add(
            pyrender.Mesh.from_points(self.pointcloud, colors=self.colors),
            pose=np.eye(4) + translation)
        scene.add(
            pyrender.Mesh.from_trimesh(
                self.bfm.convert_to_trimesh(face_mesh)),
            pose=params.camera_pose + translation)
        # Render at 2x resolution, releasing the context each iteration.
        renderer = pyrender.OffscreenRenderer(self.img_width * 2,
                                              self.img_height * 2)
        color, _ = renderer.render(scene)
        renderer.delete()
        plt.figure(figsize=(8, 12))
        plt.imshow(color)
        plot_manager.save_current_plot(f"{folder}/iteration_{i:05d}.jpg")
        plt.close()
def __init__(self, focal_length=5000, img_res=224, faces=None):
    """Square offscreen renderer with a fixed focal length.

    focal_length: camera focal length in pixels.
    img_res: square image resolution in pixels.
    faces: optional face index array for meshes rendered later.
    """
    self.renderer = pyrender.OffscreenRenderer(viewport_width=img_res,
                                               viewport_height=img_res,
                                               point_size=1.0)
    self.focal_length = focal_length
    # Principal point at the image center.
    self.camera_center = [img_res // 2, img_res // 2]
    self.faces = faces
def render_hmr_smpl(hmr_coeff, f_len=500., rend_size=224., req_model=False):
    """Render an HMR SMPL prediction.

    hmr_coeff: 85-vector (named theta in HMR):
        [scale, tx, ty, pose(72), betas(10)].
    f_len: focal length in pixels; rend_size: square image size.
    req_model: if True, also return (verts, faces, depth).
    """
    # hmr_coeff is a 85-vector, named as theta in hmr
    hmr_coeff = np.asarray(hmr_coeff).tolist()
    # make scene
    scene = pyrender.Scene()
    # Camera whose vertical FOV matches the focal length / image size.
    camera = pyrender.PerspectiveCamera(
        yfov=np.arctan(rend_size * 0.5 / f_len) * 2, aspectRatio=1)
    camera_pose = np.array([[1.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 1.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 1.0]])
    scene.add(camera, pose=camera_pose)
    # Three spot lights positioned around the subject.
    light_posi1 = np.array([[1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, -1.0],
                            [0.0, 1.0, 0.0, -2.0], [0.0, 0.0, 0.0, 1.0]])
    light_posi2 = np.array([[1.0, 0.0, 0.0, -1.0], [0.0, 0.0, 1.0, -1.0],
                            [0.0, 1.0, 0.0, -2.0], [0.0, 0.0, 0.0, 1.0]])
    light_posi3 = np.array([[1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0],
                            [0.0, 1.0, 0.0, -2.0], [0.0, 0.0, 0.0, 1.0]])
    light = pyrender.SpotLight(
        color=np.array([0.65098039, 0.74117647, 0.85882353]),
        intensity=100, innerConeAngle=np.pi / 16.0, outerConeAngle=np.pi / 6.0)
    scene.add(light, pose=light_posi1)
    scene.add(light, pose=light_posi2)
    scene.add(light, pose=light_posi3)
    # get renderer
    r = pyrender.OffscreenRenderer(viewport_width=rend_size,
                                   viewport_height=rend_size)
    # get verts from smpl coefficients
    smpl_op = load_model("./tf_smpl/neutral_smpl_with_cocoplus_reg.pkl")
    smpl_op.pose[:] = np.asarray(hmr_coeff[3:75])
    smpl_op.betas[:] = np.array(hmr_coeff[75:85])
    verts = np.array(smpl_op)
    # Weak-perspective [scale, tx, ty] -> camera-space translation.
    global_t = np.array(
        [hmr_coeff[1], hmr_coeff[2], f_len / (0.5 * rend_size * hmr_coeff[0])])
    verts = verts + global_t
    faces = np.load("./tf_smpl/smpl_faces.npy").astype(np.int32)
    # Smooth and slightly inflate the surface before rendering (the returned
    # `verts` stay unsmoothed).
    om_mesh = make_trimesh(verts, faces)
    om_mesh = smooth_mesh(om_mesh, 4)
    om_mesh = expand_mesh(om_mesh, 0.026)
    this_trimesh = trimesh.Trimesh(vertices=om_mesh.points(),
                                   faces=om_mesh.face_vertex_indices())
    this_mesh = pyrender.Mesh.from_trimesh(this_trimesh)
    scene.add(this_mesh)
    rend_img, depth = r.render(scene)
    r.delete()  # free the GL context (it was previously leaked)
    if req_model is True:
        return rend_img, verts, faces, depth
    else:
        return rend_img
def __init__(self, width=1200, height=800, use_offscreen=True):
    """Create a MeshViewer, either offscreen (renderer) or interactive."""
    super(MeshViewer, self).__init__()
    self.use_offscreen = use_offscreen
    self.render_wireframe = False
    # Convenience constructors for callers adding meshes later.
    self.mat_constructor = pyrender.MetallicRoughnessMaterial
    self.trimesh_to_pymesh = pyrender.Mesh.from_trimesh
    self.scene = pyrender.Scene(bg_color=colors['white'],
                                ambient_light=(0.3, 0.3, 0.3))
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0,
                                        aspectRatio=float(width) / height)
    # Camera pulled back 2.5 units along +z.
    pose = np.eye(4)
    pose[:3, 3] = np.array([0, 0, 2.5])
    self.scene.add(camera, pose=pose, name='pc-camera')
    self.figsize = (width, height)
    if self.use_offscreen:
        self.viewer = pyrender.OffscreenRenderer(*self.figsize)
        self.use_raymond_lighting(5.)
    else:
        self.viewer = pyrender.Viewer(self.scene,
                                      use_raymond_lighting=True,
                                      viewport_size=self.figsize,
                                      cull_faces=False,
                                      run_in_thread=True)
    self.set_background_color(colors['white'])
def get_rendered_depth(method, mesh_root, param_root, model_name, camera_index):
    """Render the depth image of a reconstructed mesh from a stored camera.

    Returns the depth buffer at window_dims resolution.
    """
    trimesh_obj = trimesh.load(get_mesh_path(mesh_root, model_name, method),
                               process=True)
    mesh = pyrender.Mesh.from_trimesh(trimesh_obj)
    extrinsic, intrinsic = read_camera(param_root, model_name, camera_index)
    # 'plain' lighting uses a bright ambient term instead of raymond lights.
    if LIGHTING == 'plain':
        scene = pyrender.Scene(ambient_light=[0.75, 0.75, 0.75])
    else:
        scene = pyrender.Scene()
    scene.add(mesh)
    camera_intrinsic = pyrender.IntrinsicsCamera(intrinsic[0][0],
                                                 intrinsic[1][1],
                                                 intrinsic[0][2],
                                                 intrinsic[1][2],
                                                 zfar=6000)
    scene.add(camera_intrinsic, pose=extrinsic)
    if LIGHTING != 'plain':
        for n in create_raymond_lights():
            scene.add_node(n, scene.main_camera_node)
    # pyrender.Viewer(scene, viewport_size=window_dims[::-1])
    r = pyrender.OffscreenRenderer(window_dims[1], window_dims[0])
    color, depth = r.render(scene)
    r.delete()  # free the GL context (it was previously leaked)
    return depth
def generate_thumbnail(inname, outname, light_color=[1.0, 1.0, 1.0]):
    """Generate a thumbnail image of a GLB mesh.

    inname: input GLB path; outname: output image path.
    light_color: RGB directional-light color (read-only, so the mutable
        default is harmless here).
    """
    print("Loading", inname)
    tmesh_obj = trimesh.load(inname)
    mesh = glbutils.getSceneMesh(tmesh_obj)
    # Aim the camera so the whole bounding box is in view.
    corners = bound_corners(mesh.bounds)
    tm_scene = trimesh.scene.scene.Scene(geometry=[mesh])
    cam = tm_scene.camera
    cam_transform = cam.look_at(corners)
    pyren_mesh = pyrender.Mesh.from_trimesh(mesh)
    pyren_scene = pyrender.Scene()
    pyren_scene.add(pyren_mesh)
    pyren_cam = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
    pyren_scene.add(pyren_cam, pose=cam_transform)
    pyren_light = pyrender.DirectionalLight(color=np.asarray(light_color),
                                            intensity=3.0)
    pyren_scene.add(pyren_light)
    r = pyrender.OffscreenRenderer(400, 400)
    color, depth = r.render(pyren_scene)
    r.delete()  # free the GL context (it was previously leaked)
    plt.imsave(outname, color)
    print(outname, "written")
def __init__(
        self,
        canvas_shape,
        faces=None,
        bg_color=(255, 0, 0, 0),
        world_angles=(-np.pi, 0.0, 0.0),
        ambient_light=(0.3, 0.3, 0.3),
        directional_light_intensity=2.0
):
    """Offscreen renderer with a rotated world frame and raymond lighting."""
    self.canvas_shape = canvas_shape
    self.faces = faces
    # World transform: homogeneous matrix built from the Euler angles.
    world_tf = np.eye(4)
    world_tf[:3, :3] = self.rotation_matrix_from_angles(world_angles)
    self.world_transformation_matrix = world_tf
    self.scene = pyrender.Scene(bg_color=tuple(bg_color),
                                ambient_light=tuple(ambient_light))
    self.viewer = pyrender.OffscreenRenderer(*canvas_shape)
    # Attach the raymond light rig in the rotated world frame.
    for node in self.build_raymond_light(
            directional_light_intensity,
            T=self.world_transformation_matrix):
        self.scene.add_node(node)