def create_scene(self, off_screen=False, ego_view=True, pose=None):
    """
    Creates a scene with the robot and a camera

    Parameters
    ----------
    off_screen : bool, optional
        False for showing and displaying the robot, by default False
    ego_view : bool, optional
        True for ego view, false for observer camera, by default True
    pose : array_like, optional
        4x4 camera pose; if None, a default ego-view or observer pose is used
    """
    if pose is None:
        if ego_view:
            pose = [[0, 0.5, -0.35, 0.2],
                    [-1, 0, 0, 0.0],
                    [0, 0.35, 0.5, 1.5],
                    [0, 0, 0, 1]]
        else:
            pose = [[0, 0, 1, 1.5],
                    [1, 0, 0, 0.0],
                    [0, 1, 0, 1.2],
                    [0, 0, 0, 1]]

    self.scene = pyrender.Scene()
    fk = self.get_robot_trimesh()
    for tm in fk:
        # Use a separate name so the camera `pose` argument is not overwritten
        mesh_pose = fk[tm]
        mesh = pyrender.Mesh.from_trimesh(tm, smooth=False)
        node = self.scene.add(mesh, pose=mesh_pose)
        self.r_node_map[tm] = node

    if off_screen:
        if ego_view:
            camera = pyrender.IntrinsicsCamera(400, 400, 1920 / 2, 1080 / 2)
        else:
            camera = pyrender.IntrinsicsCamera(800, 800, 1920 / 2, 1080 / 2)
        light = pyrender.DirectionalLight(color=[1, 1, 1], intensity=2e3)
        self.scene.add(camera, pose=pose)
        self.scene.add(light, pose=np.eye(4))
        self.rend = pyrender.OffscreenRenderer(1920, 1080)
        logger.debug("renderer initialised: " + str(self.rend))
    else:
        self.viewer = pyrender.Viewer(self.scene,
                                      use_raymond_lighting=True,
                                      run_in_thread=True)

def render(self, camera_pose: np.ndarray, camera_intrinsics: np.ndarray,
           znear: float = 1.0, zfar: float = 100.0,
           render_flags: BoxRenderFlags = None):
    '''
    camera_pose: numpy 4x4 array of the camera pose in the global coordinate system
    camera_intrinsics: [fx, fy, cx, cy]: list of 4 floating point values for the
        camera intrinsics (fx, fy, cx, cy in pixels)
    znear: near clipping plane - not part of the intrinsics; defines the near
        clipping of the depth values
    zfar: far clipping plane - not part of the intrinsics; defines the far
        clipping of the depth values
    '''
    if self._initialize_offscreen_renderer:
        self._renderer = pyrender.OffscreenRenderer(self._canvas_width, self._canvas_height)
        self._initialize_offscreen_renderer = False

    if len(self._scene.camera_nodes) > 0:
        self._scene.remove_node(next(iter(self._scene.camera_nodes)))

    camera = pyrender.IntrinsicsCamera(fx=camera_intrinsics[0], fy=camera_intrinsics[1],
                                       cx=camera_intrinsics[2], cy=camera_intrinsics[3],
                                       znear=znear, zfar=zfar)
    final_camera_pose = np.dot(camera_pose, self._camera_transform)
    self._scene.bg_color = self._background_color
    self._scene.add(camera, pose=final_camera_pose)

    self._generate_mesh(camera_pose, render_flags)
    self._add_box_mesh_to_scene()

    color, depth = self._renderer.render(
        self._scene,
        flags=pyrender.RenderFlags.DISABLE_MULTISAMPLING | pyrender.RenderFlags.RGBA)

    # undo normal color encoding
    normals = (2.0 * color[:, :, :3]) / 255.0 - 1
    labels = color[:, :, 3]

    # convert normals to the camera coordinate system
    inv_camera_transform = np.linalg.inv(self._camera_transform)
    inv_camera_rot = inv_camera_transform[:3, :3]
    trans_normals = np.dot(inv_camera_rot, normals.reshape((-1, 3)).T)
    normals_reshaped = np.reshape(trans_normals.T, normals.shape)

    return color, depth, normals_reshaped, labels

def _init_camera_renderer(self):
    """
    If not in visualizing mode, initialize camera with given intrinsics
    """
    if self._viewer:
        return

    if self._intrinsics in ['kinect_azure', 'realsense']:
        camera = pyrender.IntrinsicsCamera(self._fx, self._fy, self._cx, self._cy,
                                           self._znear, self._zfar)
        self._camera_node = self._scene.add(camera, pose=np.eye(4), name='camera')
        self.renderer = pyrender.OffscreenRenderer(viewport_width=self._width,
                                                   viewport_height=self._height,
                                                   point_size=1.0)
    else:
        # do not change aspect ratio
        camera = pyrender.PerspectiveCamera(yfov=self._fov, aspectRatio=1.0, znear=0.001)
        self._camera_node = self._scene.add(camera,
                                            pose=tra.euler_matrix(np.pi, 0, 0),
                                            name='camera')
        self.renderer = pyrender.OffscreenRenderer(400, 400)

def get_rendered_depth(method, mesh_root, param_root, model_name, camera_index):
    trimesh_obj = trimesh.load(get_mesh_path(mesh_root, model_name, method), process=True)
    mesh = pyrender.Mesh.from_trimesh(trimesh_obj)
    extrinsic, intrinsic = read_camera(param_root, model_name, camera_index)

    if LIGHTING == 'plain':
        scene = pyrender.Scene(ambient_light=[0.75, 0.75, 0.75])
    else:
        scene = pyrender.Scene()
    scene.add(mesh)

    camera_intrinsic = pyrender.IntrinsicsCamera(intrinsic[0][0], intrinsic[1][1],
                                                 intrinsic[0][2], intrinsic[1][2],
                                                 zfar=6000)
    scene.add(camera_intrinsic, pose=extrinsic)

    if LIGHTING != 'plain':
        for n in create_raymond_lights():
            scene.add_node(n, scene.main_camera_node)

    # pyrender.Viewer(scene, viewport_size=window_dims[::-1])
    r = pyrender.OffscreenRenderer(window_dims[1], window_dims[0])
    color, depth = r.render(scene)
    return depth

def render_one_scene(args, mesh_pth, obj_pose, camera_pose):
    try:
        fuze_trimesh = trimesh.load(mesh_pth)
        mesh = pyrender.Mesh.from_trimesh(fuze_trimesh)
    except Exception:
        print("Error loading from {}".format(mesh_pth))
        return

    scene = pyrender.Scene(ambient_light=[0.9, 0.9, 0.9])
    nm = pyrender.Node(mesh=mesh, matrix=obj_pose)
    scene.add_node(nm)

    h, w = args.h, args.w
    if isinstance(args.K, list):
        K = np.array(args.K).reshape(3, 3)
    else:
        K = args.K
    camera = pyrender.IntrinsicsCamera(K[0][0], K[1][1], K[0][2], K[1][2])
    scene.add(camera, pose=camera_pose)

    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1.0)
    # light = pyrender.light.Light(color=[1.0, 1.0, 1.0], intensity=5)
    scene.add(light)

    r = pyrender.OffscreenRenderer(w, h)
    color, depth = r.render(scene)
    return color, depth

def add_camera(self, camera_pose=np.eye(4)):
    fx = self.config["camera"]["fx"]
    fy = self.config["camera"]["fy"]
    cx = self.config["camera"]["width"] / 2
    cy = self.config["camera"]["height"] / 2
    camera = pyrender.IntrinsicsCamera(fx, fy, cx, cy)
    self.scene.add(camera, pose=camera_pose)

def __init__(self, model_paths, cam_K, H, W):
    if not isinstance(model_paths, list):
        raise RuntimeError("model_paths has to be a list")

    self.scene = pyrender.Scene(ambient_light=[1., 1., 1.], bg_color=[0, 0, 0])
    self.camera = pyrender.IntrinsicsCamera(fx=cam_K[0, 0], fy=cam_K[1, 1],
                                            cx=cam_K[0, 2], cy=cam_K[1, 2],
                                            znear=0.1, zfar=2.0)
    self.cam_node = self.scene.add(self.camera, pose=np.eye(4))

    self.mesh_nodes = []
    for model_path in model_paths:
        print('model_path', model_path)
        obj_mesh = trimesh.load(model_path)
        colorVisual = obj_mesh.visual.to_color()
        mesh = pyrender.Mesh.from_trimesh(obj_mesh)
        # Object pose parent is the camera node
        mesh_node = self.scene.add(mesh, pose=np.eye(4), parent_node=self.cam_node)
        self.mesh_nodes.append(mesh_node)

    self.H = H
    self.W = W
    self.r = pyrender.OffscreenRenderer(self.W, self.H)

    # OpenGL camera looks down -Z, OpenCV camera looks down +Z
    self.glcam_in_cvcam = np.array([[1, 0, 0, 0],
                                    [0, -1, 0, 0],
                                    [0, 0, -1, 0],
                                    [0, 0, 0, 1]])
    self.cvcam_in_glcam = np.linalg.inv(self.glcam_in_cvcam)

def set_intrinsics(self, size, K, scale=None, znear=0.1, zfar=10, rvec=None):
    self.K_no_scale = deepcopy(K)
    self.K = deepcopy(K)
    if scale is not None:
        self.K[:2] *= scale

    self.camera = pyrender.IntrinsicsCamera(fx=self.K[0, 0], fy=self.K[1, 1],
                                            cx=self.K[0, 2], cy=self.K[1, 2],
                                            znear=znear, zfar=zfar)
    if self.camera_node is not None:
        self.scene.remove_node(self.camera_node)

    self.T = np.eye(4)
    if rvec is not None:
        rotmtx = cv2.Rodrigues(np.array(rvec))[0]
        self.T[:3, :3] = rotmtx
    self.camera_node = self.scene.add(self.camera, pose=self.T, name='pc-camera')

    self.size = deepcopy(size)
    if scale is not None:
        for k in self.size:
            self.size[k] *= scale
    self.viewer = pyrender.OffscreenRenderer(self.size['w'], self.size['h'])

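# Illustrative sketch (not from the snippet above, plain 3x3 K assumed): when the
# render target is resized by a factor s, fx, fy, cx, cy scale by the same factor,
# which is what the `self.K[:2] *= scale` line above relies on.
import numpy as np

def scale_intrinsics(K, s):
    K_scaled = np.asarray(K, dtype=float).copy()
    K_scaled[:2] *= s  # scales fx, fy, cx, cy (and any skew term in row 0)
    return K_scaled
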
def __init__(self, width, height, camera_distance=0.05, pose_y=0.0, focal_length=None):
    self.camera = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0, znear=0.05)
    if focal_length:
        focal_length = focal_length * height
        self.camera = pyrender.IntrinsicsCamera(focal_length, focal_length,
                                                width / 2, height / 2, 0.05, 5.05)
    self.width = width
    self.height = height
    self.global_tr = np.array([[1.0, 0.0, 0.0, 0.0],
                               [0.0, 1.0, 0.0, pose_y],
                               [0.0, 0.0, 1.0, 0.0],
                               [0.0, 0.0, 0.0, 1.0]])
    self.camera_pose = np.array([[1.0, 0.0, 0.0, 0.0],
                                 [0.0, 1.0, 0.0, 0.0],
                                 [0.0, 0.0, 1.0, camera_distance],
                                 [0.0, 0.0, 0.0, 1.0]])
    self.tri_mesh = self.py_mesh = self.vertices = self.faces = None
    self.render = self.py_scene = None

def render_images(self, mode):
    if self.all_viewpoints[mode] is None:
        print("Error: list of points to render %s is None" % mode)

    r = pyrender.OffscreenRenderer(self.width * self.anti_alias,
                                   self.height * self.anti_alias)

    if mode == 'train':
        N = min(len(self.all_viewpoints['train']), self.n_views_train)
    elif mode == 'test':
        N = min(len(self.all_viewpoints['test']), self.n_views_test)
    elif mode == 'val':
        N = min(len(self.all_viewpoints['val']), self.n_views_train)
    print("Rendering %s from %d views ..." % (mode, N))

    frames_data_dump = []
    camera = pyrender.IntrinsicsCamera(self.fx, self.fx, self.cx, self.cy, 0.0001, 1000)
    camera_node = self.scene.add(camera, pose=np.eye(4))

    for i in tqdm(range(N)):
        camera_position = self.all_viewpoints[mode][i]
        camera_pose = lookAt(camera_position, [0, 0, 0])
        self.scene.set_pose(self.scene.main_camera_node, pose=camera_pose)

        # Render the scene (flag 1024 == pyrender.RenderFlags.SKIP_CULL_FACES,
        # i.e. skip backface culling)
        color, depth = r.render(self.scene, flags=1024)
        if np.max(color) == np.min(color):
            print("rendered nothing for %d-th pose" % i)

        # Save png file; downsample the high-resolution render for anti-aliasing
        im = PIL.Image.fromarray(color)
        if self.anti_alias > 1:
            im.thumbnail([self.height, self.width], PIL.Image.ANTIALIAS)

        # I/O - Json and image
        im.save(self.target_path + '%s/r_%04d.png' % (mode, i))
        frames_data_dump.append({'file_path': './%s/r_%04d' % (mode, i),
                                 'transform_matrix': camera_pose.tolist()})
        with open(self.target_path + '/transforms_%s.json' % mode, 'w') as fp:
            json.dump({'camera_angle_x': self.camera_angle,
                       'frames': frames_data_dump}, fp, indent=4)

        # Change to True for visualization of the first few renders
        if True:
            import matplotlib.pyplot as plt
            if i < 3:
                plt.figure()
                plt.imshow(color)
                plt.show()

def render_skeleton(self, joints, camera_translation, image):
    material = pyrender.MetallicRoughnessMaterial(
        metallicFactor=0.2,
        alphaMode='OPAQUE',
        baseColorFactor=(0.8, 0.3, 0.3, 1.0))

    camera_translation[0] *= -1.
    scene = pyrender.Scene()

    # One small sphere per joint, flipped into the pyrender coordinate frame
    sm = trimesh.creation.uv_sphere(radius=0.01)
    sm.visual.vertex_colors = [0.9, 0.1, 0.1, 1.0]
    tfs = np.tile(np.eye(4), (len(joints), 1, 1))
    tfs[:, :3, 3] = joints
    tfs[:, 1, 1] = -1
    tfs[:, 2, 2] = -1
    rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])
    joints_pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs, material=material)
    scene.add(joints_pcl, 'joints')

    # p = trimesh.load_path([[joints[29,:], joints[30,:]]])
    bones = pyrender.Mesh.from_points([[joints[29, :], joints[30, :]]])
    scene.add(bones, 'bones')

    camera_pose = np.eye(4)
    camera_pose[:3, 3] = camera_translation
    camera = pyrender.IntrinsicsCamera(fx=self.focal_length, fy=self.focal_length,
                                       cx=self.camera_center[0], cy=self.camera_center[1])
    scene.add(camera, pose=camera_pose)

    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)
    light_pose = np.eye(4)
    light_pose[:3, 3] = np.array([0, -1, 1])
    scene.add(light, pose=light_pose)
    light_pose[:3, 3] = np.array([0, 1, 1])
    scene.add(light, pose=light_pose)
    light_pose[:3, 3] = np.array([1, 1, 2])
    scene.add(light, pose=light_pose)

    color, rend_depth = self.renderer.render(scene, flags=pyrender.RenderFlags.RGBA)
    color = color.astype(np.float32) / 255.0
    valid_mask = (rend_depth > 0)[:, :, None]
    output_img = (color[:, :, :3] * valid_mask + (1 - valid_mask) * image)
    return output_img

def get_intrinsics():
    return pyrender.IntrinsicsCamera(
        fx=FOCAL, fy=FOCAL,
        cx=IMAGE_SIZE / 2, cy=IMAGE_SIZE / 2,
        znear=Z_NEAR, zfar=Z_FAR)

def cameraFromIntrinsics(rs_intrinsics):
    """Returns a Pyrender camera.

    Makes a Pyrender camera from RealSense intrinsics.
    """
    return pyrender.IntrinsicsCamera(cx=rs_intrinsics.ppx, cy=rs_intrinsics.ppy,
                                     fx=rs_intrinsics.fx, fy=rs_intrinsics.fy)

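# Minimal standalone sketch (values invented for illustration): the same mapping as
# above, but starting from a 3x3 OpenCV-style intrinsics matrix K instead of a
# RealSense intrinsics object.
import numpy as np
import pyrender

K = np.array([[615.0,   0.0, 320.0],
              [  0.0, 615.0, 240.0],
              [  0.0,   0.0,   1.0]])
camera = pyrender.IntrinsicsCamera(fx=K[0, 0], fy=K[1, 1],
                                   cx=K[0, 2], cy=K[1, 2],
                                   znear=0.05, zfar=100.0)
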
def __init__(self, name, device='cuda:0'):
    """Constructor.

    Args:
        name: Sequence name.
        device: A torch.device string argument. The specified device is used only
            for certain data loading computations, but not for storing the loaded
            data. Currently the loaded data is always stored as numpy arrays on cpu.
    """
    assert device in ('cuda', 'cpu') or device.split(':')[0] == 'cuda'
    self._name = name
    self._device = torch.device(device)

    self._loader = SequenceLoader(self._name, device=device, preload=False, app='renderer')

    # Create pyrender cameras.
    self._cameras = []
    for c in range(self._loader.num_cameras):
        K = self._loader.K[c].cpu().numpy()
        fx = K[0][0].item()
        fy = K[1][1].item()
        cx = K[0][2].item()
        cy = K[1][2].item()
        cam = pyrender.IntrinsicsCamera(fx, fy, cx, cy)
        self._cameras.append(cam)

    # Create meshes for YCB objects.
    self._mesh_y = []
    for o in range(self._loader.num_ycb):
        obj_file = self._loader.ycb_group_layer.obj_file[o]
        mesh = trimesh.load(obj_file)
        mesh = pyrender.Mesh.from_trimesh(mesh)
        self._mesh_y.append(mesh)

    # Create spheres for MANO joints.
    self._mesh_j = []
    for o in range(self._loader.num_mano):
        mesh = trimesh.creation.uv_sphere(radius=0.005)
        mesh.visual.vertex_colors = [1.0, 0.0, 0.0]
        self._mesh_j.append(mesh)

    self._faces = self._loader.mano_group_layer.f.cpu().numpy()

    w = self._loader.dimensions[0]
    h = self._loader.dimensions[1]
    self._r = pyrender.OffscreenRenderer(viewport_width=w, viewport_height=h)

    self._render_dir = [
        os.path.join(os.path.dirname(__file__), "..", "data", "render",
                     self._name, self._loader.serials[c])
        for c in range(self._loader.num_cameras)
    ]
    for d in self._render_dir:
        os.makedirs(d, exist_ok=True)

def render_depth_map_mesh(self, K, R, t, height, width, znear=0.05, zfar=1500):
    scene = pyrender.Scene()

    mesh = pyrender.Mesh(
        primitives=[
            pyrender.Primitive(
                positions=self.verts,
                normals=self.normals,
                color_0=self.colors,
                indices=self.faces,
                mode=pyrender.GLTF.TRIANGLES,
            )
        ],
        is_visible=True,
    )
    mesh_node = pyrender.Node(mesh=mesh, matrix=np.eye(4))
    scene.add_node(mesh_node)

    cam = pyrender.IntrinsicsCamera(
        fx=K[0, 0], fy=K[1, 1], cx=K[0, 2], cy=K[1, 2],
        znear=znear, zfar=zfar,
    )
    # Camera-to-world pose from the world-to-camera extrinsics (R, t),
    # converted from the OpenCV to the OpenGL camera convention.
    T = np.eye(4)
    T[:3, :3] = R.T
    T[:3, 3] = (-R.T @ t.reshape(3, 1)).ravel()
    cv2gl = np.array([[1, 0, 0, 0],
                      [0, -1, 0, 0],
                      [0, 0, -1, 0],
                      [0, 0, 0, 1]])
    T = T @ cv2gl
    cam_node = pyrender.Node(camera=cam, matrix=T)
    scene.add_node(cam_node)

    light = pyrender.DirectionalLight(color=np.ones(3), intensity=3)
    light_node = pyrender.Node(light=light, matrix=np.eye(4))
    scene.add_node(light_node, parent_node=cam_node)

    render = pyrender.OffscreenRenderer(self.width, self.height)
    color, depth = render.render(scene)
    return depth

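# Hedged sketch: a small standalone helper (not part of the snippet above) that
# mirrors the `T @ cv2gl` pattern used in render_depth_map_mesh, turning OpenCV
# world-to-camera extrinsics (R, t) into the camera-to-world pose pyrender expects
# (OpenGL convention: camera looks down -Z, y up).
import numpy as np

def opencv_extrinsics_to_pyrender_pose(R, t):
    # Invert the world-to-camera transform to get camera-to-world.
    T = np.eye(4)
    T[:3, :3] = R.T
    T[:3, 3] = -R.T @ np.asarray(t, dtype=float).reshape(3)
    # Flip the y and z axes to move from the OpenCV to the OpenGL camera frame.
    cv2gl = np.diag([1.0, -1.0, -1.0, 1.0])
    return T @ cv2gl
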
def __call__(self, vertices, camera_translation, image):
    material = pyrender.MetallicRoughnessMaterial(
        metallicFactor=0.2,
        alphaMode='OPAQUE',
        baseColorFactor=(0.8, 0.3, 0.3, 1.0))

    camera_translation[0] *= -1.
    mesh = trimesh.Trimesh(vertices, self.faces)
    rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])
    mesh.apply_transform(rot)
    mesh = pyrender.Mesh.from_trimesh(mesh, material=material)

    scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))
    scene.add(mesh, 'mesh')

    camera_pose = np.eye(4)
    camera_pose[:3, 3] = camera_translation
    camera = pyrender.IntrinsicsCamera(fx=self.focal_length, fy=self.focal_length,
                                       cx=self.camera_center[0], cy=self.camera_center[1])
    scene.add(camera, pose=camera_pose)

    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)
    light_pose = np.eye(4)
    light_pose[:3, 3] = np.array([0, -1, 1])
    scene.add(light, pose=light_pose)
    light_pose[:3, 3] = np.array([0, 1, 1])
    scene.add(light, pose=light_pose)
    light_pose[:3, 3] = np.array([1, 1, 2])
    scene.add(light, pose=light_pose)

    color, rend_depth = self.renderer.render(scene, flags=pyrender.RenderFlags.RGBA)
    color = color.astype(np.float32) / 255.0
    valid_mask = (rend_depth > 0)[:, :, None]
    output_img = (color[:, :, :3] * valid_mask + (1 - valid_mask) * image)
    return output_img

def __call__(self, height, width, intrinsics, pose, mesh):
    self.renderer.viewport_height = height
    self.renderer.viewport_width = width
    self.scene.clear()
    self.scene.add(mesh)
    cam = pyrender.IntrinsicsCamera(cx=intrinsics[0, 2], cy=intrinsics[1, 2],
                                    fx=intrinsics[0, 0], fy=intrinsics[1, 1])
    self.scene.add(cam, pose=self.fix_pose(pose))
    return self.renderer.render(self.scene)  # , self.render_flags

def __init__(self, z=0, x=0, y=0, width=1024, light_color=None, f=None):
    if light_color is None:
        light_color = np.array([1.0, 1.0, 1.0])
    if f is None:
        f = np.array([4754.97941935 / 2, 4754.97941935 / 2])

    self.mesh = None
    frustum = {"near": 0.01, "far": 100.0, "height": 1024, "width": width}
    camera_params = {
        "c": np.array([x, y]),
        "k": np.array([-0.19816071, 0.92822711, 0, 0, 0]),
        "f": f,
    }
    intensity = 1.5
    self.rgb_per_v = None

    self.scene = pyrender.Scene(ambient_light=[0.2, 0.2, 0.2], bg_color=[255, 255, 255])
    camera = pyrender.IntrinsicsCamera(
        fx=camera_params["f"][0],
        fy=camera_params["f"][1],
        cx=camera_params["c"][0],
        cy=camera_params["c"][1],
        znear=frustum["near"],
        zfar=frustum["far"],
    )
    camera_pose = np.eye(4)
    camera_pose[:3, 3] = np.array([0, 0, 1.0 - z])
    self.scene.add(camera, pose=camera_pose)

    # Point lights placed around the camera axis
    angle = np.pi / 6.0
    pos = [0, 0, 1]
    light = pyrender.PointLight(color=light_color, intensity=intensity)
    light_pose = np.eye(4)
    light_pose[:3, 3] = pos
    self.scene.add(light, pose=light_pose.copy())
    light_pose[:3, 3] = cv2.Rodrigues(np.array([angle, 0, 0]))[0].dot(pos)
    self.scene.add(light, pose=light_pose.copy())
    light_pose[:3, 3] = cv2.Rodrigues(np.array([-angle, 0, 0]))[0].dot(pos)
    self.scene.add(light, pose=light_pose.copy())
    light_pose[:3, 3] = cv2.Rodrigues(np.array([0, -angle, 0]))[0].dot(pos)
    self.scene.add(light, pose=light_pose.copy())
    light_pose[:3, 3] = cv2.Rodrigues(np.array([0, angle, 0]))[0].dot(pos)
    self.scene.add(light, pose=light_pose.copy())

    self.r = pyrender.OffscreenRenderer(viewport_width=frustum["width"],
                                        viewport_height=frustum["height"])

def __call__(self, verts, cam_trans, img=None, angle=None, axis=None,
             mesh_filename=None, color=[0.8, 0.3, 0.3], return_mask=False):
    mesh = trimesh.Trimesh(verts, self.faces, process=False)
    Rx = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])
    mesh.apply_transform(Rx)
    cam_trans[0] *= -1.

    if angle and axis:
        # Apply the given rotation to the mesh - useful for rendering from different views
        R = trimesh.transformations.rotation_matrix(math.radians(angle), axis)
        mesh.apply_transform(R)

    material = pyrender.MetallicRoughnessMaterial(
        metallicFactor=0.2,
        alphaMode='OPAQUE',
        baseColorFactor=(color[0], color[1], color[2], 1.0))
    mesh = pyrender.Mesh.from_trimesh(mesh, material=material)
    mesh_node = self.scene.add(mesh, 'mesh')

    camera_pose = np.eye(4)
    camera_pose[:3, 3] = cam_trans
    camera = pyrender.IntrinsicsCamera(fx=config.FOCAL_LENGTH, fy=config.FOCAL_LENGTH,
                                       cx=self.camera_center[0], cy=self.camera_center[1])
    cam_node = self.scene.add(camera, pose=camera_pose)

    rgb, rend_depth = self.renderer.render(self.scene, flags=RenderFlags.RGBA)
    # Remove the transient nodes right after rendering so the persistent scene
    # stays clean even when only the mask is returned
    self.scene.remove_node(mesh_node)
    self.scene.remove_node(cam_node)

    valid_mask = (rend_depth > 0)
    if return_mask:
        return valid_mask

    if img is None:
        img = np.zeros((self.img_res, self.img_res, 3))
    valid_mask = valid_mask[:, :, None]
    output_img = rgb[:, :, :-1] * valid_mask + (1 - valid_mask) * img
    return output_img.astype(np.uint8)

def update_camera(self, intrinsics):
    for node in self.scene.get_nodes():
        if node.name == 'camera':
            self.scene.remove_node(node)
    pc = pyrender.IntrinsicsCamera(fx=intrinsics[0, 0], fy=intrinsics[1, 1],
                                   cx=intrinsics[0, 2], cy=intrinsics[1, 2],
                                   zfar=1000)
    camera_pose = np.eye(4)
    self.scene.add(pc, pose=camera_pose, name='camera')

def __init__(self, object_name_or_mesh, K, camera_name, mesh_scale=1.0):
    """
    object_name_or_mesh: either an object name string (for objects), or
        {'vertices': ..., 'faces': ...} (for the hand mesh)
    K: 3x3 intrinsics matrix
    mesh_scale: scale factor applied to the mesh (1.0 for hand, 1e-3 for object)
    """
    self.K = K
    self.camera_name = camera_name
    if camera_name == 'kinect2_middle':
        self.flip_fn = lambda x: cv2.flip(cv2.flip(x, 0), 1)
        self.out_imsize = (960, 540)
    elif camera_name == 'kinect2_left':
        self.flip_fn = lambda x: cv2.flip(cv2.transpose(x), 1)
        self.out_imsize = (540, 960)
    elif camera_name == 'kinect2_right':
        self.flip_fn = lambda x: cv2.flip(cv2.transpose(x), 0)
        self.out_imsize = (540, 960)
    else:
        raise NotImplementedError

    # mesh
    if isinstance(object_name_or_mesh, str):
        filename = osp.join('data', 'object_models',
                            '{:s}.ply'.format(object_name_or_mesh))
        mesh_t = trimesh.load_mesh(filename)
    elif isinstance(object_name_or_mesh, dict):
        mesh_t = trimesh.Trimesh(vertices=object_name_or_mesh['vertices'],
                                 faces=object_name_or_mesh['faces'])
    else:
        raise NotImplementedError
    mesh_t.apply_transform(np.diag([mesh_scale, mesh_scale, mesh_scale, 1]))
    self.oX = mesh_t.vertices
    mesh = pyrender.Mesh.from_trimesh(mesh_t)

    self.scene = pyrender.Scene()
    self.scene.add(mesh, pose=np.eye(4))

    # camera
    camera = pyrender.IntrinsicsCamera(K[0, 0], K[1, 1], K[0, 2], K[1, 2],
                                       znear=0.1, zfar=2.0)
    self.camera_node = pyrender.Node(camera=camera, matrix=np.eye(4))
    self.scene.add_node(self.camera_node)
    self.cTopengl = np.eye(4)
    self.cTopengl[:3, :3] = txe.euler2mat(np.pi, 0, 0)

    # renderer object
    self.renderer = pyrender.OffscreenRenderer(960, 540)

def render_glcam(model_in,   # model name or trimesh
                 K=None,
                 Rt=None,
                 scale=1.0,
                 rend_size=(512, 512),
                 light_trans=np.array([[0], [100], [0]]),
                 flat_shading=False):
    # Mesh creation
    if isinstance(model_in, str):
        mesh = trimesh.load(model_in, process=False)
    else:
        mesh = model_in.copy()
    pr_mesh = pyrender.Mesh.from_trimesh(mesh)

    # Scene creation
    scene = pyrender.Scene()

    # Adding objects to the scene
    face_node = scene.add(pr_mesh)

    # Calculate fx fy cx cy from K
    fx, fy = K[0][0] * scale, K[1][1] * scale
    cx, cy = K[0][2] * scale, K[1][2] * scale

    # Camera creation
    cam = pyrender.IntrinsicsCamera(fx, fy, cx, cy, znear=0.1, zfar=100000)
    cam_pose = np.eye(4)
    cam_pose[:3, :3] = Rt[:3, :3].T
    cam_pose[:3, 3] = -Rt[:3, :3].T.dot(Rt[:, 3])
    scene.add(cam, pose=cam_pose)

    # Set up the light
    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=10.0)
    light_pose = cam_pose.copy()
    light_pose[0:3, :] += light_trans
    scene.add(light, pose=light_pose)

    # Rendering offscreen from that camera
    r = pyrender.OffscreenRenderer(viewport_width=rend_size[1],
                                   viewport_height=rend_size[0],
                                   point_size=1.0)
    if flat_shading:
        color, depth = r.render(scene, flags=pyrender.constants.RenderFlags.FLAT)
    else:
        color, depth = r.render(scene)

    # rgb to bgr for cv2
    color = color[:, :, [2, 1, 0]]
    return depth, color

def render_smpl(vertices, faces, image, intrinsics, pose, transl,
                alpha=1.0, filename='render_sample.png'):
    img_size = image.shape[-2]
    material = pyrender.MetallicRoughnessMaterial(
        metallicFactor=0.2,
        alphaMode='OPAQUE',
        baseColorFactor=(0.8, 0.3, 0.3, 1.0))

    # Generate SMPL vertices mesh
    mesh = trimesh.Trimesh(vertices, faces)
    # Default rotation of the SMPL body model
    rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])
    mesh.apply_transform(rot)
    mesh = pyrender.Mesh.from_trimesh(mesh, material=material)

    scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))
    scene.add(mesh, 'mesh')

    camera_pose = np.eye(4)
    camera_pose[:3, :3] = pose
    camera_pose[:3, 3] = transl
    camera = pyrender.IntrinsicsCamera(fx=intrinsics[0, 0], fy=intrinsics[1, 1],
                                       cx=intrinsics[0, 2], cy=intrinsics[1, 2])
    scene.add(camera, pose=camera_pose)

    # Light information
    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)
    light_pose = np.eye(4)
    light_pose[:3, 3] = np.array([0, -1, 1])
    scene.add(light, pose=light_pose)
    light_pose[:3, 3] = np.array([0, 1, 1])
    scene.add(light, pose=light_pose)
    light_pose[:3, 3] = np.array([1, 1, 2])
    scene.add(light, pose=light_pose)

    renderer = pyrender.OffscreenRenderer(viewport_width=img_size,
                                          viewport_height=img_size,
                                          point_size=1.0)
    color, rend_depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)
    color = color.astype(np.float32) / 255.0
    valid_mask = (rend_depth > 0)[:, :, None]
    output_img = color[:, :, :3] * valid_mask + (1 - valid_mask) * image / 255.0
    cv2.imwrite(filename, 255 * output_img)

def depth_map(fx, fy, cx, cy):
    """The function generates depth maps for each model.
    Each model will have number_positions depth maps.

    :param fx: Focal length in X-Axis
    :param fy: Focal length in Y-Axis
    :param cx: Centre of the image in x-coordinates
    :param cy: Centre of the image in y-coordinates
    """
    rootdir = "/home/aditya/Documents/Sem_3/TDCV/project_2/tracking/ballet_vicon/mesh/"
    path, dirs, files = next(os.walk(rootdir))
    files = sorted(files)

    # for i in range(len(files)):
    for i in range(1):
        if files[i].endswith('.off'):
            files[i] = files[i][:-4]

            parent_dir_depth_map = "/home/aditya/PycharmProjects/OpenCV-python/Project_2"
            directory = files[i]
            path = os.path.join(parent_dir_depth_map, directory)
            os.mkdir(path)  # Creates a directory per model in the parent directory
            print("\nCreating directory ", files[i])

            ballet_vicon_trimesh = trimesh.load(rootdir + files[i] + ".off")
            mesh = pyrender.Mesh.from_trimesh(ballet_vicon_trimesh)
            extrinsic_matrix = cam.camera_extrinsics(rootdir + files[i] + ".off")
            camera = pyrender.IntrinsicsCamera(fx, fy, cx, cy)

            for j in range(np.shape(extrinsic_matrix)[0]):
                scene = pyrender.Scene()
                scene.add(mesh)
                scene.add(camera, pose=extrinsic_matrix[j])
                r = pyrender.OffscreenRenderer(1080, 1080)
                color, depth = r.render(scene)
                print(depth)

                fig = plt.figure()
                plt.axis('off')
                plt.imshow(depth, cmap=plt.cm.gray_r)
                plt.imshow(color)
                fig.savefig(parent_dir_depth_map + "/" + files[i] + "/figure_" + str(j))
                fig.savefig(parent_dir_depth_map + "/" + files[i] + "/cfig_" + str(j))
                plt.close()

            np.savez_compressed(
                "/home/aditya/PycharmProjects/OpenCV-python/Project_2/TDCV-Project-2/trial1.npz",
                depth=depth)

def insert_camera(self, cam_param):
    focal, princpt, cam_no = cam_param['focal'], cam_param['princpt'], cam_param['cam_no']
    camera = pyrender.IntrinsicsCamera(fx=focal[0], fy=focal[1],
                                       cx=princpt[0], cy=princpt[1])
    self.scene.add(camera, 'cam_{}'.format(cam_no),
                   np.linalg.inv(cam_param['extrinsics']))

    # light
    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=.8)
    self.scene.add(light, 'light_{}'.format(cam_no),
                   np.linalg.inv(cam_param['extrinsics']))

def __call__(self, vertices, camera_translation, image, angle=180, rot_axis=[1, 0, 0]):
    material = pyrender.MetallicRoughnessMaterial(
        metallicFactor=0.2,
        alphaMode='OPAQUE',
        baseColorFactor=(0.8, 0.3, 0.3, 1.0))

    camera_translation[0] *= -1.
    mesh = trimesh.Trimesh(vertices, self.faces)
    if angle != 180:
        rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])
        mesh.apply_transform(rot)
        rot = trimesh.transformations.rotation_matrix(np.radians(90), [0, 1, 0])
        mesh.apply_transform(rot)
    else:
        rot = trimesh.transformations.rotation_matrix(np.radians(angle), rot_axis)
        mesh.apply_transform(rot)
    mesh = pyrender.Mesh.from_trimesh(mesh, material=material)

    scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))
    scene.add(mesh, 'mesh')

    camera_pose = np.eye(4)
    camera_pose[:3, 3] = camera_translation
    camera = pyrender.IntrinsicsCamera(fx=self.focal_length, fy=self.focal_length,
                                       cx=self.camera_center[0], cy=self.camera_center[1])
    scene.add(camera, pose=camera_pose)

    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)
    light_pose = np.eye(4)
    light_pose[:3, 3] = np.array([0, -1, 1])
    scene.add(light, pose=light_pose)
    light_pose[:3, 3] = np.array([0, 1, 1])
    scene.add(light, pose=light_pose)
    light_pose[:3, 3] = np.array([1, 1, 2])
    scene.add(light, pose=light_pose)

    color, rend_depth = self.renderer.render(scene, flags=pyrender.RenderFlags.RGBA)
    color = color.astype(np.float32) / 255.0
    valid_mask = (rend_depth > 0)[:, :, None]
    output_img = (color[:, :, :3] * valid_mask + (1 - valid_mask) * image)
    return output_img

def __call__(self, vertices, camera_pose, image, color=(0.8, 0.3, 0.3, 1.0)):
    material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.2,
                                                  alphaMode='OPAQUE',
                                                  baseColorFactor=color)
    mesh = trimesh.Trimesh(vertices, self.faces)
    # Rotate mesh 180 deg around x (pyrender coordinate frame)
    rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])
    mesh.apply_transform(rot)
    mesh = pyrender.Mesh.from_trimesh(mesh, material=material)

    # Rotate trafo 180 deg around x (pyrender coordinate frame)
    Rx = np.array([[1, 0, 0, 0],
                   [0, -1, 0, 0],
                   [0, 0, -1, 0],
                   [0, 0, 0, 1]], dtype=float)
    camera_pose = np.dot(camera_pose, Rx)

    scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))
    scene.add(mesh, 'mesh')

    camera = pyrender.IntrinsicsCamera(fx=self.focal_length[0], fy=self.focal_length[1],
                                       cx=self.camera_center[0], cy=self.camera_center[1])
    scene.add(camera, pose=camera_pose)

    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)
    light_pose = np.eye(4)
    light_pose[:3, 3] = np.array([0, -1, 1])
    scene.add(light, pose=light_pose)
    light_pose[:3, 3] = np.array([0, 1, 1])
    scene.add(light, pose=light_pose)
    light_pose[:3, 3] = np.array([1, 1, 2])
    scene.add(light, pose=light_pose)

    color, rend_depth = self.renderer.render(scene, flags=pyrender.RenderFlags.RGBA)
    valid_mask = (rend_depth > 0)[:, :, None]
    output_img = (color[:, :, :3] * valid_mask + (1 - valid_mask) * image).astype(np.uint8)
    return output_img

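# Hedged note on the overlay pattern shared by several of the __call__ methods above:
# the rendered image is composited onto the input image using the depth buffer as a
# foreground mask. A minimal standalone version (shapes assumed to match, color and
# background assumed to be float arrays in [0, 1]):
import numpy as np

def composite_render(color_rgba, rend_depth, background):
    # Pixels with depth > 0 were covered by the mesh; keep the render there and
    # keep the original background everywhere else.
    valid_mask = (rend_depth > 0)[:, :, None]
    return color_rgba[:, :, :3] * valid_mask + (1 - valid_mask) * background
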
def __init__(self, image_name, camera_spec):
    self.image_name = image_name
    self.width = camera_spec[0]
    self.height = camera_spec[1]
    self.K = np.array([[camera_spec[2], 0.0, camera_spec[4]],
                       [0.0, camera_spec[3], camera_spec[5]],
                       [0.0, 0.0, 1.0]])
    quat = Quaternion(camera_spec[6], camera_spec[7], camera_spec[8], camera_spec[9])
    self.R = quat.rotation_matrix
    self.t = np.array([camera_spec[10], camera_spec[11], camera_spec[12]]).transpose()

    # Convert pose from Y-Down to Y-Up ("OpenGL") coordinates.
    X180 = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
    self.R = np.dot(X180, self.R)
    self.t = np.dot(X180, self.t)
    self.pose = np.concatenate(
        (np.concatenate((self.R, np.expand_dims(self.t, axis=1)), axis=1),
         np.array([[0, 0, 0, 1]])), axis=0)
    # OpenGL expects us to provide a camera-to-world transform, so
    # invert the pose.
    self.pose = np.linalg.inv(self.pose)

    # Save the "standard" y-down pose as well.
    self.ydown_pose = np.concatenate(
        (np.concatenate((self.R, np.expand_dims(self.t, axis=1)), axis=1),
         np.array([[0, 0, 0, 1]])), axis=0)

    # Compute a reasonable zNear and zFar, based on the projection
    # of the camera location on the (negative) viewing direction,
    # assuming that the scene is located near the origin.
    camera_pos = -np.dot(np.transpose(self.R), self.t)
    view_dir = np.dot(np.transpose(self.R), np.array([[0.0], [0.0], [-1.0]]))
    scene_distance = -np.dot(np.transpose(camera_pos), view_dir)
    znear = max(scene_distance - 1e5, 1.0)
    zfar = scene_distance + 1e5

    self.pyrender_camera = pyrender.IntrinsicsCamera(fx=camera_spec[2],
                                                     fy=camera_spec[3],
                                                     cx=camera_spec[4],
                                                     cy=camera_spec[5],
                                                     znear=znear,
                                                     zfar=zfar,
                                                     name=image_name)

def __init__(self, options):
    """Constructor."""
    # RGB frames path
    self.rgb_path = os.path.join('datasets', 'ycb-video', options.video_id, 'rgb')

    # Estimates path
    self.estimates_path = os.path.join('results', options.algorithm, 'nrt',
                                       options.mask_set, 'validation',
                                       options.object, options.video_id,
                                       'object-tracking_estimate.txt')

    # Mesh path
    object_mesh_path = os.path.join('models', 'YCB_models', 'models',
                                    options.object, 'textured')
    if options.mesh_type == 'low-quality':
        object_mesh_path += '_simple'
    object_mesh_path += '.obj'

    # Mesh
    trimesh_mesh = trimesh.load(object_mesh_path)
    mesh = pyrender.Mesh.from_trimesh(trimesh_mesh)

    # Scene
    self.scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0])

    # Camera
    fx = 1066.8
    fy = 1067.5
    cx = 312.99
    cy = 241.31
    width = 640
    height = 480
    camera_transform = Quaternion(axis=[1.0, 0.0, 0.0], angle=numpy.pi).transformation_matrix
    self.camera = pyrender.IntrinsicsCamera(fx=fx, fy=fy, cx=cx, cy=cy)
    self.scene.add(self.camera, pose=camera_transform)

    # Light
    self.light = pyrender.PointLight(intensity=20.0)
    self.scene.add(self.light)

    # Object node
    self.mesh_node = pyrender.Node(mesh=mesh, matrix=numpy.eye(4))
    self.scene.add_node(self.mesh_node)

    # Renderer
    self.renderer = pyrender.OffscreenRenderer(width, height)

def __init__(self, focal_length=5000, center=None, img_w=None, img_h=None):
    self.renderer = pyrender.OffscreenRenderer(viewport_width=img_w,
                                               viewport_height=img_h,
                                               point_size=1.0)
    self.focal_length = focal_length
    self.camera_center = center
    self.camera = pyrender.IntrinsicsCamera(fx=self.focal_length, fy=self.focal_length,
                                            cx=self.camera_center[0],
                                            cy=self.camera_center[1],
                                            zfar=1000)
    self.camera_pose = np.eye(4)
    self.camera_pose[:3, :3] = np.eye(3)

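# A self-contained usage sketch (illustrative only, not taken from the snippets above):
# render a sphere placed 2.5 m in front of an IntrinsicsCamera and read back color and
# depth. Depending on the machine, an offscreen context may require setting
# PYOPENGL_PLATFORM=egl or osmesa before importing pyrender.
import numpy as np
import trimesh
import pyrender

sphere = pyrender.Mesh.from_trimesh(trimesh.creation.uv_sphere(radius=0.5))
scene = pyrender.Scene(ambient_light=[0.3, 0.3, 0.3])
scene.add(sphere, pose=np.array([[1, 0, 0, 0],
                                 [0, 1, 0, 0],
                                 [0, 0, 1, -2.5],
                                 [0, 0, 0, 1]], dtype=float))

camera = pyrender.IntrinsicsCamera(fx=600.0, fy=600.0, cx=320.0, cy=240.0)
scene.add(camera, pose=np.eye(4))  # camera at the origin, looking down -Z
scene.add(pyrender.DirectionalLight(color=np.ones(3), intensity=3.0), pose=np.eye(4))

renderer = pyrender.OffscreenRenderer(640, 480)
color, depth = renderer.render(scene)
print(color.shape, depth[240, 320])  # depth at the principal point should be ~2.0
renderer.delete()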