def test_mesh_renderer_to(self):
    """
    Test moving all the tensors in the mesh renderer to a new device.
    """
    device1 = torch.device("cpu")
    R, T = look_at_view_transform(1500, 0.0, 0.0)

    # Init shader settings
    materials = Materials(device=device1)
    lights = PointLights(device=device1)
    lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device1)[None]

    raster_settings = RasterizationSettings(
        image_size=256, blur_radius=0.0, faces_per_pixel=1
    )
    cameras = FoVPerspectiveCameras(
        device=device1, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=100
    )
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

    blend_params = BlendParams(
        1e-4,
        1e-4,
        background_color=torch.zeros(3, dtype=torch.float32, device=device1),
    )
    shader = SoftPhongShader(
        lights=lights,
        cameras=cameras,
        materials=materials,
        blend_params=blend_params,
    )
    renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

    mesh = ico_sphere(2, device1)
    verts_padded = mesh.verts_padded()
    textures = TexturesVertex(
        verts_features=torch.ones_like(verts_padded, device=device1)
    )
    mesh.textures = textures
    self._check_mesh_renderer_props_on_device(renderer, device1)

    # Test rendering on cpu
    output_images = renderer(mesh)
    self.assertEqual(output_images.device, device1)

    # Move renderer and mesh to another device and re-render.
    # This also tests that background_color is correctly moved to
    # the new device
    device2 = torch.device("cuda:0")
    renderer = renderer.to(device2)
    mesh = mesh.to(device2)
    self._check_mesh_renderer_props_on_device(renderer, device2)
    output_images = renderer(mesh)
    self.assertEqual(output_images.device, device2)
def render(mesh, model_id, shapenet_dataset, device, camera=None):
    # Rendering settings (alternative path using the ShapeNet dataset renderer).
    # camera_distance = 1
    # camera_elevation = 0.5 + 100 * random.random()
    # camera_azimuth = 30 + 90 * random.random()
    # R, T = look_at_view_transform(camera_distance, camera_elevation, camera_azimuth)
    # camera = FoVPerspectiveCameras(R=R, T=T, device=device)
    # raster_settings = RasterizationSettings(image_size=512)
    # lights = PointLights(location=torch.tensor([0.0, 1.0, -2.0], device=device)[None], device=device)
    # rendering_settings = cameras, raster_settings, lights
    # image = shapenet_dataset.render(
    #     model_ids=[model_id],
    #     device=device,
    #     cameras=camera,
    #     raster_settings=raster_settings,
    #     lights=lights,
    # )[..., :3]

    if not camera:
        # Sample a random viewpoint at a fixed distance of 1.9.
        camera_elevation = 0 + 180 * torch.rand(1)        # torch.linspace(0, 180, batch_size)
        camera_azimuth = -180 + 2 * 180 * torch.rand(1)   # torch.linspace(-180, 180, batch_size)
        R, T = look_at_view_transform(1.9, camera_elevation, camera_azimuth)
        camera = FoVPerspectiveCameras(R=R, T=T, device=device)
        camera.eval()  # necessary?

    raster_settings = RasterizationSettings(image_size=224)  # TODO ?????
    lights = PointLights(
        location=torch.tensor([0.0, 1.0, -2.0], device=device)[None], device=device
    )
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=camera, raster_settings=raster_settings),
        shader=HardPhongShader(device=device, cameras=camera),
    )
    renderer.eval()

    image = renderer(mesh)[..., :3]
    # plt.imshow(image.squeeze().detach().cpu().numpy())
    # plt.show()
    image = image.permute(0, 3, 1, 2)  # NHWC -> NCHW
    return image, camera  # TODO: batch of images
def __init__(self, image_size):
    super(Renderer, self).__init__()
    self.image_size = image_size
    self.dog_obj = load_objs_as_meshes(['data/dog_B/dog_B/dog_B_tpose.obj'])
    raster_settings = RasterizationSettings(
        image_size=self.image_size,
        blur_radius=0.0,
        faces_per_pixel=1,
        bin_size=None
    )
    R, T = look_at_view_transform(2.7, 0, 0)
    cameras = OpenGLPerspectiveCameras(device=R.device, R=R, T=T)
    lights = PointLights(device=R.device, location=[[0.0, 1.0, 0.0]])
    self.renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings
        ),
        shader=SoftPhongShader(
            device=R.device, cameras=cameras, lights=lights
        )
    )
def set_renderer(image_size=512, use_sfm=False):
    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Initialize the camera.
    R, T = look_at_view_transform(2.0, 0, 180)
    if use_sfm:
        cameras = SfMPerspectiveCameras(focal_length=580.0, device=device, R=R, T=T)
    else:
        cameras = OpenGLOrthographicCameras(device=device, R=R, T=T)

    raster_settings = RasterizationSettings(
        image_size=image_size,
        blur_radius=0.0,
        faces_per_pixel=1,
        bin_size=None,
        max_faces_per_bin=None,
    )
    lights = PointLights(device=device, location=((2.0, 2.0, 2.0),))
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    shader = HardPhongShader(device=device, cameras=cameras, lights=lights)

    if use_sfm:
        renderer = MeshRendererWithDepth(rasterizer=rasterizer, shader=shader)
    else:
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
    return renderer
def set_renderer():
    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Initialize an orthographic camera.
    R, T = look_at_view_transform(2.0, 0, 180)
    cameras = OpenGLOrthographicCameras(device=device, R=R, T=T)

    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=0.0,
        faces_per_pixel=1,
        bin_size=None,
        max_faces_per_bin=None,
    )
    lights = PointLights(device=device, location=((2.0, 2.0, 2.0),))
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=HardPhongShader(device=device, cameras=cameras, lights=lights),
    )
    return renderer
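# Minimal usage sketch for set_renderer() above (not part of the original snippet).
# Assumes a CUDA device and a textured .obj file; the path "model.obj" is hypothetical.
import torch
from pytorch3d.io import load_objs_as_meshes

renderer = set_renderer()
mesh = load_objs_as_meshes(["model.obj"], device=torch.device("cuda:0"))
images = renderer(mesh)                 # (1, 512, 512, 4) RGBA tensor on the GPU
rgb = images[0, ..., :3].cpu().numpy()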
def visualize_pred(img, category, pred, image_name, mesh_path,
                   down_sample_rate=8, device='cuda:0'):
    render_image_size = max(IMAGE_SIZES[category])
    crop_size = IMAGE_SIZES[category]

    cameras = OpenGLPerspectiveCameras(device=device, fov=12.0)
    raster_settings = RasterizationSettings(image_size=render_image_size,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=0)
    raster_settings1 = RasterizationSettings(
        image_size=render_image_size // down_sample_rate,
        blur_radius=0.0,
        faces_per_pixel=1,
        bin_size=0)
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings1)
    lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))
    phong_renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=HardPhongShader(device=device, lights=lights, cameras=cameras))

    theta_pred = pred['theta']
    elevation_pred = pred['elevation']
    azimuth_pred = pred['azimuth']
    distance_pred = pred['distance']
    cad_idx = pred['cad_idx']
    dx = pred['dx'] * down_sample_rate
    dy = pred['dy'] * down_sample_rate

    x3d, xface = load_off(mesh_path + '/%02d.off' % cad_idx)
    verts = torch.from_numpy(x3d).to(device)
    verts = pre_process_mesh_pascal(verts)
    faces = torch.from_numpy(xface).to(device)
    verts_rgb = torch.ones_like(verts)[None]
    textures = Textures(verts_rgb.to(device))
    meshes = Meshes(verts=[verts], faces=[faces], textures=textures)

    img_ = get_img(theta_pred, elevation_pred, azimuth_pred, distance_pred,
                   meshes, phong_renderer, crop_size, render_image_size, device)
    C = camera_position_from_spherical_angles(distance_pred, elevation_pred,
                                              azimuth_pred, degrees=False,
                                              device=device)
    # get_image = np.concatenate((img, alpha_merge_imgs(img, img_)), axis=1)
    img_ = shift_img(img_, dx, dy)
    get_image = alpha_merge_imgs(img, img_)
    Image.fromarray(get_image).save(image_name)
def define_render(num):
    shapenet_cam_params_file = '../data/metadata/rendering_metadata.json'
    with open(shapenet_cam_params_file) as f:
        shapenet_cam_params = json.load(f)

    param_num = num
    R, T = look_at_view_transform(
        dist=shapenet_cam_params["distance"][param_num] * 5,
        elev=shapenet_cam_params["elevation"][param_num],
        azim=shapenet_cam_params["azimuth"][param_num])
    cameras = FoVPerspectiveCameras(
        device=device, R=R, T=T,
        fov=shapenet_cam_params["field_of_view"][param_num])
    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=0.0,
        faces_per_pixel=1,
    )
    lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=SoftPhongShader(device=device, cameras=cameras, lights=lights))
    return renderer
def set_renderer(self):
    cameras = OpenGLPerspectiveCameras(device=self.cuda_device,
                                       degrees=True,
                                       fov=VIEW['fov'],
                                       znear=VIEW['znear'],
                                       zfar=VIEW['zfar'])
    raster_settings = RasterizationSettings(image_size=VIEW['viewport'][0],
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=0)
    lights = DirectionalLights(
        device=self.cuda_device,
        direction=((-40, 200, 100),),
        ambient_color=((0.5, 0.5, 0.5),),
        diffuse_color=((0.5, 0.5, 0.5),),
        specular_color=((0.0, 0.0, 0.0),),
    )
    self.renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras,
                                  raster_settings=raster_settings),
        shader=TexturedSoftPhongShader(device=self.cuda_device,
                                       cameras=cameras,
                                       lights=lights))
def render_obj(verts, faces, distance, elevation, azimuth):
    device = torch.device("cuda:0")

    verts_rgb = torch.ones_like(verts)[None]
    textures = Textures(verts_rgb=verts_rgb.to(device))
    cur_mesh = Meshes(verts=[verts.to(device)],
                      faces=[faces.to(device)],
                      textures=textures)

    cameras = OpenGLPerspectiveCameras(device=device)
    blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
    raster_settings = RasterizationSettings(image_size=256,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=0)
    lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))
    # HardPhongShader with the cameras passed explicitly; the original used
    # PhongShader(device=device, lights=lights), which is not a PyTorch3D class.
    phong_renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=HardPhongShader(device=device, cameras=cameras, lights=lights))

    R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
    return phong_renderer(meshes_world=cur_mesh, R=R, T=T).cpu().numpy()
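# Hypothetical usage sketch for render_obj above (not part of the original snippet).
# The .obj path is made up; the function moves the tensors to cuda:0 itself.
from pytorch3d.io import load_obj

verts, faces, _ = load_obj("model.obj")
image = render_obj(verts, faces.verts_idx, distance=2.7, elevation=10.0, azimuth=30.0)
# image: (1, 256, 256, 4) numpy array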
def createRenderer(image_size, faces_per_pixel, lights_location):
    # Function: createRenderer
    # Inputs: image_size, faces_per_pixel, lights_location
    # Process: creates an image renderer
    # Output: returns renderer
    cameras = OpenGLPerspectiveCameras()

    # Settings for the rasterizer
    raster_settings = RasterizationSettings(
        image_size=image_size,
        blur_radius=0.0,
        faces_per_pixel=faces_per_pixel,
    )

    # We can add a point light in front of the object.
    lights = PointLights(location=(lights_location,))

    created_renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings
        ),
        shader=HardPhongShader(cameras=cameras, lights=lights)
    )
    return created_renderer
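# Hypothetical usage sketch for createRenderer above (not part of the original snippet).
# ico_sphere, the light position, and the viewpoint are illustrative only; everything
# stays on the CPU because the function does not take a device argument.
import torch
from pytorch3d.utils import ico_sphere
from pytorch3d.renderer import TexturesVertex, look_at_view_transform

sphere = ico_sphere(2)
sphere.textures = TexturesVertex(verts_features=torch.ones_like(sphere.verts_padded()))
renderer = createRenderer(image_size=256, faces_per_pixel=1, lights_location=(0.0, 0.0, -3.0))
R, T = look_at_view_transform(dist=2.7, elev=10.0, azim=30.0)
images = renderer(sphere, R=R, T=T)   # (1, 256, 256, 4)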
def render_cubified_voxels(voxels: torch.Tensor,
                           shader_type=HardPhongShader,
                           device="cpu",
                           **kwargs):
    """
    Use the Cubify operator to convert input voxels to a mesh and then render that mesh.

    Args:
        voxels: FloatTensor of shape (N, D, D, D) where N is the batch size and
            D is the number of voxels along each dimension.
        shader_type: Shader to use for rendering. Examples include HardPhongShader
            (default), SoftPhongShader etc or any other type of valid Shader class.
        device: torch.device on which the tensors should be located.
        **kwargs: Accepts any of the kwargs that the renderer supports.

    Returns:
        Batch of rendered images of shape (N, H, W, 3).
    """
    cubified_voxels = cubify(voxels, CUBIFY_THRESH).to(device)
    cubified_voxels.textures = TexturesVertex(
        verts_features=torch.ones_like(cubified_voxels.verts_padded(), device=device))
    cameras = BlenderCamera(device=device)
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=kwargs.get("raster_settings", RasterizationSettings()),
        ),
        shader=shader_type(
            device=device,
            cameras=cameras,
            lights=kwargs.get("lights", PointLights()).to(device),
        ),
    )
    return renderer(cubified_voxels)
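# Hypothetical usage sketch for render_cubified_voxels above (not part of the original
# snippet): cubify and render a small solid block. The grid size and occupancy pattern
# are made up; CUBIFY_THRESH and BlenderCamera come from the surrounding module.
import torch

voxels = torch.zeros(1, 16, 16, 16)       # (N, D, D, D) occupancy grid
voxels[:, 4:12, 4:12, 4:12] = 1.0         # a solid cube in the middle of the grid
images = render_cubified_voxels(voxels)   # one rendered image per voxel grid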
def main(args):
    mesh_path = args.mesh_path
    mesh_instance = mesh.TriangleMesh(mesh_path=mesh_path)
    mesh_instance.load_pytorch_mesh_from_file()

    camera_instance = Camera()
    camera_instance.lookAt(args.dist, args.elev, args.azim)

    light_instance = Lights()
    light_instance.setup_light([args.light_x, args.light_y, args.light_z])

    rasterizer_instance = Rasterizer()
    rasterizer_instance.init_rasterizer(camera_instance.camera)

    shader_instance = Shader()
    shader_instance.setup_shader(camera_instance.camera, light_instance.light)

    renderer_instance = MeshRenderer(rasterizer=rasterizer_instance.rasterizer,
                                     shader=shader_instance.shader)

    images = renderer_instance(mesh_instance.pytorch_mesh)
    np_image = images[0].cpu().detach().numpy() * 255.0
    np_image = np_image.astype('uint8')
    pil_image = Image.fromarray(np_image)
    pil_image.save(args.out_path)
def __init__(self,
             dir: str,
             rasterization_settings: dict,
             znear: float = 1.0,
             zfar: float = 1000.0,
             scale_min: float = 0.5,
             scale_max: float = 2.0,
             device: str = 'cuda'):
    super(ToyNeuralGraphicsDataset, self).__init__()
    device = torch.device(device)
    self.device = device
    self.scale_min = scale_min
    self.scale_max = scale_max
    self.scale_range = scale_max - scale_min
    objs = [
        os.path.join(dir, f) for f in os.listdir(dir) if f.endswith('.obj')
    ]
    self.meshes = load_objs_as_meshes(objs, device=device)
    R, T = look_at_view_transform(0, 0, 0)
    self.cameras = FoVPerspectiveCameras(R=R, T=T,
                                         znear=znear,
                                         zfar=zfar,
                                         device=device)
    self.renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=self.cameras,
            raster_settings=RasterizationSettings(**rasterization_settings),
        ),
        shader=HardFlatShader(
            device=device,
            cameras=self.cameras,
        ))
def render_sil(self, meshes):
    self.renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=self.cameras,
            raster_settings=self.text_raster_settings),
        shader=SoftSilhouetteShader(blend_params=self.blend_params))
    return self.renderer(meshes_world=meshes)
def project_mesh(mesh, angle):
    start = time.time()
    m = Metadata()
    R, T = look_at_view_transform(1.75, -45, angle,
                                  up=((0, 1, 0),),
                                  at=((0, -0.25, 0),))
    cameras = OpenGLPerspectiveCameras(device=m.device, R=R, T=T)
    raster_settings = m.raster_settings
    lights = m.lights
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=HardFlatShader(cameras=cameras, device=m.device, lights=lights))

    verts = mesh.verts_list()[0]
    # faces = meshes.faces_list()[0]
    verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
    # verts_rgb = torch.ones((len(mesh.verts_list()[0]), 1))[None]  # (1, V, 3)
    textures = Textures(verts_rgb=verts_rgb.to(m.device))
    mesh.textures = textures
    mesh.textures._num_faces_per_mesh = mesh._num_faces_per_mesh.tolist()
    mesh.textures._num_verts_per_mesh = mesh._num_verts_per_mesh.tolist()

    image = renderer(mesh)
    return image
def create_renderer(self):
    self.num_angles = self.config.num_angles
    azim = torch.linspace(-1 * self.config.angle_range,
                          self.config.angle_range,
                          self.num_angles)
    R, T = look_at_view_transform(dist=1.0, elev=0, azim=azim)
    T[:, 1] = -85
    T[:, 2] = 200
    cameras = FoVPerspectiveCameras(device=self.device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=self.config.img_size,
        blur_radius=0.0,
        faces_per_pixel=1,
    )
    lights = PointLights(device=self.device, location=[[0.0, 85, 100.0]])
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=HardPhongShader(device=self.device, cameras=cameras, lights=lights))
    return renderer
def render_with_batch_size(self, batch_size, dist, light_location, output_path):
    self.meshes = self.meshes.extend(batch_size)
    self.batch_size = batch_size
    elev = torch.linspace(0, 180, batch_size)
    azim = torch.linspace(-180, 180, batch_size)
    self.R, self.T = look_at_view_transform(dist=dist, elev=elev, azim=azim)
    self.cameras = OpenGLPerspectiveCameras(device=self.device, R=self.R, T=self.T)

    # set light location
    self.light_location = light_location
    lights = PointLights(device=self.device, location=[self.light_location])

    # call the pytorch3d mesh renderer with a Phong shader
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=self.cameras,
                                  raster_settings=self.raster_settings),
        shader=TexturedSoftPhongShader(device=self.device,
                                       cameras=self.cameras,
                                       lights=lights))
    images = renderer(self.meshes, cameras=self.cameras, lights=lights)

    for i in range(self.batch_size):
        img = images[i, ..., :3].cpu().numpy() * 255
        img = img.astype('uint8')
        # the renderer outputs RGB; swap channel order for OpenCV before writing
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        cv2.imwrite(output_path + 'render-image-' + str(i) + '.png', img)
def render_mesh(verts, faces):
    device = verts[0].get_device()
    N = len(verts)
    num_verts_per_mesh = []
    for i in range(N):
        num_verts_per_mesh.append(verts[i].shape[0])
    verts_rgb = torch.ones((N, np.max(num_verts_per_mesh), 3),
                           requires_grad=False,
                           device=device)
    for i in range(N):
        verts_rgb[i, num_verts_per_mesh[i]:, :] = -1
    textures = Textures(verts_rgb=verts_rgb)
    meshes = Meshes(verts=verts, faces=faces, textures=textures)

    elev = torch.rand(N) * 30 - 15
    azim = torch.rand(N) * 360 - 180
    R, T = look_at_view_transform(dist=2, elev=elev, azim=azim)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

    sigma = 1e-4
    raster_settings = RasterizationSettings(
        image_size=128,
        blur_radius=np.log(1. / 1e-4 - 1.) * sigma,
        faces_per_pixel=40,
        perspective_correct=False)
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=SoftSilhouetteShader())
    return renderer(meshes)
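# Hypothetical usage sketch for render_mesh above (not part of the original snippet):
# silhouette-render two spheres. Assumes a CUDA device and the legacy-Textures era of
# PyTorch3D that the snippet itself targets; ico_sphere only supplies valid verts/faces.
import torch
from pytorch3d.utils import ico_sphere

sphere = ico_sphere(3, device=torch.device("cuda:0"))
verts = [sphere.verts_packed(), sphere.verts_packed() * 0.5]
faces = [sphere.faces_packed(), sphere.faces_packed()]
silhouettes = render_mesh(verts, faces)   # (2, 128, 128, 4); the alpha channel holds the soft silhouette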
def _get_renderer(self, device):
    R, T = look_at_view_transform(10, 0, 0)  # camera's position
    cameras = FoVPerspectiveCameras(
        device=device, R=R, T=T, znear=0.01, zfar=50,
        fov=2 * np.arctan(self.img_size // 2 / self.focal) * 180. / np.pi)
    lights = PointLights(device=device,
                         location=[[0.0, 0.0, 1e5]],
                         ambient_color=[[1, 1, 1]],
                         specular_color=[[0., 0., 0.]],
                         diffuse_color=[[0., 0., 0.]])
    raster_settings = RasterizationSettings(
        image_size=self.img_size,
        blur_radius=0.0,
        faces_per_pixel=1,
    )
    blend_params = blending.BlendParams(background_color=[0, 0, 0])
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=SoftPhongShader(device=device,
                               cameras=cameras,
                               lights=lights,
                               blend_params=blend_params))
    return renderer
def setup(self, device):
    R, T = look_at_view_transform(self.viewpoint_distance,
                                  self.viewpoint_elevation,
                                  self.viewpoint_azimuth,
                                  device=device)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=self.opt.fast_image_size,
        blur_radius=self.opt.raster_blur_radius,
        faces_per_pixel=self.opt.raster_faces_per_pixel,
    )
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    # Note: the DirectionalLights assignment below overrides the PointLights above,
    # so only the directional light is actually used by the shader.
    lights = PointLights(device=device, location=[self.opt.lights_location])
    lights = DirectionalLights(device=device, direction=[self.opt.lights_direction])
    shader = SoftPhongShader(
        device=device,
        cameras=cameras,
        lights=lights,
        blend_params=BlendParams(
            self.opt.blend_params_sigma,
            self.opt.blend_params_gamma,
            self.opt.blend_params_background_color,
        ),
    )
    self.renderer = MeshRenderer(
        rasterizer=rasterizer,
        shader=shader,
    )
def __get_renderer(self, render_size, lights):
    cameras = FoVOrthographicCameras(
        device=self.device,
        znear=0.1,
        zfar=10.0,
        max_y=1.0,
        min_y=-1.0,
        max_x=1.0,
        min_x=-1.0,
        scale_xyz=((1.0, 1.0, 1.0),),  # (1, 3)
    )
    raster_settings = RasterizationSettings(
        image_size=render_size,
        blur_radius=0,
        faces_per_pixel=1,
    )
    blend_params = BlendParams(sigma=1e-4, gamma=1e-4, background_color=(0, 0, 0))
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=SoftPhongShader(device=self.device,
                               cameras=cameras,
                               lights=lights,
                               blend_params=blend_params))
    return renderer
def init_renderer(self):
    # nsh_face_mesh = meshio.Mesh('data/mesh/nsh_bfm_face.obj')
    # self.nsh_face_tri = torch.from_numpy(nsh_face_mesh.triangles).type(
    #     torch.int64).to(self.device)
    R, T = look_at_view_transform(10, 0, 0)
    cameras = OpenGLPerspectiveCameras(znear=0.001,
                                       zfar=30.0,
                                       aspect_ratio=1.0,
                                       fov=12.5936,
                                       degrees=True,
                                       R=R,
                                       T=T,
                                       device=self.device)
    raster_settings = RasterizationSettings(image_size=self.im_size,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=0,
                                            cull_backfaces=True)
    self.rasterizer = MeshRasterizer(cameras=cameras,
                                     raster_settings=raster_settings)
    lights = DirectionalLights(device=self.device)
    shader = TexturedSoftPhongShader(device=self.device,
                                     cameras=cameras,
                                     lights=lights)
    self.renderer = MeshRenderer(rasterizer=self.rasterizer, shader=shader)
def test_zbuffer_render():
    zbuffer_renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=raster_settings,
        ),
        shader=IdentityShader())
    plot_channels(render(zbuffer_renderer, scene))
def test_sampler():
    heatmap = torch.zeros(1, 1, 5, 5)
    heatmap[0, 0, 2, 2] = 1.0
    from filter import sample_particles_from_heatmap_2d
    hws, alphas = sample_particles_from_heatmap_2d(heatmap, {'cup': 1},
                                                   deterministic=True,
                                                   h_min=-4.0,
                                                   h_max=4.0,
                                                   w_min=-4.0,
                                                   w_max=4.0)
    vert = torch.tensor(
        [[-1, -1.1, 0], [1, -1.1, 0], [1, 1.1, 0], [-1, 1.1, 0]],
        dtype=torch.float)
    face = torch.LongTensor([[0, 1, 2], [0, 2, 3]])
    white = torch.ones_like(vert)

    from tracker import World
    world = World()
    world.add_mesh('cup', vert, face, white)
    scene = world.create_scene(hws, alphas, 1)
    print(hws['cup'])
    print(scene.verts_padded().shape)
    plt.scatter(scene.verts_padded()[0, :, 0], scene.verts_padded()[0, :, 1])
    plt.show()

    cameras = FoVOrthographicCameras(device=device,
                                     max_x=2.5,
                                     max_y=2.5,
                                     min_x=-2.5,
                                     min_y=-2.5,
                                     scale_xyz=((1, 1, 1),))
    raster_settings = RasterizationSettings(
        image_size=5,
        blur_radius=0,
        faces_per_pixel=6,
    )
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=raster_settings,
        ),
        shader=IdentityShader())

    distance, elevation, azimuth = 30, 0.0, 0
    R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
    image = renderer(meshes_world=scene.to(device), R=R, T=T)

    fig = plt.figure(figsize=(10, 10))
    for i in range(image.size(-2)):
        panel = fig.add_subplot(3, 3, i + 1)
        panel.imshow(image[..., i, 0:3].squeeze().cpu())
    plt.grid(False)
    plt.show()
def test_bounding_box():
    verts = torch.tensor([[-2, -1, 0], [2, -1, 0], [2, 1, 0], [-2, 1, 0]],
                         dtype=torch.float) * 20.0
    faces = torch.LongTensor([[0, 1, 2], [0, 2, 3]])
    white = torch.ones_like(verts)
    red = white * torch.tensor([1.0, 0.0, 0.0])
    green = white * torch.tensor([0.0, 1.0, 0.0])
    blue = white * torch.tensor([0.0, 0.0, 1.0])
    meshes = Meshes(verts=[verts], faces=[faces], textures=TexturesVertex([blue]))

    distance = 30
    elevation = 0.0
    azimuth = 0
    R, T = look_at_view_transform(distance, elevation, azimuth)
    cameras = FoVOrthographicCameras(max_x=64.0,
                                     max_y=64.0,
                                     min_x=-64.0,
                                     min_y=-64.0,
                                     scale_xyz=((1, 1, 1),),
                                     R=R,
                                     T=T)
    bb = BoundingBoxes(meshes, cameras, screen_size=(128, 128))

    raster_settings = RasterizationSettings(
        image_size=128,
        blur_radius=0,
        faces_per_pixel=6,
    )
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=raster_settings,
        ),
        shader=IdentityShader())

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 10),
                           constrained_layout=False)
    ax.imshow(renderer(meshes)[0, :, :, 0, :])
    boxes_rect = patches.Rectangle(bb.bottom_left(0),
                                   width=bb.width(0),
                                   height=bb.height(0),
                                   linewidth=4,
                                   edgecolor='r',
                                   facecolor='none')
    ax.add_patch(boxes_rect)
    plt.show()
def createRenderer(device, camera, light, imageSize):
    '''
    Creates a PyTorch3D renderer with the given camera pose, light source
    and output image size.

    Parameters
    ----------
    device : Device on which the renderer is created.
    camera : Camera pose.
    light : Position of the light source.
    imageSize : The size of the rendered image.

    Returns
    -------
    renderer : PyTorch3D renderer.
    '''
    if camera is None:
        camera = (2.0, -20.0, 180.0)
    if light is None:
        light = (0.0, 2.0, 0.0)

    # Initialize an OpenGL perspective camera.
    # With world coordinates +Y up, +X left and +Z into the screen.
    R, T = look_at_view_transform(camera[0], camera[1], camera[2])
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

    # Define the settings for rasterization and shading. Here we set the output
    # image to be of size imageSize x imageSize. As we are rendering images for
    # visualization purposes only we will set faces_per_pixel=1 and blur_radius=0.0.
    # We also leave bin_size and max_faces_per_bin at None, which ensures that the
    # faster coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py
    # for explanations of these parameters. Refer to docs/notes/renderer.md for an
    # explanation of the difference between naive and coarse-to-fine rasterization.
    raster_settings = RasterizationSettings(
        image_size=imageSize,
        blur_radius=0.0,
        faces_per_pixel=1,
    )

    # Place a point light at the -y direction.
    lights = PointLights(device=device,
                         location=[[light[0], light[1], light[2]]])

    # Create a Phong renderer by composing a rasterizer and a shader.
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=HardPhongShader(device=device, cameras=cameras, lights=lights))
    return renderer
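# Hypothetical usage sketch for createRenderer above (not part of the original snippet).
# The mesh path is made up; camera and light values match the function's defaults.
import torch
from pytorch3d.io import load_objs_as_meshes

device = torch.device("cuda:0")
renderer = createRenderer(device, (2.0, -20.0, 180.0), (0.0, 2.0, 0.0), 512)
mesh = load_objs_as_meshes(["model.obj"], device=device)
image = renderer(mesh)   # (1, 512, 512, 4)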
def _set_renderer(self):
    if self.cameras is None:
        raise ValueError('cameras is None in pytorch3D renderer!')
    rasterizer = MeshRasterizer(cameras=self.cameras,
                                raster_settings=self.raster_settings)
    texture_shader = TexturedSoftPhongShader(device=self.device,
                                             cameras=self.cameras,
                                             lights=self.lights)
    silhouette_shader = SoftSilhouetteShader(
        blend_params=BlendParams(sigma=1e-4, gamma=1e-4))
    self.mesh_renderer = MeshRenderer(rasterizer=rasterizer,
                                      shader=texture_shader)
    self.mask_renderer = MeshRenderer(rasterizer=rasterizer,
                                      shader=silhouette_shader)
def _set_renderer(self):
    if self.cameras is None:
        raise ValueError('cameras is None in pytorch3D renderer!')
    rasterizer = MeshRasterizer(cameras=self.cameras,
                                raster_settings=self.raster_settings)
    silhouette_shader = SoftSilhouetteShader(
        blend_params=BlendParams(sigma=1e-4, gamma=1e-4))
    self.mask_renderer = MeshRenderer(rasterizer=rasterizer,
                                      shader=silhouette_shader)
def test_scene():
    world = engine.World()
    world.add_mesh('red_box', verts, faces, red)
    world.add_mesh('green_box', verts, faces, green)
    world.add_mesh('blue_box', verts, faces, blue)

    scene_spec = [
        {'red_box_0': 'red_box', 'green_box_0': 'green_box'},
        {'blue_box_0': 'blue_box', 'blue_box_1': 'blue_box'}
    ]
    world.create_scenes(scene_spec)

    poses = [
        [Translate(0, -30, 0), Translate(-10, -10, 0)],
        [Translate(40, 0, 0), Translate(-10, -10, 0)]
    ]
    world.update_scenes(poses)

    batch = world.batch()
    labels = world.labels()

    distance = 30
    elevation = 0.0
    azimuth = 0
    R, T = look_at_view_transform(distance, elevation, azimuth)
    cameras = FoVOrthographicCameras(max_x=64.0,
                                     max_y=64.0,
                                     min_x=-64.0,
                                     min_y=-64.0,
                                     scale_xyz=((1, 1, 1),),
                                     R=R,
                                     T=T)
    raster_settings = RasterizationSettings(
        image_size=128,
        blur_radius=0,
        faces_per_pixel=6,
    )
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=raster_settings,
        ),
        shader=IdentityShader()
    )
    boxes = world.bounding_boxes(cameras, (128, 128))
    image = renderer(batch)

    fig, ax = plt.subplots(nrows=1, ncols=1)
    ax.imshow(image[0, :, :, 0, :])
    for box in boxes[0]:
        ax.add_patch(box.get_patch())
    plt.show()
def __init__(self, image_size, device):
    super(Renderer, self).__init__()
    self.image_size = image_size

    R, T = look_at_view_transform(2.7, 0, 0, device=device)
    self.cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    self.mesh_color = torch.FloatTensor(config.MESH_COLOR).to(device)[None, None, :] / 255.0

    blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
    raster_settings = RasterizationSettings(
        image_size=self.image_size,
        blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma,
        faces_per_pixel=100,
    )
    self.silhouette_renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=self.cameras,
            raster_settings=raster_settings
        ),
        shader=SoftSilhouetteShader(blend_params=blend_params)
    )

    raster_settings_color = RasterizationSettings(
        image_size=self.image_size,
        blur_radius=0.0,
        faces_per_pixel=1,
    )
    lights = PointLights(device=device, location=[[0.0, 0.0, 3.0]])
    self.color_renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=self.cameras,
            raster_settings=raster_settings_color
        ),
        shader=HardPhongShader(
            device=device,
            cameras=self.cameras,
            lights=lights,
        )
    )