Code example #1
File: test_render_multigpu.py Project: r23/pytorch3d
    def test_mesh_renderer_to(self):
        """
        Test moving all the tensors in the mesh renderer to a new device.
        """

        device1 = torch.device("cpu")

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device1)
        lights = PointLights(device=device1)
        lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device1)[None]

        raster_settings = RasterizationSettings(
            image_size=256, blur_radius=0.0, faces_per_pixel=1
        )
        cameras = FoVPerspectiveCameras(
            device=device1, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=100
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

        blend_params = BlendParams(
            1e-4,
            1e-4,
            background_color=torch.zeros(3, dtype=torch.float32, device=device1),
        )

        shader = SoftPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

        mesh = ico_sphere(2, device1)
        verts_padded = mesh.verts_padded()
        textures = TexturesVertex(
            verts_features=torch.ones_like(verts_padded, device=device1)
        )
        mesh.textures = textures
        self._check_mesh_renderer_props_on_device(renderer, device1)

        # Test rendering on cpu
        output_images = renderer(mesh)
        self.assertEqual(output_images.device, device1)

        # Move renderer and mesh to another device and re-render.
        # This also tests that background_color is correctly moved to
        # the new device
        device2 = torch.device("cuda:0")
        renderer = renderer.to(device2)
        mesh = mesh.to(device2)
        self._check_mesh_renderer_props_on_device(renderer, device2)
        output_images = renderer(mesh)
        self.assertEqual(output_images.device, device2)
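
For context, the test above assumes roughly the following imports. This is a sketch based on pytorch3d's public API, not part of the original file excerpt:

import torch

from pytorch3d.renderer import (
    BlendParams,
    FoVPerspectiveCameras,
    Materials,
    MeshRasterizer,
    MeshRenderer,
    PointLights,
    RasterizationSettings,
    SoftPhongShader,
    TexturesVertex,
    look_at_view_transform,
)
from pytorch3d.utils import ico_sphere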
Code example #2
File: Renderer.py Project: kalyo-zjl/WM3DR
    def __init__(self, rasterizer, shader_rgb, shader_mask, cameras):
        super(Renderer, self).__init__()
        self.cameras = cameras
        self.rasterizer = rasterizer
        self.shader_rgb = shader_rgb
        self.shader_mask = shader_mask
        # Ambient-only light: diffuse and specular are zeroed, so Phong
        # shading reduces to the surface albedo.
        self.lights = DirectionalLights(
            ambient_color=((1, 1, 1),),
            diffuse_color=((0., 0., 0.),),
            specular_color=((0., 0., 0.),),
            direction=((0, 0, 1),),
            device='cuda',
        )
        self.materials = Materials(device='cuda')
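
With a pure-white ambient term and zeroed diffuse and specular terms, Phong shading degenerates to the raw albedo color, so this renderer effectively produces unlit output. A minimal instantiation sketch, assuming cameras and raster_settings are built as in example #1 (the choice of SoftSilhouetteShader for the mask slot is an assumption, not confirmed by the excerpt):

renderer = Renderer(
    rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
    shader_rgb=SoftPhongShader(device='cuda', cameras=cameras),
    shader_mask=SoftSilhouetteShader(),
    cameras=cameras,
)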
Code example #3
    def __init__(self,
                 device="cpu",
                 cameras=None,
                 lights=None,
                 materials=None,
                 blend_params=None):
        super().__init__()
        self.lights = lights if lights is not None else PointLights(device=device)
        self.materials = materials if materials is not None else Materials(device=device)
        self.cameras = cameras
        self.blend_params = blend_params if blend_params is not None else BlendParams()
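
The arg if arg is not None else Default() idiom defers construction of device-bound defaults to call time and avoids Python's shared-mutable-default pitfall: a default written directly into the signature would be built once, at function-definition time, and always on the CPU. A hypothetical usage sketch (MyShader stands in for whichever class this __init__ belongs to):

# Anti-pattern: built once at definition time, on the CPU, shared by all instances.
#     def __init__(self, lights=PointLights()): ...

shader_gpu = MyShader(device="cuda:0")      # defaults are created on the GPU
shader_custom = MyShader(lights=my_lights)  # caller-supplied lights, default materials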
Code example #4
File: test_render_implicit.py Project: r23/pytorch3d
    def _compare_with_meshes_renderer(self,
                                      image_size,
                                      batch_size=11,
                                      sphere_diameter=0.6):
        """
        Generate a spherical RGB volumetric function and its corresponding mesh
        and check whether MeshRenderer returns the same images as the
        corresponding ImplicitRenderer.
        """

        # generate NDC camera extrinsics and intrinsics
        cameras = init_cameras(batch_size, image_size=image_size, ndc=True)

        # get rand offset of the volume
        sphere_centroid = torch.randn(batch_size, 3,
                                      device=cameras.device) * 0.1
        sphere_centroid.requires_grad = True

        # init the grid raysampler with the ndc grid
        raysampler = NDCMultinomialRaysampler(
            image_width=image_size[1],
            image_height=image_size[0],
            n_pts_per_ray=256,
            min_depth=0.1,
            max_depth=2.0,
        )

        # get the EA raymarcher
        raymarcher = EmissionAbsorptionRaymarcher()

        # jitter the camera intrinsics a bit for each render
        cameras_randomized = cameras.clone()
        cameras_randomized.principal_point = (
            torch.randn_like(cameras.principal_point) * 0.3)
        cameras_randomized.focal_length = (
            cameras.focal_length +
            torch.randn_like(cameras.focal_length) * 0.2)

        # the list of differentiable camera vars
        cam_vars = ("R", "T", "focal_length", "principal_point")
        # enable the gradient caching for the camera variables
        for cam_var in cam_vars:
            getattr(cameras_randomized, cam_var).requires_grad = True

        # get the implicit renderer
        images_opacities = ImplicitRenderer(
            raysampler=raysampler, raymarcher=raymarcher)(
                cameras=cameras_randomized,
                volumetric_function=spherical_volumetric_function,
                sphere_centroid=sphere_centroid,
                sphere_diameter=sphere_diameter,
            )[0]

        # check that the renderer does not erase gradients
        loss = images_opacities.sum()
        loss.backward()
        for check_var in (
                *[
                    getattr(cameras_randomized, cam_var)
                    for cam_var in cam_vars
                ],
                sphere_centroid,
        ):
            self.assertIsNotNone(check_var.grad)

        # instantiate the corresponding spherical mesh
        ico = ico_sphere(level=4, device=cameras.device).extend(batch_size)
        verts = (torch.nn.functional.normalize(ico.verts_padded(), dim=-1) *
                 sphere_diameter + sphere_centroid[:, None])
        meshes = Meshes(
            verts=verts,
            faces=ico.faces_padded(),
            textures=TexturesVertex(verts_features=(
                torch.nn.functional.normalize(verts, dim=-1) * 0.5 + 0.5)),
        )

        # instantiate the corresponding mesh renderer
        lights = PointLights(device=cameras.device, location=[[0.0, 0.0, 0.0]])
        renderer_textured = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras_randomized,
                raster_settings=RasterizationSettings(
                    image_size=image_size,
                    blur_radius=1e-3,
                    faces_per_pixel=10,
                    z_clip_value=None,
                    perspective_correct=False,
                ),
            ),
            shader=SoftPhongShader(
                device=cameras.device,
                cameras=cameras_randomized,
                lights=lights,
                materials=Materials(
                    ambient_color=((2.0, 2.0, 2.0), ),
                    diffuse_color=((0.0, 0.0, 0.0), ),
                    specular_color=((0.0, 0.0, 0.0), ),
                    shininess=64,
                    device=cameras.device,
                ),
                blend_params=BlendParams(sigma=1e-3,
                                         gamma=1e-4,
                                         background_color=(0.0, 0.0, 0.0)),
            ),
        )

        # get the mesh render
        images_opacities_meshes = renderer_textured(meshes,
                                                    cameras=cameras_randomized,
                                                    lights=lights)

        if DEBUG:
            outdir = tempfile.gettempdir() + "/test_implicit_vs_mesh_renderer"
            os.makedirs(outdir, exist_ok=True)

            frames = []
            for (image_opacity,
                 image_opacity_mesh) in zip(images_opacities,
                                            images_opacities_meshes):
                image, opacity = image_opacity.split([3, 1], dim=-1)
                image_mesh, opacity_mesh = image_opacity_mesh.split([3, 1],
                                                                    dim=-1)
                diff_image = (((image - image_mesh) * 0.5 + 0.5).mean(
                    dim=2, keepdim=True).repeat(1, 1, 3))
                image_pil = Image.fromarray((torch.cat(
                    (
                        image,
                        image_mesh,
                        diff_image,
                        opacity.repeat(1, 1, 3),
                        opacity_mesh.repeat(1, 1, 3),
                    ),
                    dim=1,
                ).detach().cpu().numpy() * 255.0).astype(np.uint8))
                frames.append(image_pil)

            # export gif
            outfile = os.path.join(outdir, "implicit_vs_mesh_render.gif")
            frames[0].save(
                outfile,
                save_all=True,
                append_images=frames[1:],
                duration=batch_size // 15,
                loop=0,
            )
            print(f"exported {outfile}")

            # export concatenated frames
            outfile_cat = os.path.join(outdir, "implicit_vs_mesh_render.png")
            Image.fromarray(
                np.concatenate([np.array(f) for f in frames],
                               axis=0)).save(outfile_cat)
            print(f"exported {outfile_cat}")

        # compare the renders
        diff = (images_opacities - images_opacities_meshes).abs().mean(dim=-1)
        mu_diff = diff.mean(dim=(1, 2))
        std_diff = diff.std(dim=(1, 2))
        self.assertClose(mu_diff, torch.zeros_like(mu_diff), atol=5e-2)
        self.assertClose(std_diff, torch.zeros_like(std_diff), atol=6e-2)
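
spherical_volumetric_function and init_cameras are helpers defined elsewhere in the test file. ImplicitRenderer only requires that volumetric_function accept a ray bundle plus arbitrary keyword arguments and return per-point densities and features. A rough sketch of such a function follows; the soft-sphere parametrization and the sharpness constant are assumptions, not the test's exact implementation:

import torch
from pytorch3d.renderer import ray_bundle_to_ray_points

def spherical_volumetric_function(ray_bundle, sphere_centroid, sphere_diameter, **kwargs):
    # World coordinates of every ray sample: (batch, H, W, n_pts_per_ray, 3).
    rays_points_world = ray_bundle_to_ray_points(ray_bundle)
    # Signed distance to the sphere surface, broadcast over the per-batch centroid.
    offsets = rays_points_world - sphere_centroid[:, None, None, None]
    surface_dist = offsets.norm(dim=-1, keepdim=True) - sphere_diameter / 2.0
    # Soft occupancy in [0, 1]: high inside the sphere, falling off at the surface.
    rays_densities = torch.sigmoid(-100.0 * surface_dist)
    # Color each sample by its normalized world position, mapped to [0, 1],
    # mirroring the TexturesVertex definition used for the mesh above.
    rays_features = torch.nn.functional.normalize(rays_points_world, dim=-1) * 0.5 + 0.5
    return rays_densities, rays_features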
Code example #5
            image_size=args.window_size,
            blur_radius=0.0,
            faces_per_pixel=1,
            bin_size=None,  # this setting controls whether naive or coarse-to-fine rasterization is used
            max_faces_per_bin=None,  # this setting is for coarse rasterization
        ))

    # Create the lights
    lights = PointLights(
        device=device,
        location=[[args.light_pos_x, args.light_pos_y, args.light_pos_z]])

    # Create the material
    materials = Materials(device=device,
                          specular_color=[[0.2, 0.2, 0.2]],
                          shininess=10.0)

    # Create the shader
    if (args.shader == "soft_silhouette_shader"):
        shader = SoftSilhouetteShader()
    elif (args.shader == "soft_phong_shader"):
        shader = SoftPhongShader(device=device,
                                 cameras=cameras,
                                 lights=lights,
                                 materials=materials)
    elif (args.shader == "textured_soft_phong_shader"):
        shader = TexturedSoftPhongShader(device=device,
                                         cameras=cameras,
                                         lights=lights,
                                         materials=materials)
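
The args namespace is assumed to carry the attributes referenced above; a hypothetical argparse setup that would provide them:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--window_size", type=int, default=512)
parser.add_argument("--light_pos_x", type=float, default=0.0)
parser.add_argument("--light_pos_y", type=float, default=0.0)
parser.add_argument("--light_pos_z", type=float, default=-5.0)
parser.add_argument(
    "--shader",
    choices=["soft_silhouette_shader", "soft_phong_shader",
             "textured_soft_phong_shader"],
    default="soft_phong_shader",
)
args = parser.parse_args()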
Code example #6
    if num_views != 1:
        camera = PerspectiveCameras(device=device,
                                    focal_length=4500,
                                    principal_point=((512, 512), ),
                                    R=Rtotal[None, 1, ...],
                                    T=Ttotal[None, 1, ...],
                                    image_size=((1024, 1024), ))
    else:
        camera = PerspectiveCameras(device=device,
                                    focal_length=4500,
                                    principal_point=((512, 512), ),
                                    R=Rtotal,
                                    T=Ttotal,
                                    image_size=((1024, 1024), ))

    mymaterials = Materials(device=device, shininess=8)
    raster_settings = RasterizationSettings(
        image_size=1024,
        blur_radius=0.0,
        faces_per_pixel=1,
    )
    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=camera, raster_settings=raster_settings),
                            shader=SoftPhongShader(
                                device=device,
                                cameras=camera,
                                lights=lights,
                                materials=mymaterials,
                            ))

    meshes = mesh.extend(num_views)
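
mesh.extend(num_views) replicates the mesh num_views times so that a single batched forward pass renders every view. A short usage sketch; the RGBA output shape follows pytorch3d's blending convention:

# One call renders all replicated meshes: a (num_views, 1024, 1024, 4) RGBA tensor.
images = renderer(meshes)
rgb = images[..., :3]  # drop the alpha channel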
Code example #7
            image_size=args.window_size,
            blur_radius=0.0,
            faces_per_pixel=1,
            bin_size=None,  # this setting controls whether naive or coarse-to-fine rasterization is used
            max_faces_per_bin=None,  # this setting is for coarse rasterization
        ))

    # Create the lights
    lights = PointLights(
        device=device,
        location=[[args.light_pos_x, args.light_pos_y, args.light_pos_z]])

    # Create the material
    materials = Materials(device=device,
                          specular_color=[[0.0, 0.0, 0.0]],
                          shininess=10.0)

    # Create the shader
    if (args.shader == "soft_silhouette_shader"):
        shader = SoftSilhouetteShader()
    elif (args.shader == "soft_phong_shader"):
        shader = SoftPhongShader(device=device,
                                 cameras=cameras,
                                 lights=lights,
                                 materials=materials)
    elif (args.shader == "textured_soft_phong_shader"):
        # The textured phong shader will interpolate the texture uv coordinates for each vertex, sample from a texture image and apply the Phong lighting model
        shader = TexturedSoftPhongShader(device=device,
                                         cameras=cameras,
                                         lights=lights,
Code example #8
    def init_differential_renderer(self):

        distance = 0.3
        R, T = look_at_view_transform(distance, 0, 0)
        cameras = OpenGLPerspectiveCameras(device=self.device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=self.opt.crop_size,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                perspective_correct=True,
                                                cull_backfaces=True)
        silhouette_raster_settings = RasterizationSettings(
            image_size=self.opt.crop_size,
            blur_radius=0.0,
            faces_per_pixel=1,
            perspective_correct=True,
        )
        # Plain white ambient/diffuse material with specular highlights disabled.
        self.materials = Materials(
            device=self.device,
            ambient_color=[[1.0, 1.0, 1.0]],
            specular_color=[[0.0, 0.0, 0.0]],
            diffuse_color=[[1.0, 1.0, 1.0]],
        )
        bp = BlendParams(background_color=(0, 0, 0))  # black
        # bp = BlendParams(background_color=(1, 1, 1))  # white is default

        lights = PointLights(device=self.device, location=((0.0, 0.0, 2.0), ))

        self.renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(
                # blend_params=bp,
                device=self.device,
                lights=lights,
                cameras=cameras,
            ))
        import cv2

        # segmentation_texture_map = cv2.imread(str(Path('resources') / 'part_segmentation_map_2048_gray_n_h.png'))[...,
        segmentation_texture_map = cv2.imread(
            str(Path('resources') /
                'Color_Map_Sag_symmetric.png'))[..., ::-1].astype(np.uint8)
        import matplotlib.pyplot as plt
        plt.imshow(segmentation_texture_map)
        plt.show()

        segmentation_texture_map = (torch.from_numpy(
            np.array(segmentation_texture_map))).unsqueeze(0).float()
        self.segmentation_3d_renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=UVsCorrespondenceShader(blend_params=bp,
                                           device=self.device,
                                           cameras=cameras,
                                           colormap=segmentation_texture_map))

        # Create a silhouette mesh renderer by composing a rasterizer and a shader.
        self.silhouette_renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, raster_settings=silhouette_raster_settings),
            shader=SoftSilhouetteShader(
                blend_params=BlendParams(sigma=1e-10, gamma=1e-4)))
        self.negative_silhouette_renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=SoftSilhouetteShader(
                blend_params=BlendParams(sigma=1e-10, gamma=1e-4)))
        self.texture_data = np.load('smpl_model/texture_data.npy',
                                    allow_pickle=True,
                                    encoding='latin1').item()
        self.verts_uvs1 = torch.tensor(self.texture_data['vt'],
                                       dtype=torch.float32).unsqueeze(0).cuda(
                                           self.device)
        self.faces_uvs1 = torch.tensor(
            self.texture_data['ft'].astype(np.int64),
            dtype=torch.int64).unsqueeze(0).cuda(self.device)
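
The vt/ft arrays loaded from texture_data.npy are per-vertex UV coordinates and per-face UV indices for the SMPL mesh; they are typically combined with a texture image into a TexturesUV object before rendering. A sketch, where texture_image is a hypothetical (1, H, W, 3) float tensor in [0, 1] and mesh is a hypothetical Meshes instance:

from pytorch3d.renderer import TexturesUV

textures = TexturesUV(
    maps=texture_image,          # hypothetical (1, H, W, 3) float map in [0, 1]
    faces_uvs=self.faces_uvs1,   # (1, F, 3) int64 indices into verts_uvs
    verts_uvs=self.verts_uvs1,   # (1, V_uv, 2) float UV coordinates
)
mesh.textures = textures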