import unittest

import numpy as np
import torch
from PIL import Image

from pytorch3d.io import load_obj
from pytorch3d.renderer import (
    AmbientLights,
    BlendParams,
    FoVPerspectiveCameras,
    HardPhongShader,
    look_at_view_transform,
    Materials,
    MeshRasterizer,
    MeshRenderer,
    PointLights,
    RasterizationSettings,
    SoftPhongShader,
    TexturesUV,
)
from pytorch3d.renderer.cameras import rotate_on_spot
from pytorch3d.structures import Meshes

# The test helpers below are assumed to come from PyTorch3D's
# tests/common_testing.py; DEBUG and DATA_DIR mirror the test suite's
# module-level debug flag and data directory.
from common_testing import get_pytorch3d_dir, load_rgb_image, TestCaseMixin

DEBUG = False
DATA_DIR = get_pytorch3d_dir() / "tests" / "data"


def _render(
    mesh: Meshes,
    name: str,
    dist: float = 3.0,
    elev: float = 10.0,
    azim: float = 0,
    image_size: int = 256,
    pan=None,
    RT=None,
    use_ambient=False,
):
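    """
    Rasterize `mesh` from a single viewpoint and return an
    (image_size, image_size, 3) uint8 RGB array. The camera is either given
    directly via `RT`, or built from the (dist, elev, azim) spherical
    coordinates and optionally rotated in place by `pan`.
    """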
    device = mesh.device
    if RT is not None:
        R, T = RT
    else:
        R, T = look_at_view_transform(dist, elev, azim)
        if pan is not None:
            R, T = rotate_on_spot(R, T, pan)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

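    # Hard rasterization: no blur and only the closest face per pixel.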
    raster_settings = RasterizationSettings(image_size=image_size,
                                            blur_radius=0.0,
                                            faces_per_pixel=1)

    # Init shader settings
    if use_ambient:
        lights = AmbientLights(device=device)
    else:
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

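    # HardPhongShader only uses background_color from BlendParams; sigma and
    # gamma would only matter for soft (probabilistic) blending.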
    blend_params = BlendParams(
        sigma=1e-1,
        gamma=1e-4,
        background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
    )
    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras,
                                  raster_settings=raster_settings),
        shader=HardPhongShader(device=device,
                               lights=lights,
                               cameras=cameras,
                               blend_params=blend_params),
    )

    output = renderer(mesh)

    image = (output[0, ..., :3].cpu().numpy() * 255).astype(np.uint8)

    if DEBUG:
        Image.fromarray(image).save(DATA_DIR / f"glb_{name}_.png")

    return image
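
# Example usage (a minimal sketch): `cow_mesh` is an assumed, pre-built
# textured Meshes object on the target device, not something defined here.
#
#   image = _render(cow_mesh, "cow", dist=3.0, elev=10.0, azim=45.0)
#   assert image.shape == (256, 256, 3)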


# The enclosing test case class is not shown in the original snippet; a
# unittest.TestCase wrapper is assumed here so the method below is runnable.
class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
    def test_render_cow(self):
        """
        Test that a larger textured mesh is rendered correctly in a non-square
        image.
        """
        device = torch.device("cuda:0")
        obj_dir = get_pytorch3d_dir() / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh + texture
        verts, faces, aux = load_obj(obj_filename,
                                     device=device,
                                     load_textures=True,
                                     texture_wrap=None)
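        # aux.texture_images maps material names to (H, W, 3) texture maps;
        # take the single cow texture and add a batch dimension for TexturesUV.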
        tex_map = list(aux.texture_images.values())[0]
        tex_map = tex_map[None, ...].to(faces.textures_idx.device)
        textures = TexturesUV(maps=tex_map,
                              faces_uvs=[faces.textures_idx],
                              verts_uvs=[aux.verts_uvs])
        mesh = Meshes(verts=[verts],
                      faces=[faces.verts_idx],
                      textures=textures)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=(512, 1024),
                                                blur_radius=0.0,
                                                faces_per_pixel=1)
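        # image_size is (height, width), so this renders a 512 x 1024 landscape image.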

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=SoftPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            ),
        )

        # Load reference image
        image_ref = load_rgb_image("test_cow_image_rectangle.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
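            # bin_size=0 forces the naive per-pixel rasterizer; None lets the
            # heuristic pick a bin size for the coarse-to-fine path.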
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_cow_image_rectangle.png")

            # NOTE some pixels can be flaky
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            self.assertTrue(cond1)