def test_interpolate_face_attributes_fail(self):
        # 1. A face can only have 3 verts
        #   i.e. face_attributes must have shape (F, 3, D)
        face_attributes = torch.ones(1, 4, 3)
        pix_to_face = torch.ones((1, 1, 1, 1))
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=pix_to_face[..., None].expand(-1, -1, -1, -1, 3),
            zbuf=pix_to_face,
            dists=pix_to_face,
        )
        with self.assertRaises(ValueError):
            interpolate_face_attributes(
                fragments.pix_to_face, fragments.bary_coords, face_attributes
            )

        # 2. pix_to_face must have shape (N, H, W, K)
        pix_to_face = torch.ones((1, 1, 1, 1, 3))
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=pix_to_face,
            zbuf=pix_to_face,
            dists=pix_to_face,
        )
        with self.assertRaises(ValueError):
            interpolate_face_attributes(
                fragments.pix_to_face, fragments.bary_coords, face_attributes
            )
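
# For contrast with the failure cases above, a minimal sketch of a *valid*
# call. Shapes follow the comments in the test: face_attributes is (F, 3, D),
# pix_to_face is (N, H, W, K) of dtype int64, and bary_coords is
# (N, H, W, K, 3). The import path is an assumption based on PyTorch3D.
import torch
from pytorch3d.ops import interpolate_face_attributes

F, D = 2, 3
N, H, W, K = 1, 2, 2, 1
face_attributes = torch.rand(F, 3, D)
pix_to_face = torch.zeros((N, H, W, K), dtype=torch.int64)
bary_coords = torch.full((N, H, W, K, 3), 1.0 / 3)
texels = interpolate_face_attributes(pix_to_face, bary_coords, face_attributes)
assert texels.shape == (N, H, W, K, D)
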
    def test_softmax_rgb_blend(self):
        # Create dummy outputs of rasterization simulating a cube in the center
        # of the image with surrounding padded values.
        N, S, K = 1, 8, 2
        device = torch.device("cuda")
        pix_to_face = torch.full((N, S, S, K),
                                 fill_value=-1,
                                 dtype=torch.int64,
                                 device=device)
        h = int(S / 2)
        pix_to_face_full = torch.randint(size=(N, h, h, K),
                                         low=0,
                                         high=100,
                                         device=device)
        s = int(S / 4)
        e = int(0.75 * S)
        pix_to_face[:, s:e, s:e, :] = pix_to_face_full
        empty = torch.tensor([], device=device)

        random_sign_flip = torch.rand((N, S, S, K), device=device)
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        zbuf1 = torch.randn(size=(N, S, S, K), device=device)

        # randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        dists1 = torch.randn(size=(N, S, S, K),
                             device=device) * random_sign_flip
        dists2 = dists1.clone()
        zbuf2 = zbuf1.clone()
        dists1.requires_grad = True
        dists2.requires_grad = True
        colors = torch.randn((N, S, S, K, 3), device=device)
        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=zbuf1,
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=zbuf2,
            dists=dists2,
        )

        blend_params = BlendParams(sigma=1e-3)
        args1 = (colors, fragments1, blend_params)
        args2 = (colors, fragments2, blend_params)
        self._compare_impls(
            softmax_rgb_blend,
            softmax_blend_naive,
            args1,
            args2,
            dists1,
            dists2,
            compare_grads=True,
        )
    def test_softmax_rgb_blend(self):
        # Create dummy outputs of rasterization simulating a cube in the centre
        # of the image with surrounding padded values.
        N, S, K = 1, 8, 2
        pix_to_face = -torch.ones((N, S, S, K), dtype=torch.int64)
        h = int(S / 2)
        pix_to_face_full = torch.randint(size=(N, h, h, K), low=0, high=100)
        s = int(S / 4)
        e = int(0.75 * S)
        pix_to_face[:, s:e, s:e, :] = pix_to_face_full
        bary_coords = torch.ones((N, S, S, K, 3))

        random_sign_flip = torch.rand((N, S, S, K))
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        zbuf1 = torch.randn(size=(N, S, S, K))

        # randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        dists1 = torch.randn(size=(N, S, S, K)) * random_sign_flip
        dists2 = dists1.clone()
        zbuf2 = zbuf1.clone()
        dists1.requires_grad = True
        dists2.requires_grad = True
        zbuf1.requires_grad = True
        zbuf2.requires_grad = True
        colors = torch.randn_like(bary_coords)
        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=bary_coords,  # dummy
            zbuf=zbuf1,
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=bary_coords,  # dummy
            zbuf=zbuf2,
            dists=dists2,
        )
        blend_params = BlendParams(sigma=1e-1)
        images = softmax_rgb_blend(colors, fragments1, blend_params)
        images_naive = softmax_blend_naive(colors, fragments2, blend_params)
        self.assertTrue(torch.allclose(images, images_naive))

        # Check gradients.
        images.sum().backward()
        self.assertTrue(hasattr(dists1, "grad"))
        self.assertTrue(hasattr(zbuf1, "grad"))
        images_naive.sum().backward()
        self.assertTrue(hasattr(dists2, "grad"))
        self.assertTrue(hasattr(zbuf2, "grad"))

        self.assertTrue(torch.allclose(dists1.grad, dists2.grad, atol=2e-5))
        self.assertTrue(torch.allclose(zbuf1.grad, zbuf2.grad, atol=2e-5))
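
# A simplified, self-contained sketch of the softmax compositing idea that
# softmax_rgb_blend and softmax_blend_naive both implement (after the
# SoftRasterizer formulation). It omits PyTorch3D's znear/zfar normalization,
# background color, and alpha channel, so it is illustrative only.
import torch

def softmax_blend_sketch(colors, dists, zbuf, pix_to_face, sigma=1e-4, gamma=1e-4):
    # colors: (N, H, W, K, 3); dists, zbuf, pix_to_face: (N, H, W, K)
    mask = (pix_to_face >= 0).float()
    prob = torch.sigmoid(-dists / sigma) * mask      # per-face coverage
    z_weight = torch.softmax(-zbuf / gamma, dim=-1)  # closer faces dominate
    weights = prob * z_weight
    weights = weights / weights.sum(dim=-1, keepdim=True).clamp(min=1e-10)
    return (weights[..., None] * colors).sum(dim=-2)  # (N, H, W, 3)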


    def test_sigmoid_alpha_blend_python(self):
        """
        Test that the outputs of the Python tensorised function match
        those of the Python loop version.
        """

        # Create dummy outputs of rasterization
        torch.manual_seed(231)
        F = 32  # number of faces in the mesh
        # The python loop version is really slow so only using small input sizes.
        N, S, K = 2, 10, 5
        device = torch.device('cuda')
        pix_to_face = torch.randint(F + 1, size=(N, S, S, K),
                                    device=device) - 1
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        # Randomly flip the sign of the distance:
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K))
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists1 = torch.randn(size=(N, S, S, K),
                             requires_grad=True,
                             device=device)
        dists2 = dists1.detach().clone()
        dists2.requires_grad = True

        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists2,
        )

        blend_params = BlendParams(sigma=1e-2)
        args1 = (colors, fragments1, blend_params)
        args2 = (colors, fragments2, blend_params)

        self._compare_impls(
            sigmoid_alpha_blend,
            sigmoid_blend_naive_loop,
            args1,
            args2,
            dists1,
            dists2,
            compare_grads=True,
        )
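
    # _compare_impls is a helper defined elsewhere in this test class. A
    # plausible sketch of its behavior, reconstructed from how it is called
    # in these tests (hypothetical, not the actual implementation):
    def _compare_impls_sketch(self, fn1, fn2, args1, args2, grad_var1,
                              grad_var2, compare_grads=False):
        out1 = fn1(*args1)
        out2 = fn2(*args2)
        self.assertTrue(torch.allclose(out1.cpu(), out2.cpu(), atol=1e-7))
        if compare_grads:
            out1.sum().backward()
            out2.sum().backward()
            self.assertTrue(
                torch.allclose(grad_var1.grad.cpu(), grad_var2.grad.cpu(),
                               atol=2e-5))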
    def test_sigmoid_alpha_blend(self):
        """
        Test outputs of sigmoid alpha blend tensorised function match those of
        the naive iterative version. Also check gradients match.
        """

        # Create dummy outputs of rasterization simulating a cube in the centre
        # of the image with surrounding padded values.
        N, S, K = 1, 8, 2
        pix_to_face = -torch.ones((N, S, S, K), dtype=torch.int64)
        h = int(S / 2)
        pix_to_face_full = torch.randint(size=(N, h, h, K), low=0, high=100)
        s = int(S / 4)
        e = int(0.75 * S)
        pix_to_face[:, s:e, s:e, :] = pix_to_face_full
        bary_coords = torch.ones((N, S, S, K, 3))

        # randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K))
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists = torch.randn(size=(N, S, S, K))
        dists1 = dists * random_sign_flip
        dists2 = dists1.clone()
        dists1.requires_grad = True
        dists2.requires_grad = True
        colors = torch.randn_like(bary_coords)
        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=bary_coords,  # dummy
            zbuf=pix_to_face,  # dummy
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=bary_coords,  # dummy
            zbuf=pix_to_face,  # dummy
            dists=dists2,
        )
        blend_params = BlendParams(sigma=2e-1)
        images = sigmoid_alpha_blend(colors, fragments1, blend_params)
        images_naive = sigmoid_blend_naive(colors, fragments2, blend_params)
        self.assertTrue(torch.allclose(images, images_naive))

        torch.manual_seed(231)
        images.sum().backward()
        self.assertTrue(hasattr(dists1, "grad"))
        images_naive.sum().backward()
        self.assertTrue(hasattr(dists2, "grad"))

        self.assertTrue(torch.allclose(dists1.grad, dists2.grad, rtol=1e-5))
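
# A minimal sketch of the alpha term both implementations compute: a pixel is
# opaque with the probability that at least one of its K candidate faces
# covers it, i.e. alpha = 1 - prod_k (1 - sigmoid(-dist_k / sigma)). A sketch,
# not PyTorch3D's implementation.
import torch

def sigmoid_alpha_sketch(dists, pix_to_face, sigma=2e-1):
    mask = (pix_to_face >= 0).float()            # ignore padded (-1) faces
    prob = torch.sigmoid(-dists / sigma) * mask  # (-) dist => inside => prob > 0.5
    return 1.0 - torch.prod(1.0 - prob, dim=-1)  # (N, H, W)
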
    def test_sigmoid_alpha_blend_python(self):
        """
        Test that the outputs of the Python tensorised function match
        those of the Python loop version.
        """

        # Create dummy outputs of rasterization
        torch.manual_seed(231)
        F = 32  # number of faces in the mesh
        # The python loop version is really slow so only using small input sizes.
        N, S, K = 1, 4, 1
        device = torch.device("cuda")
        pix_to_face = torch.randint(low=-1,
                                    high=F,
                                    size=(N, S, S, K),
                                    device=device)
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        dists1 = torch.randn(size=(N, S, S, K), device=device)
        dists2 = dists1.clone()
        dists1.requires_grad = True
        dists2.requires_grad = True

        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists2,
        )

        blend_params = BlendParams(sigma=1e-2)
        args1 = (colors, fragments1, blend_params)
        args2 = (colors, fragments2, blend_params)

        self._compare_impls(
            sigmoid_alpha_blend,
            sigmoid_alpha_blend_vectorized,
            args1,
            args2,
            dists1,
            dists2,
            compare_grads=True,
        )
    def test_sigmoid_alpha_blend_manual_gradients(self):
        # Create dummy outputs of rasterization
        torch.manual_seed(231)
        F = 32  # number of faces in the mesh
        # The python loop version is really slow so only using small input sizes.
        N, S, K = 2, 3, 2
        device = torch.device("cuda")
        pix_to_face = torch.randint(F + 1, size=(N, S, S, K),
                                    device=device) - 1
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        # Randomly flip the sign of the distance:
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K))
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists = torch.randn(size=(N, S, S, K),
                            requires_grad=True,
                            device=device)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists,
        )
        blend_params = BlendParams(sigma=1e-3)
        pix_cols = sigmoid_blend_naive_loop(colors, fragments, blend_params)
        grad_out = torch.randn_like(pix_cols)

        # Backward pass
        pix_cols.backward(grad_out)
        grad_dists = sigmoid_blend_naive_loop_backward(grad_out, pix_cols,
                                                       fragments, blend_params)
        self.assertTrue(torch.allclose(dists.grad, grad_dists, atol=1e-7))
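
# An alternative numeric check (not part of the test above):
# torch.autograd.gradcheck compares analytic gradients against finite
# differences. A sketch, assuming blend_fn is a closure over everything
# except dists; gradcheck expects double-precision inputs.
import torch

def check_blend_grads(blend_fn, dists):
    dists64 = dists.detach().double().requires_grad_(True)
    return torch.autograd.gradcheck(blend_fn, (dists64,), eps=1e-6, atol=1e-4)
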
    def test_hard_rgb_blend(self):
        N, H, W, K = 5, 10, 10, 20
        pix_to_face = torch.randint(low=-1, high=100, size=(N, H, W, K))
        bary_coords = torch.ones((N, H, W, K, 3))
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=bary_coords,
            zbuf=pix_to_face,  # dummy
            dists=pix_to_face,  # dummy
        )
        colors = torch.randn((N, H, W, K, 3))
        blend_params = BlendParams(1e-4, 1e-4, (0.5, 0.5, 1))
        images = hard_rgb_blend(colors, fragments, blend_params)

        # Check that the foreground colors are correct.
        is_foreground = pix_to_face[..., 0] >= 0
        self.assertClose(images[is_foreground][:, :3],
                         colors[is_foreground][..., 0, :])

        # Check that the background colors are correct.
        for i in range(3):  # i.e. RGB
            channel_color = blend_params.background_color[i]
            self.assertTrue(images[~is_foreground][...,
                                                   i].eq(channel_color).all())

        # Examine the alpha channel
        self.assertClose(images[..., 3], (pix_to_face[..., 0] >= 0).float())
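
# A compact re-statement of the semantics the assertions above verify: hard
# blending takes the closest face's color per pixel, fills background pixels
# with blend_params.background_color, and sets alpha to a binary foreground
# mask. A sketch, not PyTorch3D's implementation.
import torch

def hard_rgb_blend_sketch(colors, pix_to_face, background_color=(0.5, 0.5, 1.0)):
    is_foreground = pix_to_face[..., 0] >= 0                 # (N, H, W)
    rgb = colors[..., 0, :].clone()                          # closest face wins
    rgb[~is_foreground] = torch.tensor(background_color, dtype=rgb.dtype)
    alpha = is_foreground.to(rgb.dtype)[..., None]
    return torch.cat([rgb, alpha], dim=-1)                   # (N, H, W, 4)
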
    def test_cameras_check(self):
        verts = torch.tensor([[-1, -1, 0], [1, -1, 1], [1, 1, 0], [-1, 1, 1]],
                             dtype=torch.float32)
        faces = torch.tensor([[0, 1, 2], [2, 3, 0]], dtype=torch.int64)
        meshes = Meshes(verts=[verts], faces=[faces])

        pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
        barycentric_coords = torch.tensor([[0.1, 0.2, 0.7], [0.3, 0.5, 0.2]],
                                          dtype=torch.float32).view(
                                              1, 1, 1, 2, -1)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=barycentric_coords,
            zbuf=torch.ones_like(pix_to_face),
            dists=torch.ones_like(pix_to_face),
        )
        shader_classes = [
            HardFlatShader,
            HardGouraudShader,
            HardPhongShader,
            SoftPhongShader,
        ]

        for shader_class in shader_classes:
            shader = shader_class()

            with self.assertRaises(ValueError):
                shader(fragments, meshes)
    def forward(self, meshes_world, **kwargs) -> torch.Tensor:
        """
        Render a batch of images from a batch of meshes by rasterizing and then shading.

        NOTE: If the blur radius for rasterization is > 0.0, some pixels can
        have one or more barycentric coordinates lying outside the range
        [0, 1]. For a pixel whose barycentric coordinates are out of bounds
        with respect to a face f, clipping is required before interpolating
        the texture uv coordinates and z buffer, so that the colors and
        depths are limited to the range for the corresponding face.
        """
        fragments = self.rasterizer(meshes_world, **kwargs)
        raster_setting = kwargs.get("raster_settings",
                                    self.rasterizer.raster_settings)
        if raster_setting.blur_radius > 0.0:
            # TODO: potentially move barycentric clipping to the rasterizer
            # if no downstream functions require unclipped values.
            # This will avoid unnecessary re-interpolation of the z buffer.
            clipped_bary_coords = _clip_barycentric_coordinates(
                fragments.bary_coords)
            clipped_zbuf = _interpolate_zbuf(fragments.pix_to_face,
                                             clipped_bary_coords, meshes_world)
            fragments = Fragments(
                bary_coords=clipped_bary_coords,
                zbuf=clipped_zbuf,
                dists=fragments.dists,
                pix_to_face=fragments.pix_to_face,
            )
        images = self.shader(fragments, meshes_world, **kwargs)
        depth = fragments.zbuf
        return images, depth
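
# What the clipping step above amounts to (a sketch; see PyTorch3D's
# _clip_barycentric_coordinates for the canonical version): negative
# coordinates are clamped to zero and each triple is renormalized to sum to
# 1, so interpolated values stay within the range spanned by the face.
import torch

def clip_barycentric_sketch(bary, eps=1e-5):
    clipped = bary.clamp(min=0.0)
    return clipped / clipped.sum(dim=-1, keepdim=True).clamp(min=eps)
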
    def test_interpolate_attributes(self):
        verts = torch.randn((4, 3), dtype=torch.float32)
        faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
        vert_tex = torch.tensor(
            [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]], dtype=torch.float32
        )
        tex = TexturesVertex(verts_features=vert_tex[None, :])
        mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
        pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
        barycentric_coords = torch.tensor(
            [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
        ).view(1, 1, 1, 2, -1)
        expected_vals = torch.tensor(
            [[0.5, 1.0, 0.3], [0.3, 1.0, 0.9]], dtype=torch.float32
        ).view(1, 1, 1, 2, -1)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=barycentric_coords,
            zbuf=torch.ones_like(pix_to_face),
            dists=torch.ones_like(pix_to_face),
        )

        verts_features_packed = mesh.textures.verts_features_packed()
        faces_verts_features = verts_features_packed[mesh.faces_packed()]

        texels = interpolate_face_attributes(
            fragments.pix_to_face, fragments.bary_coords, faces_verts_features
        )
        self.assertTrue(torch.allclose(texels, expected_vals[None, :]))
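
        # Worked arithmetic behind expected_vals: face 0 is [2, 1, 0], so its
        # per-vertex features are rows 2, 1, 0 of vert_tex. First pixel:
        #   0.5 * [1, 1, 0] + 0.3 * [0, 1, 1] + 0.2 * [0, 1, 0] = [0.5, 1.0, 0.3]
        # Second pixel (face 1 = [3, 1, 0]):
        #   0.3 * [1, 1, 1] + 0.6 * [0, 1, 1] + 0.1 * [0, 1, 0] = [0.3, 1.0, 0.9]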
    def test_interpolate_attributes_grad(self):
        verts = torch.randn((4, 3), dtype=torch.float32)
        faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
        vert_tex = torch.tensor(
            [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]],
            dtype=torch.float32,
            requires_grad=True,
        )
        tex = TexturesVertex(verts_features=vert_tex[None, :])
        mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
        pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
        barycentric_coords = torch.tensor(
            [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
        ).view(1, 1, 1, 2, -1)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=barycentric_coords,
            zbuf=torch.ones_like(pix_to_face),
            dists=torch.ones_like(pix_to_face),
        )
        grad_vert_tex = torch.tensor(
            [[0.3, 0.3, 0.3], [0.9, 0.9, 0.9], [0.5, 0.5, 0.5], [0.3, 0.3, 0.3]],
            dtype=torch.float32,
        )
        verts_features_packed = mesh.textures.verts_features_packed()
        faces_verts_features = verts_features_packed[mesh.faces_packed()]

        texels = interpolate_face_attributes(
            fragments.pix_to_face, fragments.bary_coords, faces_verts_features
        )
        texels.sum().backward()
        self.assertTrue(hasattr(vert_tex, "grad"))
        self.assertTrue(torch.allclose(vert_tex.grad, grad_vert_tex[None, :]))
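
        # Why grad_vert_tex has these values: texels.sum() gives every texel
        # a gradient of 1, so each vertex accumulates the barycentric weights
        # of the pixels whose faces reference it: vertex 0 appears in face 0
        # with weight 0.2 and in face 1 with weight 0.1 (total 0.3); vertex 1
        # gets 0.3 + 0.6 = 0.9; vertex 2 gets 0.5; vertex 3 gets 0.3.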
 def test_textures_atlas_grad(self):
     N, F, R = 1, 2, 2
     verts = torch.randn((4, 3), dtype=torch.float32)
     faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
     faces_atlas = torch.rand(size=(N, F, R, R, 3), requires_grad=True)
     tex = TexturesAtlas(atlas=faces_atlas)
     mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
     pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
     barycentric_coords = torch.tensor([[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]],
                                       dtype=torch.float32).view(
                                           1, 1, 1, 2, -1)
     fragments = Fragments(
         pix_to_face=pix_to_face,
         bary_coords=barycentric_coords,
         zbuf=torch.ones_like(pix_to_face),
         dists=torch.ones_like(pix_to_face),
     )
     texels = mesh.textures.sample_textures(fragments)
     grad_tex = torch.rand_like(texels)
     grad_expected = torch.zeros_like(faces_atlas)
     grad_expected[0, 0, 0, 1, :] = grad_tex[..., 0:1, :]
     grad_expected[0, 1, 1, 0, :] = grad_tex[..., 1:2, :]
     texels.backward(grad_tex)
     self.assertTrue(hasattr(faces_atlas, "grad"))
     self.assertTrue(torch.allclose(faces_atlas.grad, grad_expected))
 def test_interpolate_attributes(self):
     """
      This tests both interpolate_vertex_colors and
      interpolate_face_attributes.
     """
     verts = torch.randn((4, 3), dtype=torch.float32)
     faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
     vert_tex = torch.tensor([[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]],
                             dtype=torch.float32)
     tex = Textures(verts_rgb=vert_tex[None, :])
     mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
     pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
     barycentric_coords = torch.tensor([[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]],
                                       dtype=torch.float32).view(
                                           1, 1, 1, 2, -1)
     expected_vals = torch.tensor([[0.5, 1.0, 0.3], [0.3, 1.0, 0.9]],
                                  dtype=torch.float32).view(1, 1, 1, 2, -1)
     fragments = Fragments(
         pix_to_face=pix_to_face,
         bary_coords=barycentric_coords,
         zbuf=torch.ones_like(pix_to_face),
         dists=torch.ones_like(pix_to_face),
     )
     texels = interpolate_vertex_colors(fragments, mesh)
     self.assertTrue(torch.allclose(texels, expected_vals[None, :]))
    def test_sample_texture_atlas(self):
        N, F, R = 1, 2, 2
        verts = torch.randn((4, 3), dtype=torch.float32)
        faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
        faces_atlas = torch.rand(size=(N, F, R, R, 3))
        tex = TexturesAtlas(atlas=faces_atlas)
        mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
        pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
        barycentric_coords = torch.tensor([[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]],
                                          dtype=torch.float32).view(
                                              1, 1, 1, 2, -1)
        expected_vals = torch.zeros((1, 1, 1, 2, 3), dtype=torch.float32)
        expected_vals[..., 0, :] = faces_atlas[0, 0, 0, 1, ...]
        expected_vals[..., 1, :] = faces_atlas[0, 1, 1, 0, ...]

        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=barycentric_coords,
            zbuf=torch.ones_like(pix_to_face),
            dists=torch.ones_like(pix_to_face),
        )
        texels = mesh.textures.sample_textures(fragments)
        self.assertTrue(torch.allclose(texels, expected_vals))
    def test_sample_textures_uv(self):
        barycentric_coords = torch.tensor([[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]],
                                          dtype=torch.float32).view(
                                              1, 1, 1, 2, -1)
        dummy_verts = torch.zeros(4, 3)
        vert_uvs = torch.tensor([[1, 0], [0, 1], [1, 1], [0, 0]],
                                dtype=torch.float32)
        face_uvs = torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.int64)
        interpolated_uvs = torch.tensor(
            [[0.5 + 0.2, 0.3 + 0.2], [0.6, 0.3 + 0.6]], dtype=torch.float32)

        # Create a dummy texture map
        H = 2
        W = 2
        x = torch.linspace(0, 1, W).view(1, W).expand(H, W)
        y = torch.linspace(0, 1, H).view(H, 1).expand(H, W)
        tex_map = torch.stack([x, y], dim=2).view(1, H, W, 2)
        pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=barycentric_coords,
            zbuf=pix_to_face,
            dists=pix_to_face,
        )

        for align_corners in [True, False]:
            tex = TexturesUV(
                maps=tex_map,
                faces_uvs=[face_uvs],
                verts_uvs=[vert_uvs],
                align_corners=align_corners,
            )
            meshes = Meshes(verts=[dummy_verts],
                            faces=[face_uvs],
                            textures=tex)
            mesh_textures = meshes.textures
            texels = mesh_textures.sample_textures(fragments)

            # Expected output
            pixel_uvs = interpolated_uvs * 2.0 - 1.0
            pixel_uvs = pixel_uvs.view(2, 1, 1, 2)
            tex_map_ = torch.flip(tex_map, [1]).permute(0, 3, 1, 2)
            tex_map_ = torch.cat([tex_map_, tex_map_], dim=0)
            expected_out = F.grid_sample(tex_map_,
                                         pixel_uvs,
                                         align_corners=align_corners,
                                         padding_mode="border")
            self.assertTrue(
                torch.allclose(texels.squeeze(), expected_out.squeeze()))
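
            # A reading of the expected-output construction above:
            # interpolated UVs in [0, 1] are mapped to grid_sample's [-1, 1]
            # range via uv * 2 - 1, and the map is flipped along H because
            # row 0 of the texture image is its top while grid_sample treats
            # y = -1 as the top of its input.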
    def raster_fn():
        fragments = rasterizer(sphere_meshes)

        # Clip bary and reinterpolate
        clipped_bary_coords = _clip_barycentric_coordinates(
            fragments.bary_coords)
        clipped_zbuf = _interpolate_zbuf(fragments.pix_to_face,
                                         clipped_bary_coords, sphere_meshes)
        fragments = Fragments(
            bary_coords=clipped_bary_coords,
            zbuf=clipped_zbuf,
            dists=fragments.dists,
            pix_to_face=fragments.pix_to_face,
        )
        torch.cuda.synchronize()
    def bm_softmax_blending(
        num_meshes: int = 16,
        image_size: int = 128,
        faces_per_pixel: int = 100,
        device: str = 'cpu',
    ):
        if torch.cuda.is_available() and 'cuda:' in device:
            # If a device other than the default is used, set the device explicitly.
            torch.cuda.set_device(device)

        device = torch.device(device)
        torch.manual_seed(231)

        # Create dummy outputs of rasterization
        N, S, K = num_meshes, image_size, faces_per_pixel
        F = 32  # num faces in the mesh
        pix_to_face = torch.randint(F + 1, size=(N, S, S, K),
                                    device=device) - 1
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        # Randomly flip the sign of the distance:
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K), device=device)
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists1 = torch.randn(size=(N, S, S, K),
                             requires_grad=True,
                             device=device)
        zbuf = torch.randn(size=(N, S, S, K),
                           requires_grad=True,
                           device=device)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,
            zbuf=zbuf,
            dists=dists1,  # dummy
        )
        blend_params = BlendParams(sigma=1e-3)

        torch.cuda.synchronize()

        def fn():
            # test forward and backward pass
            images = softmax_rgb_blend(colors, fragments, blend_params)
            images.sum().backward()
            torch.cuda.synchronize()

        return fn
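
# Hypothetical harness usage: the bm_* functions return a closure so that a
# timing utility can invoke the forward + backward pass repeatedly, e.g.
#
#   fn = bm_softmax_blending(num_meshes=8, image_size=64, device="cuda:0")
#   for _ in range(10):
#       fn()
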
    def bm_softmax_blending(
        num_meshes: int = 16,
        image_size: int = 128,
        faces_per_pixel: int = 100,
        device: str = "cpu",
        backend: str = "pytorch",
    ):
        if torch.cuda.is_available() and "cuda:" in device:
            # If a device other than the default is used, set the device explicitly.
            torch.cuda.set_device(device)

        device = torch.device(device)
        torch.manual_seed(231)

        # Create dummy outputs of rasterization
        N, S, K = num_meshes, image_size, faces_per_pixel
        F = 32  # num faces in the mesh
        pix_to_face = torch.randint(low=-1,
                                    high=F + 1,
                                    size=(N, S, S, K),
                                    device=device)
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        dists1 = torch.randn(size=(N, S, S, K),
                             requires_grad=True,
                             device=device)
        zbuf = torch.randn(size=(N, S, S, K),
                           requires_grad=True,
                           device=device)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,
            zbuf=zbuf,
            dists=dists1  # dummy
        )
        blend_params = BlendParams(sigma=1e-3)

        torch.cuda.synchronize()

        def fn():
            # test forward and backward pass
            images = softmax_rgb_blend(colors, fragments, blend_params)
            images.sum().backward()
            torch.cuda.synchronize()

        return fn
def rasterize(R, T, meshes, rasterizer, blur_radius=0):
    # Rasterizing updates the camera settings (R, T) in the rasterizer's cameras.
    fragments = rasterizer(meshes, R=R, T=T)

    # Copied from the PyTorch3D source code; check whether this step is
    # necessary for gradient descent.
    if blur_radius > 0.0:
        clipped_bary_coords = utils._clip_barycentric_coordinates(
            fragments.bary_coords)
        clipped_zbuf = utils._interpolate_zbuf(fragments.pix_to_face,
                                               clipped_bary_coords, meshes)
        fragments = Fragments(
            bary_coords=clipped_bary_coords,
            zbuf=clipped_zbuf,
            dists=fragments.dists,
            pix_to_face=fragments.pix_to_face,
        )
    return fragments
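
# Hypothetical usage of the helper above (camera and mesh setup assumed):
#
#   R, T = look_at_view_transform(dist=2.7, elev=0.0, azim=0.0)
#   fragments = rasterize(R, T, meshes, rasterizer, blur_radius=1e-4)
#   depth = fragments.zbuf  # reinterpolated when blur_radius > 0
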
 def _rasterize(self, meshes, image_size, bin_size, blur):
     """
     Simple wrapper around the rasterize function to return
     the fragment data.
     """
     face_idxs, zbuf, bary_coords, pix_dists = rasterize_meshes(
         meshes,
         image_size,
         blur,
         faces_per_pixel=1,
         bin_size=bin_size,
     )
     return Fragments(
         pix_to_face=face_idxs,
         zbuf=zbuf,
         bary_coords=bary_coords,
         dists=pix_dists,
     )
 def test_hard_rgb_blend(self):
     N, H, W, K = 5, 10, 10, 20
     pix_to_face = torch.ones((N, H, W, K))
     bary_coords = torch.ones((N, H, W, K, 3))
     fragments = Fragments(
         pix_to_face=pix_to_face,
         bary_coords=bary_coords,
         zbuf=pix_to_face,  # dummy
         dists=pix_to_face,  # dummy
     )
     colors = bary_coords.clone()
     top_k = torch.randn((K, 3))
     colors[..., :, :] = top_k
     images = hard_rgb_blend(colors, fragments)
     expected_vals = torch.ones((N, H, W, 4))
     pix_cols = torch.ones_like(expected_vals[..., :3]) * top_k[0, :]
     expected_vals[..., :3] = pix_cols
     self.assertTrue(torch.allclose(images, expected_vals))
    def bm_sigmoid_alpha_blending(
        num_meshes: int = 16,
        image_size: int = 128,
        faces_per_pixel: int = 100,
        device="cuda",
        backend: str = "pytorch",
    ):
        device = torch.device(device)
        torch.manual_seed(231)

        # Create dummy outputs of rasterization
        N, S, K = num_meshes, image_size, faces_per_pixel
        F = 32  # num faces in the mesh
        pix_to_face = torch.randint(low=-1,
                                    high=F + 1,
                                    size=(N, S, S, K),
                                    device=device)
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        dists1 = torch.randn(size=(N, S, S, K),
                             requires_grad=True,
                             device=device)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists1,
        )
        blend_params = BlendParams(sigma=1e-3)

        blend_fn = (sigmoid_alpha_blend_vectorized
                    if backend == "pytorch" else sigmoid_alpha_blend)

        torch.cuda.synchronize()

        def fn():
            # test forward and backward pass
            images = blend_fn(colors, fragments, blend_params)
            images.sum().backward()
            torch.cuda.synchronize()

        return fn
    def test_cpu(self):
        """
        Test that rendering non-square images gives the same result as
        square images, i.e. the dists, zbuf and bary values are all the same
        for the square region present in both images.

        In this test we compare the naive C++ implementation against the
        naive Python implementation, as the coarse/fine method is not fully
        implemented in C++.
        """
        # Test both when (W > H) and (H > W).
        # Using smaller image sizes here as the Python rasterizer is really slow.
        image_sizes = [(32, 64), (64, 32), (60, 110)]
        devices = ["cpu"]
        blurs = [0.0, 0.001]
        batch_sizes = [1]
        test_cases = product(image_sizes, blurs, devices, batch_sizes)

        for image_size, blur, device, batch_size in test_cases:
            # Initialize the verts grad tensor and the meshes objects
            verts_nonsq_naive, meshes_nonsq_naive = self._clone_mesh(
                verts0, faces0, device, batch_size)
            verts_nonsq_python, meshes_nonsq_python = self._clone_mesh(
                verts0, faces0, device, batch_size)

            # Compare naive CPU with Python, as coarse/fine rasterization
            # is not implemented for CPU.
            fragments_naive = self._rasterize(meshes_nonsq_naive,
                                              image_size,
                                              bin_size=0,
                                              blur=blur)
            face_idxs, zbuf, bary_coords, pix_dists = rasterize_meshes_python(
                meshes_nonsq_python,
                image_size,
                blur,
                faces_per_pixel=1,
            )
            fragments_python = Fragments(
                pix_to_face=face_idxs,
                zbuf=zbuf,
                bary_coords=bary_coords,
                dists=pix_dists,
            )

            # Save debug images if DEBUG is set to true at the top of the file.
            _save_debug_image(fragments_naive.pix_to_face, image_size, 0, blur)
            _save_debug_image(fragments_python.pix_to_face, image_size,
                              "python", blur)

            # List of non square outputs to compare with the square output
            nonsq_fragment_gradtensor_list = [
                (fragments_naive, verts_nonsq_naive, "naive"),
                (fragments_python, verts_nonsq_python, "python"),
            ]
            self._compare_square_with_nonsq(
                image_size,
                blur,
                device,
                verts0,
                faces0,
                nonsq_fragment_gradtensor_list,
                batch_size,
            )