Example #1
    def test_blend_params(self):
        """Test the background_color parameter of BlendParams().
        Assert that a passed value overrides the default value.
        """
        bp_default = BlendParams()
        bp_new = BlendParams(background_color=(0.5, 0.5, 0.5))
        self.assertEqual(bp_new.background_color, (0.5, 0.5, 0.5))
        self.assertEqual(bp_default.background_color, (1.0, 1.0, 1.0))
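
For orientation, a minimal sketch of the fields a BlendParams carries (the sigma and gamma defaults shown are an assumption based on recent PyTorch3D releases; the default background color matches the assertion above): sigma and gamma control how sharply the sigmoid/softmax blending falls off, and background_color fills pixels not covered by any face.

from pytorch3d.renderer.blending import BlendParams

bp = BlendParams()
# NamedTuple-style fields used throughout the examples below.
print(bp.sigma, bp.gamma, bp.background_color)  # e.g. 0.0001 0.0001 (1.0, 1.0, 1.0)
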
Example #2
def soft_feature_blending(colors,
                          fragments,
                          blend_params=None,
                          znear: float = 0.01,
                          zfar: float = 10) -> torch.Tensor:
    """
    Returns:
        Rendered features: (N, H, W, F)
    """
    blend_params = BlendParams() if blend_params is None else blend_params
    eps = 1e-10  # Weight for background
    N, H, W, K = fragments.pix_to_face.shape
    # number of feature channels
    # C = colors.shape[-1]

    mask = fragments.pix_to_face >= 0
    prob_map = torch.sigmoid(-fragments.dists / blend_params.sigma) * mask
    z_inv = (zfar - fragments.zbuf) / (zfar - znear) * mask
    # pyre-fixme[16]: `Tuple` has no attribute `values`.
    # pyre-fixme[6]: Expected `Tensor` for 1st param but got `float`.
    z_inv_max = torch.max(z_inv, dim=-1).values[..., None].clamp(min=eps)
    # pyre-fixme[6]: Expected `Tensor` for 1st param but got `float`.
    z_prob = torch.exp((z_inv - z_inv_max) / blend_params.gamma)
    z_prob = z_prob / (z_prob.sum(-1, keepdim=True).clamp(eps, 1))

    # For each face, compute the soft assignment score by taking into account xy dist
    # and relative z distance
    # (among different faces assigned to specific xy pixel location)
    colored = 1 - torch.prod(1 - (prob_map * z_prob).unsqueeze(-1) * colors,
                             dim=-2)
    alpha = 1 - torch.prod((1.0 - prob_map), dim=-1)
    return torch.cat([colored, alpha.unsqueeze(-1)], -1)
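
A hedged usage sketch for soft_feature_blending above, with random tensors standing in for real rasterizer output (the Fragments and BlendParams import paths assume the usual PyTorch3D module layout): K candidate faces per pixel are blended into an F-channel feature image with an extra alpha channel.

import torch
from pytorch3d.renderer.blending import BlendParams
from pytorch3d.renderer.mesh.rasterizer import Fragments

N, H, W, K, F = 2, 16, 16, 4, 8
fragments = Fragments(
    pix_to_face=torch.randint(low=-1, high=100, size=(N, H, W, K)),
    bary_coords=torch.ones((N, H, W, K, 3)),  # dummy
    zbuf=torch.rand((N, H, W, K)) * 5 + 1,    # dummy depths
    dists=torch.randn((N, H, W, K)),          # signed 2D distances to face edges
)
features = torch.randn((N, H, W, K, F))
out = soft_feature_blending(features, fragments, BlendParams(sigma=1e-4, gamma=1e-4))
print(out.shape)  # torch.Size([2, 16, 16, 9]): F blended channels plus alpha
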
Example #3
    def test_hard_rgb_blend(self):
        N, H, W, K = 5, 10, 10, 20
        pix_to_face = torch.randint(low=-1, high=100, size=(N, H, W, K))
        bary_coords = torch.ones((N, H, W, K, 3))
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=bary_coords,
            zbuf=pix_to_face,  # dummy
            dists=pix_to_face,  # dummy
        )
        colors = torch.randn((N, H, W, K, 3))
        blend_params = BlendParams(1e-4, 1e-4, (0.5, 0.5, 1))
        images = hard_rgb_blend(colors, fragments, blend_params)

        # Examine if the foreground colors are correct.
        is_foreground = pix_to_face[..., 0] >= 0
        self.assertClose(images[is_foreground][:, :3],
                         colors[is_foreground][..., 0, :])

        # Examine if the background colors are correct.
        for i in range(3):  # i.e. RGB
            channel_color = blend_params.background_color[i]
            self.assertTrue(images[~is_foreground][..., i].eq(channel_color).all())

        # Examine the alpha channel
        self.assertClose(images[..., 3], (pix_to_face[..., 0] >= 0).float())
Example #4
    def test_sigmoid_alpha_blend_manual_gradients(self):
        # Create dummy outputs of rasterization
        torch.manual_seed(231)
        F = 32  # number of faces in the mesh
        # The python loop version is really slow so only using small input sizes.
        N, S, K = 2, 3, 2
        device = torch.device("cuda")
        pix_to_face = torch.randint(F + 1, size=(N, S, S, K),
                                    device=device) - 1
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        # randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K))
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists = torch.randn(size=(N, S, S, K),
                            requires_grad=True,
                            device=device)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists,
        )
        blend_params = BlendParams(sigma=1e-3)
        pix_cols = sigmoid_blend_naive_loop(colors, fragments, blend_params)
        grad_out = torch.randn_like(pix_cols)

        # Backward pass
        pix_cols.backward(grad_out)
        grad_dists = sigmoid_blend_naive_loop_backward(grad_out, pix_cols,
                                                       fragments, blend_params)
        self.assertTrue(torch.allclose(dists.grad, grad_dists, atol=1e-7))
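
The naive loop helpers referenced above are not shown on this page. As an orientation aid, here is a minimal illustrative reimplementation of per-pixel sigmoid alpha blending (a sketch of the idea, with a hypothetical name, not the sigmoid_blend_naive_loop helper itself): RGB comes from the closest face, and alpha is one minus the product of (1 - sigmoid(-dist / sigma)) over the faces covering the pixel.

import torch

def sigmoid_alpha_blend_loop(colors, fragments, blend_params):
    # colors: (N, H, W, K, 3); fragments.pix_to_face / dists: (N, H, W, K)
    N, H, W, K = fragments.pix_to_face.shape
    out = torch.ones((N, H, W, 4), device=colors.device)
    for n in range(N):
        for h in range(H):
            for w in range(W):
                # The closest face (k = 0) provides the RGB value.
                out[n, h, w, :3] = colors[n, h, w, 0, :]
                bg = 1.0  # running probability that no face covers this pixel
                for k in range(K):
                    if fragments.pix_to_face[n, h, w, k] >= 0:
                        prob = torch.sigmoid(
                            -fragments.dists[n, h, w, k] / blend_params.sigma
                        )
                        bg = bg * (1.0 - prob)
                out[n, h, w, 3] = 1.0 - bg
    return out
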
Example #5
    def setup(self, device):
        R, T = look_at_view_transform(self.viewpoint_distance,
                                      self.viewpoint_elevation,
                                      self.viewpoint_azimuth,
                                      device=device)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=self.opt.fast_image_size,
            blur_radius=self.opt.raster_blur_radius,
            faces_per_pixel=self.opt.raster_faces_per_pixel,
        )
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)
        # Note: the PointLights below are immediately replaced by DirectionalLights.
        lights = PointLights(device=device,
                             location=[self.opt.lights_location])
        lights = DirectionalLights(device=device,
                                   direction=[self.opt.lights_direction])
        shader = SoftPhongShader(
            device=device,
            cameras=cameras,
            lights=lights,
            blend_params=BlendParams(
                self.opt.blend_params_sigma,
                self.opt.blend_params_gamma,
                self.opt.blend_params_background_color,
            ),
        )
        self.renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=shader,
        )
Example #6
    def test_softmax_rgb_blend(self):
        # Create dummy outputs of rasterization simulating a cube in the center
        # of the image with surrounding padded values.
        N, S, K = 1, 8, 2
        device = torch.device("cuda")
        pix_to_face = torch.full((N, S, S, K),
                                 fill_value=-1,
                                 dtype=torch.int64,
                                 device=device)
        h = int(S / 2)
        pix_to_face_full = torch.randint(size=(N, h, h, K),
                                         low=0,
                                         high=100,
                                         device=device)
        s = int(S / 4)
        e = int(0.75 * S)
        pix_to_face[:, s:e, s:e, :] = pix_to_face_full
        empty = torch.tensor([], device=device)

        random_sign_flip = torch.rand((N, S, S, K), device=device)
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        zbuf1 = torch.randn(size=(N, S, S, K), device=device)

        # randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        dists1 = torch.randn(size=(N, S, S, K),
                             device=device) * random_sign_flip
        dists2 = dists1.clone()
        zbuf2 = zbuf1.clone()
        dists1.requires_grad = True
        dists2.requires_grad = True
        colors = torch.randn((N, S, S, K, 3), device=device)
        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=zbuf1,
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=zbuf2,
            dists=dists2,
        )

        blend_params = BlendParams(sigma=1e-3)
        args1 = (colors, fragments1, blend_params)
        args2 = (colors, fragments2, blend_params)
        self._compare_impls(
            softmax_rgb_blend,
            softmax_blend_naive,
            args1,
            args2,
            dists1,
            dists2,
            compare_grads=True,
        )
Example #7
    def test_softmax_rgb_blend(self):
        # Create dummy outputs of rasterization simulating a cube in the centre
        # of the image with surrounding padded values.
        N, S, K = 1, 8, 2
        pix_to_face = -torch.ones((N, S, S, K), dtype=torch.int64)
        h = int(S / 2)
        pix_to_face_full = torch.randint(size=(N, h, h, K), low=0, high=100)
        s = int(S / 4)
        e = int(0.75 * S)
        pix_to_face[:, s:e, s:e, :] = pix_to_face_full
        bary_coords = torch.ones((N, S, S, K, 3))

        random_sign_flip = torch.rand((N, S, S, K))
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        zbuf1 = torch.randn(size=(N, S, S, K))

        # randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        dists1 = torch.randn(size=(N, S, S, K)) * random_sign_flip
        dists2 = dists1.clone()
        zbuf2 = zbuf1.clone()
        dists1.requires_grad = True
        dists2.requires_grad = True
        zbuf1.requires_grad = True
        zbuf2.requires_grad = True
        colors = torch.randn_like(bary_coords)
        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=bary_coords,  # dummy
            zbuf=zbuf1,
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=bary_coords,  # dummy
            zbuf=zbuf2,
            dists=dists2,
        )
        blend_params = BlendParams(sigma=1e-1)
        images = softmax_rgb_blend(colors, fragments1, blend_params)
        images_naive = softmax_blend_naive(colors, fragments2, blend_params)
        self.assertTrue(torch.allclose(images, images_naive))

        # Check gradients.
        images.sum().backward()
        self.assertTrue(hasattr(dists1, "grad"))
        self.assertTrue(hasattr(zbuf1, "grad"))
        images_naive.sum().backward()
        self.assertTrue(hasattr(dists2, "grad"))
        self.assertTrue(hasattr(zbuf2, "grad"))

        self.assertTrue(torch.allclose(dists1.grad, dists2.grad, atol=2e-5))
        self.assertTrue(torch.allclose(zbuf1.grad, zbuf2.grad, atol=2e-5))
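
For readers following the two softmax tests, here is an illustrative sketch of the weighting that softmax-style blending is expected to perform (a summary in code under the SoftRasterizer formulation, not the pytorch3d softmax_rgb_blend implementation): each of the K faces covering a pixel gets a weight proportional to its 2D coverage probability times a depth-based softmax term, and a small background weight absorbs the remainder.

import torch

def softmax_rgb_blend_sketch(colors, fragments, blend_params,
                             znear=1.0, zfar=100.0, eps=1e-10):
    # colors: (N, H, W, K, 3); returns an (N, H, W, 4) RGBA image.
    mask = fragments.pix_to_face >= 0
    prob = torch.sigmoid(-fragments.dists / blend_params.sigma) * mask
    alpha = 1.0 - torch.prod(1.0 - prob, dim=-1)

    # Depth-based softmax weights: nearer faces get exponentially more weight.
    z_inv = (zfar - fragments.zbuf) / (zfar - znear) * mask
    z_inv_max = torch.max(z_inv, dim=-1).values[..., None].clamp(min=eps)
    weights = prob * torch.exp((z_inv - z_inv_max) / blend_params.gamma)
    bg_weight = torch.exp((eps - z_inv_max) / blend_params.gamma)[..., 0]
    denom = weights.sum(dim=-1) + bg_weight

    bg_color = torch.tensor(blend_params.background_color,
                            dtype=colors.dtype, device=colors.device)
    rgb = (weights[..., None] * colors).sum(dim=-2)
    rgb = (rgb + bg_weight[..., None] * bg_color) / denom[..., None]
    return torch.cat([rgb, alpha[..., None]], dim=-1)
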


Example #8
    def test_sigmoid_alpha_blend_python(self):
        """
        Test outputs of python tensorised function and python loop
        """

        # Create dummy outputs of rasterization
        torch.manual_seed(231)
        F = 32  # number of faces in the mesh
        # The python loop version is really slow so only using small input sizes.
        N, S, K = 2, 10, 5
        device = torch.device('cuda')
        pix_to_face = torch.randint(F + 1, size=(N, S, S, K),
                                    device=device) - 1
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        # randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K))
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists1 = torch.randn(size=(N, S, S, K),
                             requires_grad=True,
                             device=device)
        dists2 = dists1.detach().clone()
        dists2.requires_grad = True

        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists2,
        )

        blend_params = BlendParams(sigma=1e-2)
        args1 = (colors, fragments1, blend_params)
        args2 = (colors, fragments2, blend_params)

        self._compare_impls(
            sigmoid_alpha_blend,
            sigmoid_blend_naive_loop,
            args1,
            args2,
            dists1,
            dists2,
            compare_grads=True,
        )
Example #9
    def render_torch(self,
                     verts,
                     faces,
                     rgb,
                     bcg_color=(1., 1., 1.),
                     get_depth=False,
                     get_alpha=False):
        # b, h, w = grid_3d.shape[:3]
        b = verts.size(0)
        textures = TexturesVertex(verts_features=rgb.view(b, -1, 3))
        mesh = Meshes(verts=verts, faces=faces, textures=textures)

        fragments = self.rasterizer_torch(mesh)
        texels = mesh.sample_textures(fragments)
        materials = Materials(device=verts.device)
        blend_params = BlendParams(background_color=bcg_color)
        images = hard_rgb_blend(texels, fragments, blend_params)
        images = images[..., :3].permute(0, 3, 1, 2)

        out = (images, )
        if get_depth:
            depth = fragments.zbuf[..., 0]
            mask = (depth == -1.0).float()
            max_depth = self.max_depth + 0.5 * (self.max_depth - self.min_depth)
            depth = mask * max_depth * torch.ones_like(depth) + (1 - mask) * depth
            out = out + (depth, )
        if get_alpha:
            colors = torch.ones_like(fragments.bary_coords)
            blend_params = BlendParams(sigma=1e-2,
                                       gamma=1e-4,
                                       background_color=(1., 1., 1.))
            alpha = sigmoid_alpha_blend(colors, fragments, blend_params)[..., -1]
            out = tuple(out) + (alpha, )
        if len(out) == 1:
            out = out[0]
        return out
Example #10
    def test_sigmoid_alpha_blend(self):
        """
        Test that the outputs of the tensorised sigmoid alpha blend function
        match those of the naive iterative version. Also check that the
        gradients match.
        """

        # Create dummy outputs of rasterization simulating a cube in the centre
        # of the image with surrounding padded values.
        N, S, K = 1, 8, 2
        pix_to_face = -torch.ones((N, S, S, K), dtype=torch.int64)
        h = int(S / 2)
        pix_to_face_full = torch.randint(size=(N, h, h, K), low=0, high=100)
        s = int(S / 4)
        e = int(0.75 * S)
        pix_to_face[:, s:e, s:e, :] = pix_to_face_full
        bary_coords = torch.ones((N, S, S, K, 3))

        # randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K))
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists = torch.randn(size=(N, S, S, K))
        dists1 = dists * random_sign_flip
        dists2 = dists1.clone()
        dists1.requires_grad = True
        dists2.requires_grad = True
        colors = torch.randn_like(bary_coords)
        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=bary_coords,  # dummy
            zbuf=pix_to_face,  # dummy
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=bary_coords,  # dummy
            zbuf=pix_to_face,  # dummy
            dists=dists2,
        )
        blend_params = BlendParams(sigma=2e-1)
        images = sigmoid_alpha_blend(colors, fragments1, blend_params)
        images_naive = sigmoid_blend_naive(colors, fragments2, blend_params)
        self.assertTrue(torch.allclose(images, images_naive))

        torch.manual_seed(231)
        images.sum().backward()
        self.assertTrue(hasattr(dists1, "grad"))
        images_naive.sum().backward()
        self.assertTrue(hasattr(dists2, "grad"))

        self.assertTrue(torch.allclose(dists1.grad, dists2.grad, rtol=1e-5))
Example #11
    def test_sigmoid_alpha_blend_python(self):
        """
        Test outputs of python tensorised function and python loop
        """

        # Create dummy outputs of rasterization
        torch.manual_seed(231)
        F = 32  # number of faces in the mesh
        # The python loop version is really slow so only using small input sizes.
        N, S, K = 1, 4, 1
        device = torch.device("cuda")
        pix_to_face = torch.randint(low=-1,
                                    high=F,
                                    size=(N, S, S, K),
                                    device=device)
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        dists1 = torch.randn(size=(N, S, S, K), device=device)
        dists2 = dists1.clone()
        dists1.requires_grad = True
        dists2.requires_grad = True

        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists2,
        )

        blend_params = BlendParams(sigma=1e-2)
        args1 = (colors, fragments1, blend_params)
        args2 = (colors, fragments2, blend_params)

        self._compare_impls(
            sigmoid_alpha_blend,
            sigmoid_alpha_blend_vectorized,
            args1,
            args2,
            dists1,
            dists2,
            compare_grads=True,
        )
Example #12
    def bm_softmax_blending(
        num_meshes: int = 16,
        image_size: int = 128,
        faces_per_pixel: int = 100,
        device: str = 'cpu',
    ):
        if torch.cuda.is_available() and 'cuda:' in device:
            # If a device other than the default is used, set the device explicitly.
            torch.cuda.set_device(device)

        device = torch.device(device)
        torch.manual_seed(231)

        # Create dummy outputs of rasterization
        N, S, K = num_meshes, image_size, faces_per_pixel
        F = 32  # num faces in the mesh
        pix_to_face = torch.randint(F + 1, size=(N, S, S, K),
                                    device=device) - 1
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        # randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K), device=device)
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists1 = torch.randn(size=(N, S, S, K),
                             requires_grad=True,
                             device=device)
        zbuf = torch.randn(size=(N, S, S, K),
                           requires_grad=True,
                           device=device)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,
            zbuf=zbuf,
            dists=dists1,  # dummy
        )
        blend_params = BlendParams(sigma=1e-3)

        torch.cuda.synchronize()

        def fn():
            # test forward and backward pass
            images = softmax_rgb_blend(colors, fragments, blend_params)
            images.sum().backward()
            torch.cuda.synchronize()

        return fn
Example #13
    def bm_softmax_blending(
        num_meshes: int = 16,
        image_size: int = 128,
        faces_per_pixel: int = 100,
        device: str = "cpu",
        backend: str = "pytorch",
    ):
        if torch.cuda.is_available() and "cuda:" in device:
            # If a device other than the default is used, set the device explicitly.
            torch.cuda.set_device(device)

        device = torch.device(device)
        torch.manual_seed(231)

        # Create dummy outputs of rasterization
        N, S, K = num_meshes, image_size, faces_per_pixel
        F = 32  # num faces in the mesh
        pix_to_face = torch.randint(low=-1,
                                    high=F + 1,
                                    size=(N, S, S, K),
                                    device=device)
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        dists1 = torch.randn(size=(N, S, S, K),
                             requires_grad=True,
                             device=device)
        zbuf = torch.randn(size=(N, S, S, K),
                           requires_grad=True,
                           device=device)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,
            zbuf=zbuf,
            dists=dists1  # dummy
        )
        blend_params = BlendParams(sigma=1e-3)

        torch.cuda.synchronize()

        def fn():
            # test forward and backward pass
            images = softmax_rgb_blend(colors, fragments, blend_params)
            images.sum().backward()
            torch.cuda.synchronize()

        return fn
Example #14
    def __init__(self,
                 background_color: Optional[Union[Tuple, List,
                                                  torch.Tensor]] = None,
                 device="cpu",
                 cameras=None,
                 lights=None,
                 materials=None,
                 blend_params=None):
        super().__init__()
        self.background_color = background_color

        self.lights = lights if lights is not None else PointLights(device=device)
        self.materials = (
            materials if materials is not None else Materials(device=device)
        )
        self.cameras = cameras
        self.blend_params = (
            blend_params if blend_params is not None else BlendParams()
        )
Example #15
    def bm_sigmoid_alpha_blending(
        num_meshes: int = 16,
        image_size: int = 128,
        faces_per_pixel: int = 100,
        device="cuda",
        backend: str = "pytorch",
    ):
        device = torch.device(device)
        torch.manual_seed(231)

        # Create dummy outputs of rasterization
        N, S, K = num_meshes, image_size, faces_per_pixel
        F = 32  # num faces in the mesh
        pix_to_face = torch.randint(low=-1,
                                    high=F + 1,
                                    size=(N, S, S, K),
                                    device=device)
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        dists1 = torch.randn(size=(N, S, S, K),
                             requires_grad=True,
                             device=device)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists1,
        )
        blend_params = BlendParams(sigma=1e-3)

        blend_fn = (sigmoid_alpha_blend_vectorized
                    if backend == "pytorch" else sigmoid_alpha_blend)

        torch.cuda.synchronize()

        def fn():
            # test forward and backward pass
            images = blend_fn(colors, fragments, blend_params)
            images.sum().backward()
            torch.cuda.synchronize()

        return fn
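
A hedged usage note for the benchmark factories above: each returns a closure intended to be timed repeatedly (for example with torch.utils.benchmark or a benchmark harness); a single call runs one forward and one backward pass.

# Assumes a CUDA device, since the closure calls torch.cuda.synchronize().
fn = bm_sigmoid_alpha_blending(num_meshes=4, image_size=64, faces_per_pixel=10)
fn()  # one forward + backward pass of sigmoid alpha blending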