Ejemplo n.º 1
0
 def __init__(self):
     """Set up a random sphere cloud, a fixed camera and the pulsar renderer.

     NOTE(review): relies on module-level N_POINTS, WIDTH, HEIGHT and
     Renderer from the enclosing file.
     """
     super(SceneModel, self).__init__()
     # Renderer blending parameter gamma (valid range [1., 1e-5]).
     self.gamma = 1.0
     # Points.
     torch.manual_seed(1)  # deterministic scene across runs
     vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32) * 10.0
     vert_pos[:, 2] += 25.0  # push the cloud in front of the camera
     vert_pos[:, :2] -= 5.0  # center it around the optical axis in x/y
     self.register_parameter("vert_pos",
                             nn.Parameter(vert_pos, requires_grad=True))
     self.register_parameter(
         "vert_col",
         nn.Parameter(torch.ones(N_POINTS, 3, dtype=torch.float32) * 0.5,
                      requires_grad=True),
     )
     self.register_parameter(
         "vert_rad",
         nn.Parameter(torch.ones(N_POINTS, dtype=torch.float32) * 0.3,
                      requires_grad=True),
     )
     # Camera: position (x, y, z), axis-angle rotation, focal length and
     # sensor size in world units. The pi rotation makes the camera look
     # 'back' in the right handed system.
     self.register_buffer(
         "cam_params",
         torch.tensor([0.0, 0.0, 0.0, 0.0, math.pi, 0.0, 5.0, 2.0],
                      dtype=torch.float32),
     )
     # The volumetric optimization works better with a higher number of tracked
     # intersections per ray.
     self.renderer = Renderer(WIDTH,
                              HEIGHT,
                              N_POINTS,
                              n_track=32,
                              right_handed_system=True)
Ejemplo n.º 2
0
    def test_basic_3chan(self):
        """Test rendering one image with one sphere, 3 channels.

        Renders a single sphere centered in a 1000x1000 image and checks
        that the center pixel is hit and carries the sphere color.
        """
        from pytorch3d.renderer.points.pulsar import Renderer

        LOGGER.info("Setting up rendering test for 3 channels...")
        n_points = 1
        width = 1_000
        height = 1_000
        renderer = Renderer(width, height, n_points)
        # One sphere on the optical axis, 25 units in front of the camera.
        vert_pos = torch.tensor([[0.0, 0.0, 25.0]], dtype=torch.float32)
        vert_col = torch.tensor([[0.3, 0.5, 0.7]], dtype=torch.float32)
        vert_rad = torch.tensor([1.0], dtype=torch.float32)
        # Camera at the origin, no rotation; focal length 5, sensor size 2.
        cam_params = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0],
                                  dtype=torch.float32)
        for device in devices:
            vert_pos = vert_pos.to(device)
            vert_col = vert_col.to(device)
            vert_rad = vert_rad.to(device)
            cam_params = cam_params.to(device)
            renderer = renderer.to(device)
            LOGGER.info("Rendering...")
            # Measurements.
            result = renderer.forward(vert_pos, vert_col, vert_rad, cam_params,
                                      1.0e-1, 45.0)
            # mode=1: render hit information instead of blended colors
            # (asserted to be exactly 1.0 at the sphere center below).
            hits = renderer.forward(
                vert_pos,
                vert_col,
                vert_rad,
                cam_params,
                1.0e-1,
                45.0,
                percent_allowed_difference=0.01,
                mode=1,
            )
            # Write debug images unless running as an internal (FB) test.
            if not os.environ.get("FB_TEST", False):
                imageio.imsave(
                    path.join(
                        path.dirname(__file__),
                        "test_out",
                        "test_forward_TestForward_test_basic_3chan.png",
                    ),
                    (result * 255.0).cpu().to(torch.uint8).numpy(),
                )
                imageio.imsave(
                    path.join(
                        path.dirname(__file__),
                        "test_out",
                        "test_forward_TestForward_test_basic_3chan_hits.png",
                    ),
                    (hits * 255.0).cpu().to(torch.uint8).numpy(),
                )
            # The image center must be hit and show the sphere color.
            self.assertEqual(hits[500, 500, 0].item(), 1.0)
            self.assertTrue(
                np.allclose(
                    result[500, 500, :].cpu().numpy(),
                    [0.3, 0.5, 0.7],
                    rtol=1e-2,
                    atol=1e-2,
                ))
Ejemplo n.º 3
0
    def test_basic(self):
        """Compare an orthogonal-projection depth map against a stored reference.

        Loads a reference scene from IN_REF_FP, renders it with a 1-channel
        orthogonal renderer, derives the depth map from the forward info and
        checks it against OUT_REF_FP. A visualization is written to test_out/
        unless running as an internal (FB) test.
        """
        from pytorch3d.renderer.points.pulsar import Renderer

        for device in devices:
            gamma = 1e-5  # close to hard blending
            max_depth = 15.0
            min_depth = 5.0
            renderer = Renderer(
                256,
                256,
                10000,
                orthogonal_projection=True,
                right_handed_system=False,
                n_channels=1,
            ).to(device)
            data = torch.load(IN_REF_FP, map_location="cpu")
            # For creating the reference files.
            # Use in case of updates.
            # data["pos"] = torch.rand_like(data["pos"])
            # data["pos"][:, 0] = data["pos"][:, 0] * 2. - 1.
            # data["pos"][:, 1] = data["pos"][:, 1] * 2. - 1.
            # data["pos"][:, 2] = data["pos"][:, 2] + 9.5
            result, result_info = renderer.forward(
                data["pos"].to(device),
                data["col"].to(device),
                data["rad"].to(device),
                data["cam_params"].to(device),
                gamma,
                min_depth=min_depth,
                max_depth=max_depth,
                return_forward_info=True,
                bg_col=torch.zeros(1, device=device, dtype=torch.float32),
                percent_allowed_difference=0.01,
            )
            depth_map = Renderer.depth_map_from_result_info_nograd(result_info)
            # Hoisted: the masked minimum was computed twice before, with the
            # two occurrences inconsistently using `> 0` and `> 0.0`.
            min_visible_depth = depth_map[depth_map > 0.0].min()
            # Map visible depths into [50, 250] for an 8-bit visualization.
            depth_vis = (depth_map - min_visible_depth) * 200 / (
                depth_map.max() - min_visible_depth) + 50
            if not os.environ.get("FB_TEST", False):
                imageio.imwrite(
                    path.join(
                        path.dirname(__file__),
                        "test_out",
                        "test_depth_test_basic_depth.png",
                    ),
                    depth_vis.cpu().numpy().astype(np.uint8),
                )
            # For creating the reference files.
            # Use in case of updates.
            # torch.save(
            #     data, path.join(path.dirname(__file__), "reference", "nr0000-in.pth")
            # )
            # torch.save(
            #     {"sphere_ids": sphere_ids, "depth_map": depth_map},
            #     path.join(path.dirname(__file__), "reference", "nr0000-out.pth"),
            # )
            # sys.exit(0)
            reference = torch.load(OUT_REF_FP, map_location="cpu")
            self.assertClose(reference["depth_map"].to(device), depth_map)
Ejemplo n.º 4
0
 def __init__(self):
     """Set up a frozen random scene with learnable camera parameters.

     Sphere positions, colors and radii are fixed (requires_grad=False);
     only cam_pos, cam_rot (6D rotation representation) and cam_sensor
     receive gradients.
     """
     super(SceneModel, self).__init__()
     # Renderer blending parameter gamma.
     self.gamma = 0.1
     # Points.
     torch.manual_seed(1)  # deterministic scene across runs
     vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32) * 10.0
     vert_pos[:, 2] += 25.0
     vert_pos[:, :2] -= 5.0
     self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=False))
     self.register_parameter(
         "vert_col",
         nn.Parameter(
             torch.rand(N_POINTS, 3, dtype=torch.float32), requires_grad=False
         ),
     )
     self.register_parameter(
         "vert_rad",
         nn.Parameter(
             torch.rand(N_POINTS, dtype=torch.float32), requires_grad=False
         ),
     )
     # NOTE(review): the camera starts slightly perturbed (0.1 offsets) —
     # presumably to give the optimization a non-trivial starting point.
     self.register_parameter(
         "cam_pos",
         nn.Parameter(
             torch.tensor([0.1, 0.1, 0.0], dtype=torch.float32), requires_grad=True
         ),
     )
     self.register_parameter(
         "cam_rot",
         # We're using the 6D rot. representation for better gradients.
         nn.Parameter(
             matrix_to_rotation_6d(
                 axis_angle_to_matrix(
                     torch.tensor(
                         [
                             [0.02, math.pi + 0.02, 0.01],
                         ],
                         dtype=torch.float32,
                     )
                 )
             )[0],
             requires_grad=True,
         ),
     )
     # Learnable focal length and sensor width (world units).
     self.register_parameter(
         "cam_sensor",
         nn.Parameter(
             torch.tensor([4.8, 1.8], dtype=torch.float32), requires_grad=True
         ),
     )
     self.renderer = Renderer(WIDTH, HEIGHT, N_POINTS, right_handed_system=True)
Ejemplo n.º 5
0
 def __init__(self):
     """Set up a batched scene (batch dim 1) with opacity and 8 camera views."""
     super(SceneModel, self).__init__()
     # Renderer blending parameter gamma.
     self.gamma = 1.0
     # Points.
     torch.manual_seed(1)  # deterministic scene across runs
     vert_pos = torch.rand((1, N_POINTS, 3), dtype=torch.float32) * 10.0
     vert_pos[:, :, 2] += 25.0
     vert_pos[:, :, :2] -= 5.0
     self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=True))
     self.register_parameter(
         "vert_col",
         nn.Parameter(
             torch.ones(1, N_POINTS, 3, dtype=torch.float32) * 0.5,
             requires_grad=True,
         ),
     )
     self.register_parameter(
         "vert_rad",
         nn.Parameter(
             torch.ones(1, N_POINTS, dtype=torch.float32) * 0.05, requires_grad=True
         ),
     )
     # Per-sphere opacity, fully opaque initially.
     self.register_parameter(
         "vert_opy",
         nn.Parameter(
             torch.ones(1, N_POINTS, dtype=torch.float32), requires_grad=True
         ),
     )
     # Eight fixed camera poses on a circle of radius 35 around (0, 0, 30);
     # the `+ math.pi` turns each camera toward the scene in the right
     # handed system.
     self.register_buffer(
         "cam_params",
         torch.tensor(
             [
                 [
                     np.sin(angle) * 35.0,
                     0.0,
                     30.0 - np.cos(angle) * 35.0,
                     0.0,
                     -angle + math.pi,
                     0.0,
                     5.0,
                     2.0,
                 ]
                 for angle in [-1.5, -0.8, -0.4, -0.1, 0.1, 0.4, 0.8, 1.5]
             ],
             dtype=torch.float32,
         ),
     )
     self.renderer = Renderer(WIDTH, HEIGHT, N_POINTS, right_handed_system=True)
Ejemplo n.º 6
0
    def __init__(self):
        """Set up a batched point cloud where only the colors are trainable."""
        super(SceneModel, self).__init__()
        from pytorch3d.renderer.points.pulsar import Renderer

        # Renderer blending parameter gamma.
        self.gamma = 1.0
        # Points.
        torch.manual_seed(1)  # deterministic scene across runs
        vert_pos = torch.rand((1, n_points, 3), dtype=torch.float32) * 10.0
        vert_pos[:, :, 2] += 25.0
        vert_pos[:, :, :2] -= 5.0
        # Positions, radii and opacities are frozen; colors start at zero
        # and are the only trainable parameter.
        self.register_parameter("vert_pos",
                                nn.Parameter(vert_pos, requires_grad=False))
        self.register_parameter(
            "vert_col",
            nn.Parameter(torch.zeros(1, n_points, 3, dtype=torch.float32),
                         requires_grad=True),
        )
        self.register_parameter(
            "vert_rad",
            nn.Parameter(
                torch.ones(1, n_points, dtype=torch.float32) * 0.001,
                requires_grad=False,
            ),
        )
        self.register_parameter(
            "vert_opy",
            nn.Parameter(torch.ones(1, n_points, dtype=torch.float32),
                         requires_grad=False),
        )
        # Eight fixed camera poses on a circle of radius 35 around (0, 0, 30).
        self.register_buffer(
            "cam_params",
            torch.tensor(
                [[
                    np.sin(angle) * 35.0,
                    0.0,
                    30.0 - np.cos(angle) * 35.0,
                    0.0,
                    -angle,
                    0.0,
                    5.0,
                    2.0,
                ] for angle in [-1.5, -0.8, -0.4, -0.1, 0.1, 0.4, 0.8, 1.5]],
                dtype=torch.float32,
            ),
        )
        self.renderer = Renderer(width, height, n_points)
Ejemplo n.º 7
0
class SceneModel(nn.Module):
    """
    A simple scene model to demonstrate use of pulsar in PyTorch modules.

    The scene model is parameterized with sphere locations (vert_pos),
    channel content (vert_col), radiuses (vert_rad), camera position (cam_pos),
    camera rotation (cam_rot) and sensor focal length and width (cam_sensor).

    The forward method of the model renders this scene description. Any
    of these parameters could instead be passed as inputs to the forward
    method and come from a different model.
    """

    def __init__(self):
        super(SceneModel, self).__init__()
        # Renderer blending parameter gamma.
        self.gamma = 1.0
        # Deterministic random point cloud in front of the camera.
        torch.manual_seed(1)
        positions = torch.rand(N_POINTS, 3, dtype=torch.float32) * 10.0
        positions[:, 2] += 25.0
        positions[:, :2] -= 5.0
        colors = torch.ones(N_POINTS, 3, dtype=torch.float32) * 0.5
        radii = torch.ones(N_POINTS, dtype=torch.float32) * 0.3
        self.register_parameter(
            "vert_pos", nn.Parameter(positions, requires_grad=True)
        )
        self.register_parameter(
            "vert_col", nn.Parameter(colors, requires_grad=True)
        )
        self.register_parameter(
            "vert_rad", nn.Parameter(radii, requires_grad=True)
        )
        # Fixed camera: position, axis-angle rotation, focal length, sensor
        # size. Looking 'back' (pi rotation) for the right handed system.
        camera = torch.tensor(
            [0.0, 0.0, 0.0, 0.0, math.pi, 0.0, 5.0, 2.0], dtype=torch.float32
        )
        self.register_buffer("cam_params", camera)
        # The volumetric optimization works better with a higher number of
        # tracked intersections per ray.
        self.renderer = Renderer(
            WIDTH, HEIGHT, N_POINTS, n_track=32, right_handed_system=True
        )

    def forward(self):
        """Render the scene; returns the image plus the forward info."""
        return self.renderer.forward(
            self.vert_pos,
            self.vert_col,
            self.vert_rad,
            self.cam_params,
            self.gamma,
            45.0,
            return_forward_info=True,
        )
Ejemplo n.º 8
0
    class Model(nn.Module):
        """A dummy model to test the integration into a stacked model."""

        def __init__(self):
            super(Model, self).__init__()
            # Fixed blending parameter used for every render call.
            self.gamma = 0.1
            self.renderer = Renderer(width, height, n_points)

        def forward(self, vp, vc, vr, cam_params):
            """Render spheres (vp, vc, vr) from cam_params; max depth 45."""
            max_depth = 45.0
            return self.renderer.forward(
                vp, vc, vr, cam_params, self.gamma, max_depth
            )
Ejemplo n.º 9
0
def _bm_pulsar_backward():
    """Build a benchmark closure for the pulsar backward pass on CUDA.

    Sets up a one-million-sphere scene, runs a single forward pass and
    returns a closure that backpropagates through it repeatedly (the graph
    is retained so the closure can be called more than once).
    """
    n_points = 1_000_000
    width = 1_000
    height = 1_000
    renderer = Renderer(width, height, n_points)
    # Generate sample data.
    torch.manual_seed(1)
    vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
    vert_pos[:, 2] += 25.0
    vert_pos[:, :2] -= 5.0
    vert_col = torch.rand(n_points, 3, dtype=torch.float32)
    vert_rad = torch.rand(n_points, dtype=torch.float32)
    cam_params = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0],
                              dtype=torch.float32)
    device = torch.device("cuda")
    renderer = renderer.to(device)
    # `torch.autograd.Variable` is deprecated; a plain leaf tensor with
    # requires_grad set behaves identically for autograd.
    vert_pos_var = vert_pos.to(device).requires_grad_()
    vert_col_var = vert_col.to(device).requires_grad_()
    vert_rad_var = vert_rad.to(device).requires_grad_()
    cam_params_var = cam_params.to(device).requires_grad_()
    res = renderer.forward(
        vert_pos_var,
        vert_col_var,
        vert_rad_var,
        cam_params_var,
        1.0e-1,
        45.0,
        percent_allowed_difference=0.01,
    )
    loss = res.sum()

    def bm_closure():
        # Retain the graph so repeated benchmark iterations can re-run
        # backward; synchronize so GPU time is included in the measurement.
        loss.backward(retain_graph=True)
        torch.cuda.synchronize()

    return bm_closure
Ejemplo n.º 10
0
def cli():
    """
    Basic example for the pulsar sphere renderer.

    Writes to `basic.png`.
    """
    LOGGER.info("Rendering on GPU...")
    torch.manual_seed(1)
    n_points = 10
    width = 1_000
    height = 1_000
    device = torch.device("cuda")
    # The PyTorch3D system is right handed; in pulsar you can choose the
    # handedness. For easy reproducibility we use a right handed coordinate
    # system here.
    renderer = Renderer(
        width, height, n_points, right_handed_system=True
    ).to(device)
    # Generate sample data: random spheres in front of the camera.
    vert_pos = (
        torch.rand(n_points, 3, dtype=torch.float32, device=device) * 10.0
    )
    vert_pos[:, 2] += 25.0
    vert_pos[:, :2] -= 5.0
    vert_col = torch.rand(n_points, 3, dtype=torch.float32, device=device)
    vert_rad = torch.rand(n_points, dtype=torch.float32, device=device)
    # Camera: position (x, y, z) at the origin, axis-angle rotation, focal
    # length and sensor size in world units. Because of the right handed
    # system, the camera must look 'back' (rotation pi around y).
    cam_params = torch.tensor(
        [0.0, 0.0, 0.0, 0.0, math.pi, 0.0, 5.0, 2.0],
        dtype=torch.float32,
        device=device,
    )
    # Render with blending parameter gamma 1e-1 (valid range [1., 1e-5])
    # and a maximum depth of 45.
    image = renderer(vert_pos, vert_col, vert_rad, cam_params, 1.0e-1, 45.0)
    LOGGER.info("Writing image to `%s`.", path.abspath("basic.png"))
    imageio.imsave(
        "basic.png", (image.cpu().detach() * 255.0).to(torch.uint8).numpy()
    )
    LOGGER.info("Done.")
Ejemplo n.º 11
0
 def __init__(self):
     """Wrap a pulsar Renderer together with a fixed blending parameter."""
     super(Model, self).__init__()
     # Blending parameter gamma used for rendering.
     self.gamma = 0.1
     self.renderer = Renderer(width, height, n_points)
Ejemplo n.º 12
0
    def test_basic(self):
        """Basic forward test.

        Renders the same scene with a left handed and a right handed
        renderer (sphere z coordinates negated for the latter) and checks
        that the images and hit maps agree.
        """
        from pytorch3d.renderer.points.pulsar import Renderer

        n_points = 10
        width = 1000
        height = 1000
        renderer_left = Renderer(width,
                                 height,
                                 n_points,
                                 right_handed_system=False)
        renderer_right = Renderer(width,
                                  height,
                                  n_points,
                                  right_handed_system=True)
        # Generate sample data.
        torch.manual_seed(1)
        vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
        vert_pos[:, 2] += 25.0
        vert_pos[:, :2] -= 5.0
        # Mirror the z axis for the right handed system.
        vert_pos_neg = vert_pos.clone()
        vert_pos_neg[:, 2] *= -1.0
        vert_col = torch.rand(n_points, 3, dtype=torch.float32)
        vert_rad = torch.rand(n_points, dtype=torch.float32)
        cam_params = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0],
                                  dtype=torch.float32)
        for device in devices:
            vert_pos = vert_pos.to(device)
            vert_pos_neg = vert_pos_neg.to(device)
            vert_col = vert_col.to(device)
            vert_rad = vert_rad.to(device)
            cam_params = cam_params.to(device)
            renderer_left = renderer_left.to(device)
            renderer_right = renderer_right.to(device)
            result_left = (renderer_left.forward(
                vert_pos,
                vert_col,
                vert_rad,
                cam_params,
                1.0e-1,
                45.0,
                percent_allowed_difference=0.01,
            ).cpu().detach().numpy())
            # mode=1: render hit information instead of blended colors.
            hits_left = (renderer_left.forward(
                vert_pos,
                vert_col,
                vert_rad,
                cam_params,
                1.0e-1,
                45.0,
                percent_allowed_difference=0.01,
                mode=1,
            ).cpu().detach().numpy())
            result_right = (renderer_right.forward(
                vert_pos_neg,
                vert_col,
                vert_rad,
                cam_params,
                1.0e-1,
                45.0,
                percent_allowed_difference=0.01,
            ).cpu().detach().numpy())
            hits_right = (renderer_right.forward(
                vert_pos_neg,
                vert_col,
                vert_rad,
                cam_params,
                1.0e-1,
                45.0,
                percent_allowed_difference=0.01,
                mode=1,
            ).cpu().detach().numpy())
            # Both handedness conventions must produce identical output.
            self.assertClose(result_left, result_right)
            self.assertClose(hits_left, hits_right)
Ejemplo n.º 13
0
class SceneModel(nn.Module):
    """
    A simple scene model to demonstrate use of pulsar in PyTorch modules.

    The scene model is parameterized with sphere locations (vert_pos),
    channel content (vert_col), radiuses (vert_rad), camera position (cam_pos),
    camera rotation (cam_rot) and sensor focal length and width (cam_sensor).

    The forward method of the model renders this scene description. Any
    of these parameters could instead be passed as inputs to the forward
    method and come from a different model. Optionally, camera parameters can
    be provided to the forward method in which case the scene is rendered
    using those parameters.
    """

    def __init__(self):
        super(SceneModel, self).__init__()
        # Renderer blending parameter gamma.
        self.gamma = 1.0
        # Points.
        torch.manual_seed(1)  # deterministic scene across runs
        # Batched layout (1, N_POINTS, ...) so views can be expanded in forward.
        vert_pos = torch.rand((1, N_POINTS, 3), dtype=torch.float32) * 10.0
        vert_pos[:, :, 2] += 25.0
        vert_pos[:, :, :2] -= 5.0
        self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=True))
        self.register_parameter(
            "vert_col",
            nn.Parameter(
                torch.ones(1, N_POINTS, 3, dtype=torch.float32) * 0.5,
                requires_grad=True,
            ),
        )
        self.register_parameter(
            "vert_rad",
            nn.Parameter(
                torch.ones(1, N_POINTS, dtype=torch.float32) * 0.05, requires_grad=True
            ),
        )
        # Per-sphere opacity, fully opaque initially.
        self.register_parameter(
            "vert_opy",
            nn.Parameter(
                torch.ones(1, N_POINTS, dtype=torch.float32), requires_grad=True
            ),
        )
        # Eight fixed camera poses on a circle of radius 35 around (0, 0, 30);
        # the `+ math.pi` turns each camera toward the scene in the right
        # handed system.
        self.register_buffer(
            "cam_params",
            torch.tensor(
                [
                    [
                        np.sin(angle) * 35.0,
                        0.0,
                        30.0 - np.cos(angle) * 35.0,
                        0.0,
                        -angle + math.pi,
                        0.0,
                        5.0,
                        2.0,
                    ]
                    for angle in [-1.5, -0.8, -0.4, -0.1, 0.1, 0.4, 0.8, 1.5]
                ],
                dtype=torch.float32,
            ),
        )
        self.renderer = Renderer(WIDTH, HEIGHT, N_POINTS, right_handed_system=True)

    def forward(self, cam=None):
        """Render the scene.

        Args:
            cam: optional camera parameters; if None, all eight stored
                `cam_params` views are rendered.
        """
        if cam is None:
            cam = self.cam_params
            n_views = 8
        else:
            n_views = 1
        # Broadcast the single scene across the requested number of views.
        return self.renderer.forward(
            self.vert_pos.expand(n_views, -1, -1),
            self.vert_col.expand(n_views, -1, -1),
            self.vert_rad.expand(n_views, -1),
            cam,
            self.gamma,
            45.0,
        )
Ejemplo n.º 14
0
    def test_principal_point(self):
        """Test shifting the principal point.

        Renders a single dark sphere with the principal point shifted by
        -250 and then +250 units (presumably pixels — the checked pixel
        moves by 250 in a 1000px image) and verifies the sphere appears at
        the corresponding off-center location.
        NOTE(review): both loops write the same debug image path, so the
        second render overwrites the first.
        """
        from pytorch3d.renderer.points.pulsar import Renderer

        LOGGER.info("Setting up rendering test for shifted principal point...")
        n_points = 1
        width = 1_000
        height = 1_000
        renderer = Renderer(width, height, n_points, n_channels=1)
        vert_pos = torch.tensor([[0.0, 0.0, 25.0]], dtype=torch.float32)
        vert_col = torch.tensor([[0.0]], dtype=torch.float32)
        vert_rad = torch.tensor([1.0], dtype=torch.float32)
        # Camera with two extra trailing entries: the principal point offsets.
        cam_params = torch.tensor(
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0, 0.0, 0.0],
            dtype=torch.float32)
        for device in devices:
            vert_pos = vert_pos.to(device)
            vert_col = vert_col.to(device)
            vert_rad = vert_rad.to(device)
            cam_params = cam_params.to(device)
            # Shift the principal point negatively in both axes.
            cam_params[-2] = -250.0
            cam_params[-1] = -250.0
            renderer = renderer.to(device)
            LOGGER.info("Rendering...")
            # Measurements.
            result = renderer.forward(vert_pos, vert_col, vert_rad, cam_params,
                                      1.0e-1, 45.0)
            if not os.environ.get("FB_TEST", False):
                imageio.imsave(
                    path.join(
                        path.dirname(__file__),
                        "test_out",
                        "test_forward_TestForward_test_principal_point.png",
                    ),
                    (result * 255.0).cpu().to(torch.uint8).numpy(),
                )
            # The sphere (color 0.0) must show up at the shifted location.
            self.assertTrue(
                np.allclose(result[750, 750, :].cpu().numpy(), [0.0],
                            rtol=1e-2,
                            atol=1e-2))
        for device in devices:
            vert_pos = vert_pos.to(device)
            vert_col = vert_col.to(device)
            vert_rad = vert_rad.to(device)
            cam_params = cam_params.to(device)
            # Repeat with a positive principal point shift.
            cam_params[-2] = 250.0
            cam_params[-1] = 250.0
            renderer = renderer.to(device)
            LOGGER.info("Rendering...")
            # Measurements.
            result = renderer.forward(vert_pos, vert_col, vert_rad, cam_params,
                                      1.0e-1, 45.0)
            if not os.environ.get("FB_TEST", False):
                imageio.imsave(
                    path.join(
                        path.dirname(__file__),
                        "test_out",
                        "test_forward_TestForward_test_principal_point.png",
                    ),
                    (result * 255.0).cpu().to(torch.uint8).numpy(),
                )
            self.assertTrue(
                np.allclose(result[250, 250, :].cpu().numpy(), [0.0],
                            rtol=1e-2,
                            atol=1e-2))
Ejemplo n.º 15
0
class SceneModel(nn.Module):
    """A simple model to demonstrate use in Modules."""
    def __init__(self):
        super(SceneModel, self).__init__()
        from pytorch3d.renderer.points.pulsar import Renderer

        # Renderer blending parameter gamma.
        self.gamma = 1.0
        # Points.
        torch.manual_seed(1)  # deterministic scene across runs
        vert_pos = torch.rand((1, n_points, 3), dtype=torch.float32) * 10.0
        vert_pos[:, :, 2] += 25.0
        vert_pos[:, :, :2] -= 5.0
        # Positions, radii and opacities are frozen; colors start at zero
        # and are the only trainable parameter.
        self.register_parameter("vert_pos",
                                nn.Parameter(vert_pos, requires_grad=False))
        self.register_parameter(
            "vert_col",
            nn.Parameter(torch.zeros(1, n_points, 3, dtype=torch.float32),
                         requires_grad=True),
        )
        self.register_parameter(
            "vert_rad",
            nn.Parameter(
                torch.ones(1, n_points, dtype=torch.float32) * 0.001,
                requires_grad=False,
            ),
        )
        self.register_parameter(
            "vert_opy",
            nn.Parameter(torch.ones(1, n_points, dtype=torch.float32),
                         requires_grad=False),
        )
        # Eight fixed camera poses on a circle of radius 35 around (0, 0, 30).
        self.register_buffer(
            "cam_params",
            torch.tensor(
                [[
                    np.sin(angle) * 35.0,
                    0.0,
                    30.0 - np.cos(angle) * 35.0,
                    0.0,
                    -angle,
                    0.0,
                    5.0,
                    2.0,
                ] for angle in [-1.5, -0.8, -0.4, -0.1, 0.1, 0.4, 0.8, 1.5]],
                dtype=torch.float32,
            ),
        )
        self.renderer = Renderer(width, height, n_points)

    def forward(self, cam=None):
        """Render the scene and return the image plus the forward info.

        Args:
            cam: optional camera parameters; if None, all eight stored
                `cam_params` views are rendered.
        """
        if cam is None:
            cam = self.cam_params
            n_views = 8
        else:
            n_views = 1
        # Broadcast the single scene across the requested number of views.
        return self.renderer.forward(
            self.vert_pos.expand(n_views, -1, -1),
            self.vert_col.expand(n_views, -1, -1),
            self.vert_rad.expand(n_views, -1),
            cam,
            self.gamma,
            45.0,
            return_forward_info=True,
        )
Ejemplo n.º 16
0
class SceneModel(nn.Module):
    """
    A simple scene model to demonstrate use of pulsar in PyTorch modules.

    The scene model is parameterized with sphere locations (vert_pos),
    channel content (vert_col), radiuses (vert_rad), camera position (cam_pos),
    camera rotation (cam_rot) and sensor focal length and width (cam_sensor).

    The forward method of the model renders this scene description. Any
    of these parameters could instead be passed as inputs to the forward
    method and come from a different model.
    """

    def __init__(self):
        super(SceneModel, self).__init__()
        # Renderer blending parameter gamma.
        self.gamma = 0.1
        # Points.
        torch.manual_seed(1)  # deterministic scene across runs
        vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32) * 10.0
        vert_pos[:, 2] += 25.0
        vert_pos[:, :2] -= 5.0
        # Scene content is fixed (requires_grad=False): only the camera
        # parameters below are optimized.
        self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=False))
        self.register_parameter(
            "vert_col",
            nn.Parameter(
                torch.rand(N_POINTS, 3, dtype=torch.float32), requires_grad=False
            ),
        )
        self.register_parameter(
            "vert_rad",
            nn.Parameter(
                torch.rand(N_POINTS, dtype=torch.float32), requires_grad=False
            ),
        )
        self.register_parameter(
            "cam_pos",
            nn.Parameter(
                torch.tensor([0.1, 0.1, 0.0], dtype=torch.float32), requires_grad=True
            ),
        )
        self.register_parameter(
            "cam_rot",
            # We're using the 6D rot. representation for better gradients.
            nn.Parameter(
                matrix_to_rotation_6d(
                    axis_angle_to_matrix(
                        torch.tensor(
                            [
                                [0.02, math.pi + 0.02, 0.01],
                            ],
                            dtype=torch.float32,
                        )
                    )
                )[0],
                requires_grad=True,
            ),
        )
        # Learnable focal length and sensor width (world units).
        self.register_parameter(
            "cam_sensor",
            nn.Parameter(
                torch.tensor([4.8, 1.8], dtype=torch.float32), requires_grad=True
            ),
        )
        self.renderer = Renderer(WIDTH, HEIGHT, N_POINTS, right_handed_system=True)

    def forward(self):
        """Render the scene with the current (learnable) camera parameters."""
        # Concatenate position, 6D rotation and sensor parameters into the
        # full camera parameter vector expected by the renderer.
        return self.renderer.forward(
            self.vert_pos,
            self.vert_col,
            self.vert_rad,
            torch.cat([self.cam_pos, self.cam_rot, self.cam_sensor]),
            self.gamma,
            45.0,
        )
Ejemplo n.º 17
0
    def test_basic(self):
        """Basic forward test.

        Renders the same scene with 1, 3 and 8 channel renderers and checks
        that the shared channels and the hit maps agree across them.
        """
        from pytorch3d.renderer.points.pulsar import Renderer
        import torch

        n_points = 10
        width = 1_000
        height = 1_000
        renderer_1 = Renderer(width, height, n_points, n_channels=1)
        renderer_3 = Renderer(width, height, n_points, n_channels=3)
        renderer_8 = Renderer(width, height, n_points, n_channels=8)
        # Generate sample data.
        torch.manual_seed(1)
        vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
        vert_pos[:, 2] += 25.0
        vert_pos[:, :2] -= 5.0
        # 8-channel colors; the narrower renderers get channel slices of it.
        vert_col = torch.rand(n_points, 8, dtype=torch.float32)
        vert_rad = torch.rand(n_points, dtype=torch.float32)
        cam_params = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0],
                                  dtype=torch.float32)
        for device in devices:
            vert_pos = vert_pos.to(device)
            vert_col = vert_col.to(device)
            vert_rad = vert_rad.to(device)
            cam_params = cam_params.to(device)
            renderer_1 = renderer_1.to(device)
            renderer_3 = renderer_3.to(device)
            renderer_8 = renderer_8.to(device)
            result_1 = (renderer_1.forward(
                vert_pos,
                vert_col[:, :1],
                vert_rad,
                cam_params,
                1.0e-1,
                45.0,
                percent_allowed_difference=0.01,
            ).cpu().detach().numpy())
            # mode=1: render hit information instead of blended colors.
            hits_1 = (renderer_1.forward(
                vert_pos,
                vert_col[:, :1],
                vert_rad,
                cam_params,
                1.0e-1,
                45.0,
                percent_allowed_difference=0.01,
                mode=1,
            ).cpu().detach().numpy())
            result_3 = (renderer_3.forward(
                vert_pos,
                vert_col[:, :3],
                vert_rad,
                cam_params,
                1.0e-1,
                45.0,
                percent_allowed_difference=0.01,
            ).cpu().detach().numpy())
            hits_3 = (renderer_3.forward(
                vert_pos,
                vert_col[:, :3],
                vert_rad,
                cam_params,
                1.0e-1,
                45.0,
                percent_allowed_difference=0.01,
                mode=1,
            ).cpu().detach().numpy())
            result_8 = (renderer_8.forward(
                vert_pos,
                vert_col,
                vert_rad,
                cam_params,
                1.0e-1,
                45.0,
                percent_allowed_difference=0.01,
            ).cpu().detach().numpy())
            hits_8 = (renderer_8.forward(
                vert_pos,
                vert_col,
                vert_rad,
                cam_params,
                1.0e-1,
                45.0,
                percent_allowed_difference=0.01,
                mode=1,
            ).cpu().detach().numpy())
            # Channel prefixes and hit maps must agree regardless of the
            # renderer's channel count.
            self.assertClose(result_1, result_3[:, :, :1])
            self.assertClose(result_3, result_8[:, :, :3])
            self.assertClose(hits_1, hits_3)
            self.assertClose(hits_8, hits_3)