Example #1
    def test_orthographic(self):
        far = 10.0
        near = 1.0
        cameras = OpenGLOrthographicCameras(znear=near, zfar=far)
        P = cameras.get_projection_transform()

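        # A point on the far clipping plane should project to NDC depth 1.0.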
        vertices = torch.tensor([1, 2, far], dtype=torch.float32)
        projected_verts = torch.tensor([1, 2, 1], dtype=torch.float32)
        vertices = vertices[None, None, :]
        v1 = P.transform_points(vertices)
        v2 = orthographic_project_naive(vertices)
        self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
        self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))

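        # Moving the point to the near clipping plane should project to NDC depth 0.0.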
        vertices[..., 2] = near
        projected_verts[2] = 0.0
        v1 = P.transform_points(vertices)
        v2 = orthographic_project_naive(vertices)
        self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
        self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
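
orthographic_project_naive is a reference helper defined alongside these tests. A minimal sketch of what such a helper could look like, assuming it only scales x and y and passes z through unchanged (the body below is an illustrative guess, not the library's code):

    import torch

    def orthographic_project_naive(points, scale_xyz=(1.0, 1.0, 1.0)):
        # Illustrative guess: an orthographic camera drops the perspective
        # divide, so x and y are only scaled, and z is left untouched for
        # the projection matrix's near/far normalization to handle.
        x = points[..., 0] * scale_xyz[0]
        y = points[..., 1] * scale_xyz[1]
        z = points[..., 2]
        return torch.stack((x, y, z), dim=-1)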
Example #2
    def test_orthographic_mixed_inputs_grad(self):
        far = torch.tensor([10.0])
        near = 1.0
        scale = torch.tensor([[1.0, 1.0, 1.0]], requires_grad=True)
        cameras = OpenGLOrthographicCameras(znear=near,
                                            zfar=far,
                                            scale_xyz=scale)
        P = cameras.get_projection_transform()
        vertices = torch.tensor([1.0, 2.0, 10.0], dtype=torch.float32)
        vertices_batch = vertices[None, None, :]
        v1 = P.transform_points(vertices_batch)
        v1.sum().backward()
        self.assertIsNotNone(scale.grad)
        scale_grad = scale.grad.clone()
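        # Expected gradient of sum(P(v)) w.r.t. each scale component: the
        # vertex coordinate times the matching diagonal entry of the
        # projection matrix (the translation row does not depend on scale).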
        grad_scale = torch.tensor([[
            vertices[0] * P._matrix[:, 0, 0],
            vertices[1] * P._matrix[:, 1, 1],
            vertices[2] * P._matrix[:, 2, 2],
        ]])
        self.assertTrue(torch.allclose(scale_grad, grad_scale))
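
The expected gradient above follows from the chain rule: each projected coordinate is v_i * s_i * M_ii plus a term that does not depend on scale, so d(sum)/d(s_i) = v_i * M_ii (with unit scale, M_ii equals the base matrix diagonal). A standalone sketch of the same identity with hypothetical plain tensors, not part of the test suite:

    import torch

    # For y = sum(v * s * m_diag + c), the gradient dy/ds is v * m_diag.
    v = torch.tensor([1.0, 2.0, 10.0])
    m_diag = torch.tensor([1.0, 1.0, 1.0 / 9.0])  # illustrative diagonal values
    c = torch.tensor([0.0, 0.0, -1.0 / 9.0])      # scale-independent offset
    s = torch.ones(3, requires_grad=True)
    y = (v * s * m_diag + c).sum()
    y.backward()
    assert torch.allclose(s.grad, v * m_diag)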
Example #3
File: rendering.py  Project: neka-nat/csm
    def __init__(self, meshes, image_size=256, device='cuda'):

        super(ColorRenderer, self).__init__()

        self.meshes = meshes
        cameras = OpenGLOrthographicCameras(device=device)

        raster_settings = RasterizationSettings(image_size=image_size,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)
        lights = PointLights(device=device, location=((2.0, 2.0, -2.0), ))
        self.renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(device=device, lights=lights))
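
The composed MeshRenderer is then invoked directly on the mesh batch. A hypothetical forward method for this class (not shown in the csm source) could look like:

    def forward(self):
        # MeshRenderer rasterizes the meshes and shades them, returning an
        # (N, image_size, image_size, 4) RGBA tensor; TexturedSoftPhongShader
        # requires the meshes to carry textures.
        return self.renderer(self.meshes)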
Example #4
File: rendering.py  Project: neka-nat/csm
    def __init__(self, meshes: Meshes, image_size=256):
        """
        Initialization of the Renderer Class. Instances of the mask and depth renderer are create on corresponding
        device.

        :param device: The device, on which the computation is done.
        :param image_size: Image size for the rasterization. Default is 256.
        """
        super().__init__()
        self.meshes = meshes
        device = meshes.device

        # TODO: check how to implement weak perspective (scaled orthographic).
        cameras = OpenGLOrthographicCameras(device=device)

        self._rasterizer = MeshRasterizer(
            cameras=cameras,
            raster_settings=RasterizationSettings(image_size=image_size,
                                                  faces_per_pixel=100))

        self._shader = SoftSilhouetteShader(
            blend_params=BlendParams(sigma=1e-4, gamma=1e-4))
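
On the TODO above: one possible approximation of a weak-perspective (scaled orthographic) camera is to pass a scale to the orthographic camera, as Example #2 does with scale_xyz. A sketch of that idea (the mean depth is a hypothetical value, and this is one reading of the TODO, not the project's solution):

        # Weak perspective divides x and y by the average object depth;
        # an orthographic camera can mimic this via scale_xyz.
        mean_z = 5.0  # hypothetical average depth of the meshes
        s = 1.0 / mean_z
        cameras = OpenGLOrthographicCameras(device=device,
                                            scale_xyz=((s, s, 1.0),))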
Example #5
File: rendering.py  Project: neka-nat/csm
    def __init__(self, meshes: Meshes, device: str, image_size: int = 256):
        """
           Initialization of DepthRenderer. Initialization of the default mesh rasterizer and silhouette shader which is
           used because of simplicity.

           :param device: The device, on which the computation is done, e.g. cpu or cuda.
           :param image_size: Image size for the rasterization. Default is 256.
           :param meshes: A batch of meshes. pytorch3d.structures.Meshes. Dimension meshes in R^N.
                View https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/structures/meshes.py
                for additional information.
       """
        super(DepthRenderer, self).__init__()
        self._meshes = meshes

        # TODO: check how to implement weak perspective (scaled orthographic).
        cameras = OpenGLOrthographicCameras(device=device)

        raster_settings = RasterizationSettings(image_size=image_size)
        self._rasterizer = MeshRasterizer(cameras=cameras,
                                          raster_settings=raster_settings)

        self._shader = SoftSilhouetteShader(
            blend_params=BlendParams(sigma=1e-4, gamma=1e-4))
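
A depth map can then be read from the rasterizer's output: MeshRasterizer returns a Fragments object whose zbuf field holds per-pixel face depths. A hypothetical forward method along those lines (not shown in the csm source):

    def forward(self):
        # fragments.zbuf has shape (N, H, W, faces_per_pixel); the first
        # channel is the depth of the closest face at each pixel.
        fragments = self._rasterizer(self._meshes)
        return fragments.zbuf[..., 0]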
Example #6
    def test_pointcloud_with_features(self):
        device = torch.device("cuda:0")
        file_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        pointcloud_filename = file_dir / "PittsburghBridge/pointcloud.npz"

        # Note, this file is too large to check in to the repo.
        # Download the file to run the test locally.
        if not path.exists(pointcloud_filename):
            url = "https://dl.fbaipublicfiles.com/pytorch3d/data/PittsburghBridge/pointcloud.npz"
            msg = (
                "pointcloud.npz not found, download from %s, save it at the path %s, and rerun"
                % (url, pointcloud_filename)
            )
            warnings.warn(msg)
            return

        # Load point cloud
        pointcloud = np.load(pointcloud_filename)
        verts = torch.Tensor(pointcloud["verts"]).to(device)
        rgb_feats = torch.Tensor(pointcloud["rgb"]).to(device)

        verts.requires_grad = True
        rgb_feats.requires_grad = True
        point_cloud = Pointclouds(points=[verts], features=[rgb_feats])

        R, T = look_at_view_transform(20, 10, 0)
        cameras = OpenGLOrthographicCameras(device=device, R=R, T=T, znear=0.01)

        raster_settings = PointsRasterizationSettings(
            # Set image_size so it is not a multiple of 16 (min bin_size)
            # in order to confirm that there are no errors in coarse rasterization.
            image_size=500,
            radius=0.003,
            points_per_pixel=10,
        )

        renderer = PointsRenderer(
            rasterizer=PointsRasterizer(
                cameras=cameras, raster_settings=raster_settings
            ),
            compositor=AlphaCompositor(),
        )

        images = renderer(point_cloud)

        # Load reference image
        filename = "bridge_pointcloud.png"
        image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(point_cloud)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                debug_filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.detach().numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / debug_filename
                )
            self.assertClose(rgb, image_ref, atol=0.015)

        # Check grad exists.
        grad_images = torch.randn_like(images)
        images.backward(grad_images)
        self.assertIsNotNone(verts.grad)
        self.assertIsNotNone(rgb_feats.grad)
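
Stripped of the test harness, the point-cloud pipeline in this example reduces to a few steps. A minimal standalone sketch with a random cloud (this assumes a PyTorch3D release that still exports OpenGLOrthographicCameras; newer versions rename it FoVOrthographicCameras):

    import torch
    from pytorch3d.renderer import (
        AlphaCompositor,
        OpenGLOrthographicCameras,
        PointsRasterizationSettings,
        PointsRasterizer,
        PointsRenderer,
        look_at_view_transform,
    )
    from pytorch3d.structures import Pointclouds

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Hypothetical cloud: 1000 random points with random RGB features.
    verts = torch.rand(1000, 3, device=device) - 0.5
    rgb = torch.rand(1000, 3, device=device)
    cloud = Pointclouds(points=[verts], features=[rgb])

    R, T = look_at_view_transform(dist=20, elev=10, azim=0)
    cameras = OpenGLOrthographicCameras(device=device, R=R, T=T, znear=0.01)
    renderer = PointsRenderer(
        rasterizer=PointsRasterizer(
            cameras=cameras,
            raster_settings=PointsRasterizationSettings(
                image_size=256, radius=0.01, points_per_pixel=8
            ),
        ),
        compositor=AlphaCompositor(),
    )
    images = renderer(cloud)  # (1, 256, 256, 3) feature image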