Exemple #1
0
    def test_orthographic(self):
        """With default parameters the orthographic projection is the identity map."""
        camera = OrthographicCameras()
        transform = camera.get_projection_transform()

        verts = torch.randn([3, 4, 3], dtype=torch.float32)
        expected = verts.clone()
        out_transform = transform.transform_points(verts)
        out_naive = orthographic_project_naive(verts)

        # xy must agree with the naive reference; the full result is unchanged.
        self.assertClose(out_transform[..., :2], out_naive[..., :2])
        self.assertClose(out_transform, expected)
Exemple #2
0
 def test_orthographic_kwargs(self):
     """Kwargs given to get_projection_transform override the constructor values."""
     cameras = OrthographicCameras(
         focal_length=5.0, principal_point=((2.5, 2.5), )
     )
     P = cameras.get_projection_transform(
         focal_length=2.0, principal_point=((2.5, 3.5), )
     )
     verts = torch.randn([3, 4, 3], dtype=torch.float32)
     expected = verts.clone()
     # The override values (focal 2.0, principal point (2.5, 3.5)) must be
     # the ones applied, not those passed to the constructor.
     expected[:, :, :2] *= 2.0
     expected[:, :, 0] += 2.5
     expected[:, :, 1] += 3.5
     self.assertClose(P.transform_points(verts), expected)
    def test_orthographic_scaled(self):
        """Anisotropic focal lengths scale the x and y axes independently."""
        fx = 10.0
        fy = 15.0

        cameras = OrthographicCameras(focal_length=((fx, fy),))
        P = cameras.get_projection_transform()

        verts = torch.randn([3, 4, 3], dtype=torch.float32)
        expected = verts.clone()
        expected[:, :, 0] *= fx
        expected[:, :, 1] *= fy

        out_transform = P.transform_points(verts)
        out_naive = orthographic_project_naive(verts, scale_xyz=(fx, fy, 1.0))
        out_camera = cameras.transform_points(verts)

        # Projection transform, camera call and naive reference must all agree.
        self.assertClose(out_transform[..., :2], out_naive[..., :2])
        self.assertClose(out_camera[..., :2], out_naive[..., :2])
        self.assertClose(out_transform, expected)
Exemple #4
0
    def test_join_cameras_as_batch_errors(self):
        """join_cameras_as_batch rejects mixed types, devices and NDC conventions."""
        cam0 = PerspectiveCameras(device="cuda:0")
        cam1 = OrthographicCameras(device="cuda:0")

        # Mixing camera classes must fail.
        with self.assertRaisesRegex(ValueError, "same type"):
            join_cameras_as_batch([cam0, cam1])

        # Mixing devices must fail.
        cam2 = OrthographicCameras(device="cpu")
        with self.assertRaisesRegex(ValueError, "same device"):
            join_cameras_as_batch([cam1, cam2])

        # Mixing NDC-space and screen-space cameras must fail.
        cam3 = OrthographicCameras(in_ndc=False, device="cuda:0")
        with self.assertRaisesRegex(
            ValueError, "Attribute _in_ndc is not constant across inputs"
        ):
            join_cameras_as_batch([cam1, cam3])
 def test_simple_sphere_pulsar(self):
     """Render a shifted ico-sphere point cloud with the Pulsar renderer for
     several camera types and compare each image against a stored reference."""
     for device in (torch.device("cpu"), torch.device("cuda")):
         mesh = ico_sphere(1, device)
         points = mesh.verts_padded()
         # Shift vertices to check coordinate frames are correct.
         points[..., 1] += 0.2
         points[..., 0] += 0.2
         clouds = Pointclouds(points=points, features=torch.ones_like(points))
         for azimuth in (0.0, 90.0):
             R, T = look_at_view_transform(2.7, 0.0, azimuth)
             camera_variants = [
                 ("fovperspective", FoVPerspectiveCameras(device=device, R=R, T=T)),
                 (
                     "fovorthographic",
                     FoVOrthographicCameras(device=device, R=R, T=T),
                 ),
                 ("perspective", PerspectiveCameras(device=device, R=R, T=T)),
                 ("orthographic", OrthographicCameras(device=device, R=R, T=T)),
             ]
             for camera_name, cameras in camera_variants:
                 settings = PointsRasterizationSettings(
                     image_size=256, radius=5e-2, points_per_pixel=1
                 )
                 rasterizer = PointsRasterizer(
                     cameras=cameras, raster_settings=settings
                 )
                 renderer = PulsarPointsRenderer(rasterizer=rasterizer).to(device)
                 # Name of the stored reference image for this configuration.
                 filename = (
                     "pulsar_simple_pointcloud_sphere_"
                     f"azimuth{azimuth}_{camera_name}.png"
                 )
                 image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
                 images = renderer(
                     clouds, gamma=(1e-3,), znear=(1.0,), zfar=(100.0,)
                 )
                 rgb = images[0, ..., :3].squeeze().cpu()
                 if DEBUG:
                     # Dump the rendered image for manual inspection.
                     filename = "DEBUG_%s" % filename
                     Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                         DATA_DIR / filename
                     )
                 self.assertClose(rgb, image_ref, rtol=7e-3, atol=5e-3)
Exemple #6
0
    def test_getitem(self):
        """Indexing a camera batch with an int or a LongTensor returns a matching sub-batch."""
        R_matrix = torch.randn((6, 3, 3))
        principal_point = torch.randn((6, 2, 1))
        cam = OrthographicCameras(
            R=R_matrix,
            focal_length=5.0,
            principal_point=principal_point,
        )

        # An int index yields an instance of the same class carrying
        # exactly the same set of attributes.
        first = cam[0]
        self.assertTrue(isinstance(first, OrthographicCameras))
        self.assertEqual(cam.__dict__.keys(), first.__dict__.keys())

        # A torch.LongTensor index selects the corresponding cameras.
        picks = torch.tensor([1, 3, 5], dtype=torch.int64)
        subset = cam[picks]
        self.assertEqual(len(subset), 3)
        self.assertClose(subset.focal_length, torch.tensor([[5.0, 5.0]] * 3))
        self.assertClose(subset.R, R_matrix[[1, 3, 5], ...])
        self.assertClose(subset.principal_point, principal_point[[1, 3, 5], ...])
Exemple #7
0
 def test_perspective_type(self):
     """An OrthographicCameras instance reports non-perspective and has no znear."""
     cam = OrthographicCameras(focal_length=5.0,
                               principal_point=((2.5, 2.5), ))
     self.assertFalse(cam.is_perspective())
     # assertEquals is a deprecated alias (removed in Python 3.12);
     # assertIsNone states the intent directly.
     self.assertIsNone(cam.get_znear())
 def test_unified_inputs_pulsar(self):
     """Exercise PulsarPointsRenderer construction and forward-argument
     validation for all camera types, on CPU and CUDA.

     Covers: optional/ignored constructor arguments, image-size given as an
     int or a (H, W) tuple, required batched znear/zfar for unprojected
     camera types, and error paths for unbatched arguments and a zero
     image size.
     """
     # Test data on different devices.
     for device in [torch.device("cpu"), torch.device("cuda")]:
         sphere_mesh = ico_sphere(1, device)
         verts_padded = sphere_mesh.verts_padded()
         pointclouds = Pointclouds(
             points=verts_padded, features=torch.ones_like(verts_padded)
         )
         R, T = look_at_view_transform(2.7, 0.0, 0.0)
         # Test the different camera types.
         for _, cameras in [
             ("fovperspective", FoVPerspectiveCameras(device=device, R=R, T=T)),
             (
                 "fovorthographic",
                 FoVOrthographicCameras(device=device, R=R, T=T),
             ),
             ("perspective", PerspectiveCameras(device=device, R=R, T=T)),
             ("orthographic", OrthographicCameras(device=device, R=R, T=T)),
         ]:
             # Test different ways for image size specification.
             for image_size in (256, (256, 256)):
                 raster_settings = PointsRasterizationSettings(
                     image_size=image_size, radius=5e-2, points_per_pixel=1
                 )
                 rasterizer = PointsRasterizer(
                     cameras=cameras, raster_settings=raster_settings
                 )
                 # Test that the compositor can be provided. Its value is ignored
                 # so use a dummy.
                 _ = PulsarPointsRenderer(rasterizer=rasterizer, compositor=1).to(
                     device
                 )
                 # Constructor without compositor.
                 _ = PulsarPointsRenderer(rasterizer=rasterizer).to(device)
                 # Constructor with n_channels.
                 _ = PulsarPointsRenderer(rasterizer=rasterizer, n_channels=3).to(
                     device
                 )
                 # Constructor with max_num_spheres.
                 renderer = PulsarPointsRenderer(
                     rasterizer=rasterizer, max_num_spheres=1000
                 ).to(device)
                 # Test the forward function.
                 if isinstance(cameras, (PerspectiveCameras, OrthographicCameras)):
                     # znear and zfar is required in this case.
                     self.assertRaises(
                         ValueError,
                         lambda: renderer.forward(
                             point_clouds=pointclouds, gamma=(1e-4,)
                         ),
                     )
                     # With batched znear/zfar the call must succeed.
                     renderer.forward(
                         point_clouds=pointclouds,
                         gamma=(1e-4,),
                         znear=(1.0,),
                         zfar=(2.0,),
                     )
                     # znear and zfar must be batched.
                     self.assertRaises(
                         TypeError,
                         lambda: renderer.forward(
                             point_clouds=pointclouds,
                             gamma=(1e-4,),
                             znear=1.0,
                             zfar=(2.0,),
                         ),
                     )
                     self.assertRaises(
                         TypeError,
                         lambda: renderer.forward(
                             point_clouds=pointclouds,
                             gamma=(1e-4,),
                             znear=(1.0,),
                             zfar=2.0,
                         ),
                     )
                 else:
                     # gamma must be batched.
                     self.assertRaises(
                         TypeError,
                         lambda: renderer.forward(
                             point_clouds=pointclouds, gamma=1e-4
                         ),
                     )
                     renderer.forward(point_clouds=pointclouds, gamma=(1e-4,))
                     # rasterizer width and height change.
                     # Deliberately corrupt the image size so the next forward
                     # call must reject it; raster_settings is rebuilt on the
                     # next loop iteration, so this does not leak state.
                     renderer.rasterizer.raster_settings.image_size = 0
                     self.assertRaises(
                         ValueError,
                         lambda: renderer.forward(
                             point_clouds=pointclouds, gamma=(1e-4,)
                         ),
                     )
Exemple #9
0
    # NOTE(review): incomplete fragment — the enclosing function/loop begins
    # before this excerpt; `verts`, `proj_cam`, `mesh`, `img_size`, `frames`,
    # `device` and `args` are defined earlier, outside this view.
    # perspective projection: x=fX/Z assuming px=py=0, normalization of Z
    verts[:, :,
          1] = verts[:, :, 1].clone() * proj_cam[:, :1] / verts[:, :,
                                                                2].clone()
    verts[:, :,
          0] = verts[:, :, 0].clone() * proj_cam[:, :1] / verts[:, :,
                                                                2].clone()
    # Min-max normalize depth to [-0.5, 0.5]; detach so the normalization
    # does not carry gradients.
    verts[:, :,
          2] = ((verts[:, :, 2] - verts[:, :, 2].min()) /
                (verts[:, :, 2].max() - verts[:, :, 2].min()) - 0.5).detach()
    # Push all depths in front of the camera.
    verts[:, :, 2] += 10

    # NOTE(review): `features` is computed but not passed to Pointclouds below
    # (the mesh vertex colors are used instead) — presumably dead code; verify.
    features = torch.ones_like(verts)
    point_cloud = Pointclouds(points=verts[:, :, :3],
                              features=torch.Tensor(
                                  mesh.visual.vertex_colors[None]).cuda())

    # Rasterize the projected points with an orthographic camera and
    # alpha-composite them over a dark-gray background.
    cameras = OrthographicCameras(device=device)
    raster_settings = PointsRasterizationSettings(image_size=img_size,
                                                  radius=0.005,
                                                  points_per_pixel=10)
    renderer = PointsRenderer(
        rasterizer=PointsRasterizer(cameras=cameras,
                                    raster_settings=raster_settings),
        compositor=AlphaCompositor(background_color=(33, 33, 33)))
    img_pred = renderer(point_cloud)
    frames.append(img_pred[0, :, :, :3].cpu())
    #cv2.imwrite('%s/points%04d.png'%(args.outdir,i), np.asarray(img_pred[0,:,:,:3].cpu())[:,:,::-1])
# Assemble all rendered frames into an animated GIF.
imageio.mimsave('./output-depth.gif', frames, duration=5. / len(frames))