Example #1
    def save(
        self,
        data: Pointclouds,
        path: Union[str, Path],
        path_manager: PathManager,
        binary: Optional[bool],
        decimal_places: Optional[int] = None,
        **kwargs,
    ) -> bool:
        if not endswith(path, self.known_suffixes):
            return False

        points = data.points_list()[0]
        features = data.features_list()[0]
        with _open_file(path, path_manager, "wb") as f:
            _save_ply(
                f=f,
                verts=points,
                verts_colors=features,
                verts_normals=torch.FloatTensor([]),
                faces=None,
                ascii=binary is False,
                decimal_places=decimal_places,
            )
        return True
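A minimal usage sketch for a handler like this one: PyTorch3D registers such format interpreters with its high-level IO object, which dispatches on the ".ply" suffix. The tensors and the file name below are illustrative.

import torch
from pytorch3d.io import IO
from pytorch3d.structures import Pointclouds

points = torch.rand(1, 100, 3)  # one cloud with 100 points
colors = torch.rand(1, 100, 3)  # matching per-point RGB features
cloud = Pointclouds(points=points, features=colors)
IO().save_pointcloud(cloud, "cloud.ply", binary=True)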
Example #2
    def forward(self, pts, features):
        # Make sure pts and features have matching batch and point dimensions
        assert pts.size(2) == 3
        assert pts.size(1) == features.size(1)

        pts[:, :, 0] = -pts[:, :, 0]
        pts[:, :, 1] = -pts[:, :, 1]

        radius = float(self.radius) / float(self.img_size) * 2.0
        params = compositing.CompositeParams(radius=radius)

        pointcloud = Pointclouds(points=pts, features=features)
        points_idx, _, dist = rasterize_points(pointcloud, self.img_size,
                                               radius, self.points_per_pixel)

        dist = dist / pow(radius, 2)

        alphas = (1 - dist.clamp(max=1, min=1e-3).pow(0.5).pow(
            self.gamma).permute(0, 3, 1, 2))

        transformed_feature_alphas = compositing.alpha_composite(
            points_idx.permute(0, 3, 1, 2).long(), alphas,
            pointcloud.features_packed().permute(1, 0), params)

        return transformed_feature_alphas
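For context, the rasterize-then-composite pipeline used above can be exercised standalone. A hedged sketch follows (tensor contents and sizes are arbitrary); note that the `params` argument reflects an older compositing API, and recent PyTorch3D versions call `alpha_composite` with three arguments, as in the next example.

import torch
from pytorch3d.renderer import compositing
from pytorch3d.renderer.points import rasterize_points
from pytorch3d.structures import Pointclouds

pts = torch.rand(1, 500, 3) * 2 - 1  # points roughly in the NDC range
feats = torch.rand(1, 500, 3)        # per-point RGB features
cloud = Pointclouds(points=pts, features=feats)
idx, _, dist = rasterize_points(cloud, 64, 0.05, 8)
alphas = (1 - dist.clamp(max=1, min=1e-3).pow(0.5)).permute(0, 3, 1, 2)
image = compositing.alpha_composite(
    idx.permute(0, 3, 1, 2).long(), alphas,
    cloud.features_packed().permute(1, 0),
)
print(image.shape)  # torch.Size([1, 3, 64, 64])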
Example #3
    def forward(self, pts3D, src):
        bs = src.size(0)
        if len(src.size()) > 3:
            bs, c, w, _ = src.size()
            image_size = w

            pts3D = pts3D.permute(0, 2, 1)
            src = src.unsqueeze(2).repeat(1, 1, w, 1, 1).view(bs, c, -1)
        else:
            bs = src.size(0)
            image_size = self.size

        # Make sure these have been arranged in the same way
        assert pts3D.size(2) == 3
        assert pts3D.size(1) == src.size(2)

        pts3D[:, :, 1] = -pts3D[:, :, 1]
        pts3D[:, :, 0] = -pts3D[:, :, 0]

        # Add on the default feature to the end of the src
        # src = torch.cat((src, self.default_feature.repeat(bs, 1, 1)), 2)

        radius = float(self.radius) / float(image_size) * 2.0

        pts3D = Pointclouds(points=pts3D, features=src.permute(0, 2, 1))
        points_idx, _, dist = rasterize_points(pts3D, image_size, radius,
                                               self.points_per_pixel)

        if os.environ.get("DEBUG"):
            print("Max dist: ", dist.max(), pow(radius, self.opts.rad_pow))

        dist = dist / pow(radius, self.opts.rad_pow)

        if os.environ.get("DEBUG"):
            print("Max dist: ", dist.max())

        alphas = ((1 - dist.clamp(max=1, min=1e-3).pow(0.5)).pow(
            self.opts.tau).permute(0, 3, 1, 2))

        if self.opts.accumulation == 'alphacomposite':
            transformed_src_alphas = compositing.alpha_composite(
                points_idx.permute(0, 3, 1, 2).long(),
                alphas,
                pts3D.features_packed().permute(1, 0),
            )
        elif self.opts.accumulation == 'wsum':
            transformed_src_alphas = compositing.weighted_sum(
                points_idx.permute(0, 3, 1, 2).long(),
                alphas,
                pts3D.features_packed().permute(1, 0),
            )
        elif self.opts.accumulation == 'wsumnorm':
            transformed_src_alphas = compositing.weighted_sum_norm(
                points_idx.permute(0, 3, 1, 2).long(),
                alphas,
                pts3D.features_packed().permute(1, 0),
            )
        else:
            raise ValueError(
                "Unknown accumulation mode: %s" % self.opts.accumulation)

        return transformed_src_alphas
Example #4
def _clone_pointcloud(verts0, device, batch_size):
    """
    Helper function to detach and clone the verts.
    This is needed in order to set up the tensors for
    gradient computation in different tests.
    """
    verts = verts0.detach().clone()
    verts.requires_grad = True
    pointclouds = Pointclouds(points=[verts])
    pointclouds = pointclouds.to(device).extend(batch_size)
    return verts, pointclouds
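A hypothetical usage of this helper in a gradient test (shapes are illustrative):

import torch
from pytorch3d.structures import Pointclouds

verts0 = torch.rand(100, 3)
verts, pointclouds = _clone_pointcloud(verts0, torch.device("cpu"), batch_size=4)
loss = pointclouds.points_packed().sum()
loss.backward()          # gradients flow back into the cloned `verts`
print(verts.grad.shape)  # torch.Size([100, 3])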
Example #5
    def __call__(self, points, translate=True):
        assert len(points.shape) == 3 and points.shape[-1] == 3
        bs = points.size(0)
        rgb = torch.ones((bs, points.size(1), 3),
                         device=points.device) * self.max_brightness
        if translate:
            tm = points.mean(dim=-2, keepdim=False)
            T = T3.Translate(-tm, device=points.device)
            points = T.transform_points(points)
            # There's an error on normals.
            # Probably not needed for just a translation.
            # normals = T.transform_normals(normals)
        cloud = Pointclouds(points=points, features=rgb)
        gamma = [self.opt.pulsar_gamma for _ in range(bs)]
        cameras = self.renderer.rasterizer.cameras
        R = cameras.R.expand(bs, -1, -1)
        T = cameras.T.expand(bs, -1)
        znear = cameras.znear.expand(bs)
        zfar = cameras.zfar.expand(bs)
        fov = cameras.fov.expand(bs)
        aspect_ratio = cameras.aspect_ratio.expand(bs)
        return self.renderer(cloud,
                             gamma=gamma,
                             R=R,
                             T=T,
                             znear=znear,
                             zfar=zfar,
                             fov=fov,
                             aspect_ratio=aspect_ratio)
Example #6
    def test_points_image_size_arg(self):
        points = Pointclouds([verts0])

        # The assertions must run after the `with` block: code placed after
        # the raising call inside the block would never execute.
        with self.assertRaises(ValueError) as cm:
            rasterize_points(
                points,
                (100, 200, 3),
                0.0001,
                points_per_pixel=1,
            )
        self.assertIn("tuple/list of (H, W)", str(cm.exception))

        with self.assertRaises(ValueError) as cm:
            rasterize_points(
                points,
                (0, 10),
                0.0001,
                points_per_pixel=1,
            )
        self.assertIn("sizes must be positive", str(cm.exception))

        with self.assertRaises(ValueError) as cm:
            rasterize_points(
                points,
                (100.5, 120.5),
                0.0001,
                points_per_pixel=1,
            )
        self.assertIn("sizes must be integers", str(cm.exception))
Example #7
def visualize_nerf_outputs(nerf_out: dict, output_cache: List, viz: Visdom,
                           visdom_env: str):
    """
    Visualizes the outputs of the `RadianceFieldRenderer`.

    Args:
        nerf_out: An output of the validation rendering pass.
        output_cache: A list with outputs of several training render passes.
        viz: A visdom connection object.
        visdom_env: The name of visdom environment for visualization.
    """

    # Show the training images.
    ims = torch.stack([o["image"] for o in output_cache])
    ims = torch.cat(list(ims), dim=1)
    viz.image(
        ims.permute(2, 0, 1),
        env=visdom_env,
        win="images",
        opts={"title": "train_images"},
    )

    # Show the coarse and fine renders together with the ground truth images.
    ims_full = torch.cat(
        [
            nerf_out[imvar][0].permute(2, 0, 1).detach().cpu().clamp(0.0, 1.0)
            for imvar in ("rgb_coarse", "rgb_fine", "rgb_gt")
        ],
        dim=2,
    )
    viz.image(
        ims_full,
        env=visdom_env,
        win="images_full",
        opts={"title": "coarse | fine | target"},
    )

    # Make a 3D plot of training cameras and their emitted rays.
    camera_trace = {
        f"camera_{ci:03d}": o["camera"].cpu()
        for ci, o in enumerate(output_cache)
    }
    ray_pts_trace = {
        f"ray_pts_{ci:03d}": Pointclouds(
            ray_bundle_to_ray_points(
                o["coarse_ray_bundle"]).detach().cpu().view(1, -1, 3))
        for ci, o in enumerate(output_cache)
    }
    plotly_plot = plot_scene(
        {
            "training_scene": {
                **camera_trace,
                **ray_pts_trace,
            },
        },
        pointcloud_max_points=5000,
        pointcloud_marker_size=1,
        camera_scale=0.3,
    )
    viz.plotlyplot(plotly_plot, env=visdom_env, win="scenes")
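Visdom aside, the `plot_scene` call at the end can be exercised on its own. A minimal sketch (random points, illustrative names):

import torch
from pytorch3d.structures import Pointclouds
from pytorch3d.vis.plotly_vis import plot_scene

cloud = Pointclouds(points=torch.rand(1, 1000, 3))
fig = plot_scene(
    {"scene": {"cloud": cloud}},
    pointcloud_max_points=500,
    pointcloud_marker_size=2,
)
fig.show()  # opens an interactive plotly viewer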
Example #8
    def test_render_pointcloud(self):
        """
        Test a textured point cloud is rendered correctly in a non square image.
        """
        device = torch.device("cuda:0")
        pointclouds = Pointclouds(
            points=[torus_points * 2.0],
            features=torch.ones_like(torus_points[None, ...]),
        ).to(device)
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
            image_size=(512, 1024), radius=5e-2, points_per_pixel=1
        )
        rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
        compositor = AlphaCompositor()
        renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)

        # Load reference image
        image_ref = load_rgb_image("test_pointcloud_rectangle_image.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(pointclouds)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_pointcloud_rectangle_image.png"
                )

            # NOTE some pixels can be flaky
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            self.assertTrue(cond1)
Example #9
    def test_texture_sampling_cow(self):
        # test texture sampling for the cow example by converting
        # the cow mesh and its texture uv to a pointcloud with texture

        device = torch.device("cuda:0")
        obj_dir = get_pytorch3d_dir() / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        for text_type in ("uv", "atlas"):
            # Load mesh + texture
            if text_type == "uv":
                mesh = load_objs_as_meshes(
                    [obj_filename], device=device, load_textures=True, texture_wrap=None
                )
            elif text_type == "atlas":
                mesh = load_objs_as_meshes(
                    [obj_filename],
                    device=device,
                    load_textures=True,
                    create_texture_atlas=True,
                    texture_atlas_size=8,
                    texture_wrap=None,
                )

            points, normals, textures = sample_points_from_meshes(
                mesh, num_samples=50000, return_normals=True, return_textures=True
            )
            pointclouds = Pointclouds(points, normals=normals, features=textures)

            for pos in ("front", "back"):
                # Init rasterizer settings
                if pos == "back":
                    azim = 0.0
                elif pos == "front":
                    azim = 180
                R, T = look_at_view_transform(2.7, 0, azim)
                cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

                raster_settings = PointsRasterizationSettings(
                    image_size=512, radius=1e-2, points_per_pixel=1
                )

                rasterizer = PointsRasterizer(
                    cameras=cameras, raster_settings=raster_settings
                )
                compositor = NormWeightedCompositor()
                renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)
                images = renderer(pointclouds)

                rgb = images[0, ..., :3].squeeze().cpu()
                if DEBUG:
                    filename = "DEBUG_cow_mesh_to_pointcloud_%s_%s.png" % (
                        text_type,
                        pos,
                    )
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename
                    )
Example #10
    def read(
        self,
        path: Union[str, Path],
        device,
        path_manager: PathManager,
        **kwargs,
    ) -> Optional[Pointclouds]:
        if not endswith(path, self.known_suffixes):
            return None

        verts, faces, features = _load_ply(
            f=path, path_manager=path_manager, return_vertex_colors=True
        )
        verts = verts.to(device)
        if features is None:
            pointcloud = Pointclouds(points=[verts])
        else:
            pointcloud = Pointclouds(points=[verts], features=[features.to(device)])
        return pointcloud
Example #11
def cli():
    """
    Basic example for the pulsar sphere renderer using the PyTorch3D interface.

    Writes to `basic-pt3d.png`.
    """
    LOGGER.info("Rendering on GPU...")
    torch.manual_seed(1)
    n_points = 10
    width = 1_000
    height = 1_000
    device = torch.device("cuda")
    # Generate sample data.
    vert_pos = torch.rand(n_points, 3, dtype=torch.float32,
                          device=device) * 10.0
    vert_pos[:, 2] += 25.0
    vert_pos[:, :2] -= 5.0
    vert_col = torch.rand(n_points, 3, dtype=torch.float32, device=device)
    pcl = Pointclouds(points=vert_pos[None, ...], features=vert_col[None, ...])
    # Alternatively, you can also use the look_at_view_transform to get R and T:
    # R, T = look_at_view_transform(
    #     dist=30.0, elev=0.0, azim=180.0, at=((0.0, 0.0, 30.0),), up=((0, 1, 0),),
    # )
    cameras = PerspectiveCameras(
        # The focal length must be double the size for PyTorch3D because of the NDC
        # coordinates spanning a range of two - and they must be normalized by the
        # sensor width (see the pulsar example). This means we need here
        # 5.0 * 2.0 / 2.0 to get the equivalent results as in pulsar.
        focal_length=(5.0 * 2.0 / 2.0, ),
        R=torch.eye(3, dtype=torch.float32, device=device)[None, ...],
        T=torch.zeros((1, 3), dtype=torch.float32, device=device),
        image_size=((width, height), ),
        device=device,
    )
    vert_rad = torch.rand(n_points, dtype=torch.float32, device=device)
    raster_settings = PointsRasterizationSettings(
        image_size=(width, height),
        radius=vert_rad,
    )
    rasterizer = PointsRasterizer(cameras=cameras,
                                  raster_settings=raster_settings)
    renderer = PulsarPointsRenderer(rasterizer=rasterizer).to(device)
    # Render.
    image = renderer(
        pcl,
        gamma=(1.0e-1, ),  # Renderer blending parameter gamma, in [1., 1e-5].
        znear=(1.0, ),
        zfar=(45.0, ),
        radius_world=True,
        bg_col=torch.ones((3, ), dtype=torch.float32, device=device),
    )[0]
    LOGGER.info("Writing image to `%s`.", path.abspath("basic-pt3d.png"))
    imageio.imsave("basic-pt3d.png",
                   (image.cpu().detach() * 255.0).to(torch.uint8).numpy())
    LOGGER.info("Done.")
Example #12
    def test_points_renderer_to(self):
        """
        Test moving all the tensors in the points renderer to a new device.
        """

        device1 = torch.device("cpu")

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        raster_settings = PointsRasterizationSettings(image_size=256,
                                                      radius=0.001,
                                                      points_per_pixel=1)
        cameras = FoVPerspectiveCameras(device=device1,
                                        R=R,
                                        T=T,
                                        aspect_ratio=1.0,
                                        fov=60.0,
                                        zfar=100)
        rasterizer = PointsRasterizer(cameras=cameras,
                                      raster_settings=raster_settings)

        renderer = PointsRenderer(rasterizer=rasterizer,
                                  compositor=AlphaCompositor())

        mesh = ico_sphere(2, device1)
        verts_padded = mesh.verts_padded()
        pointclouds = Pointclouds(points=verts_padded,
                                  features=torch.randn_like(verts_padded))
        self._check_points_renderer_props_on_device(renderer, device1)

        # Test rendering on cpu
        output_images = renderer(pointclouds)
        self.assertEqual(output_images.device, device1)

        # Move renderer and pointclouds to another device and re-render
        device2 = torch.device("cuda:0")
        renderer = renderer.to(device2)
        pointclouds = pointclouds.to(device2)
        self._check_points_renderer_props_on_device(renderer, device2)
        output_images = renderer(pointclouds)
        self.assertEqual(output_images.device, device2)
Example #13
    def _get_cube_cloud(self):
        """Get vertices of a unit cube, with colors assigned according to
        vertex coordinates."""
        vertices = list(
            itertools.product(*zip([-0.5, -0.5, -0.5], [0.5, 0.5, 0.5])))
        vertices = np.insert(vertices, 0, [0, 0, 0], axis=0)
        vertices = th.as_tensor(vertices, dtype=th.float32, device=self.device)

        # Map vertex coordinates to RGB colors in the range [0.25, 0.75].
        colors = (0.5 + 0.5 * vertices)
        cloud = Pointclouds(points=vertices[None], features=colors[None])
        return cloud
Example #14
    def test_save_load_with_normals(self):
        points = torch.tensor([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]],
                              dtype=torch.float32)
        normals = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 4, 1], [1, 0, 0]],
                               dtype=torch.float32)
        features = torch.rand_like(points)

        for do_features, do_normals in itertools.product([True, False],
                                                         [True, False]):
            cloud = Pointclouds(
                points=[points],
                features=[features] if do_features else None,
                normals=[normals] if do_normals else None,
            )
            device = torch.device("cuda:0")

            io = IO()
            with NamedTemporaryFile(mode="w", suffix=".ply") as f:
                io.save_pointcloud(cloud.cuda(), f.name)
                f.flush()
                cloud2 = io.load_pointcloud(f.name, device=device)
            self.assertEqual(cloud2.device, device)
            cloud2 = cloud2.cpu()
            self.assertClose(cloud2.points_padded(), cloud.points_padded())
            if do_normals:
                self.assertClose(cloud2.normals_padded(),
                                 cloud.normals_padded())
            else:
                self.assertIsNone(cloud.normals_padded())
                self.assertIsNone(cloud2.normals_padded())
            if do_features:
                self.assertClose(cloud2.features_packed(), features)
            else:
                self.assertIsNone(cloud2.features_packed())
Example #15
    def test_pointcloud(self):
        data = _CommonData()
        clouds = Pointclouds(points=torch.tensor([[data.point]])).extend(2)
        colorful_cloud = Pointclouds(points=torch.tensor([[data.point]]),
                                     features=torch.ones(1, 1, 3)).extend(2)
        points_per_pixel = 2
        # for camera in [data.camera_screen]:
        for camera in (data.camera_ndc, data.camera_screen):
            rasterizer = PointsRasterizer(
                cameras=camera,
                raster_settings=PointsRasterizationSettings(
                    image_size=data.image_size,
                    radius=0.0001,
                    points_per_pixel=points_per_pixel,
                ),
            )
            # when rasterizing we expect only one pixel to be occupied
            rasterizer_output = rasterizer(clouds).idx
            self.assertTupleEqual(rasterizer_output.shape, (2, ) +
                                  data.image_size + (points_per_pixel, ))
            found = torch.nonzero(rasterizer_output != -1)
            self.assertTupleEqual(found.shape, (2, 4))
            self.assertListEqual(found[0].tolist(), [0, data.y, data.x, 0])
            self.assertListEqual(found[1].tolist(), [1, data.y, data.x, 0])

            if camera.in_ndc():
                # Pulsar not currently working in screen space.
                pulsar_renderer = PulsarPointsRenderer(rasterizer=rasterizer)
                pulsar_output = pulsar_renderer(colorful_cloud,
                                                gamma=(0.1, 0.1),
                                                znear=(0.1, 0.1),
                                                zfar=(70, 70))
                self.assertTupleEqual(pulsar_output.shape,
                                      (2, ) + data.image_size + (3, ))
                # Look for points rendered in the red channel only, expecting our one.
                # Check the first batch element only.
                # TODO: Something is odd with the second.
                found = torch.nonzero(pulsar_output[0, :, :, 0])
                self.assertTupleEqual(found.shape, (1, 2))
                self.assertListEqual(found[0].tolist(), [data.y, data.x])
Example #16
    def init_meshes_clouds(
        batch_size: int = 10,
        num_verts: int = 1000,
        num_faces: int = 3000,
        num_points: int = 3000,
        device: str = "cuda:0",
    ):
        device = torch.device(device)
        nump = torch.randint(low=1, high=num_points, size=(batch_size, ))
        numv = torch.randint(low=3, high=num_verts, size=(batch_size, ))
        numf = torch.randint(low=1, high=num_faces, size=(batch_size, ))
        verts_list = []
        faces_list = []
        points_list = []
        for i in range(batch_size):
            # Randomly choose vertices
            verts = torch.rand((numv[i], 3),
                               dtype=torch.float32,
                               device=device)
            verts.requires_grad_(True)

            # Randomly choose faces. Our tests below compare argmin indices
            # over faces and edges. Argmin is sensitive even to small numerical
            # variations, so we make sure that the faces are valid, i.e. a face
            # f = (i0, i1, i2) satisfies i0 != i1 != i2; otherwise the argmin
            # cannot be resolved due to numerical sensitivity.
            faces, allf = [], 0
            validf = numv[i].item() - numv[i].item() % 3
            while allf < numf[i]:
                ff = torch.randperm(numv[i],
                                    device=device)[:validf].view(-1, 3)
                faces.append(ff)
                allf += ff.shape[0]
            faces = torch.cat(faces, 0)
            if faces.shape[0] > numf[i]:
                faces = faces[:numf[i]]

            verts_list.append(verts)
            faces_list.append(faces)

            # Randomly choose points
            points = torch.rand((nump[i], 3),
                                dtype=torch.float32,
                                device=device)
            points.requires_grad_(True)

            points_list.append(points)

        meshes = Meshes(verts_list, faces_list)
        pcls = Pointclouds(points_list)

        return meshes, pcls
Example #17
    def warp_canon_depth(self, canon_depth):
        b, h, w = canon_depth.shape
        # grid_3d is supposed to be the 3D mesh.
        grid_3d = self.get_warped_3d_grid(canon_depth).reshape(b, -1, 3)
        faces = get_face_idx(b, h, w).to(canon_depth.device)
        # fixme: https://github.com/facebookresearch/pytorch3d/issues/202
        grid_3d = Pointclouds(points=grid_3d, features=faces)
        warped_depth = self.renderer(grid_3d).squeeze()
        # allow some margin out of the valid range
        margin = (self.max_depth - self.min_depth) / 2
        warped_depth = warped_depth.clamp(min=self.min_depth - margin,
                                          max=self.max_depth + margin)
        return warped_depth
Example #18
def get_per_point_visibility_mask(pointclouds: Pointclouds,
                                  fragments: NamedTuple) -> torch.Tensor:
    """
    Compute per-point visibility (0/1) from rasterization fragments.
    Returns:
        boolean mask for packed tensors (P_total,)
    """
    P_total = pointclouds.num_points_per_cloud().sum().item()
    try:
        mask = fragments.occupancy.bool()  # occupancy is stored as float
    except AttributeError:
        mask = fragments.idx[..., 0] >= 0  # fall back to the index buffer

    pts_visibility = torch.full((P_total, ),
                                False,
                                dtype=torch.bool,
                                device=pointclouds.device)

    # all rendered points (indices in packed points)
    visible_idx = fragments.idx[mask].unique().long().view(-1)
    visible_idx = visible_idx[visible_idx >= 0]
    pts_visibility[visible_idx] = True
    return pts_visibility
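A hedged usage sketch: build a point rasterizer as in the other examples, rasterize, and pass the resulting fragments to the function above (camera pose and point layout are arbitrary):

import torch
from pytorch3d.renderer import (
    FoVPerspectiveCameras,
    PointsRasterizationSettings,
    PointsRasterizer,
)
from pytorch3d.structures import Pointclouds

cameras = FoVPerspectiveCameras()
rasterizer = PointsRasterizer(
    cameras=cameras,
    raster_settings=PointsRasterizationSettings(image_size=64, radius=0.05),
)
cloud = Pointclouds(points=torch.rand(1, 200, 3) + torch.tensor([-0.5, -0.5, 2.0]))
fragments = rasterizer(cloud)
visible = get_per_point_visibility_mask(cloud, fragments)  # (200,) bool mask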
Example #19
    def collate_fn(batch):
        out = dict()
        batch_keys = batch[0].keys()
        skip_keys = ['scan_dict']  # These will be manually collated

        # For each key not in skip_keys, use the default torch collator
        for key in [k for k in batch_keys if k not in skip_keys]:
            out[key] = torch.utils.data._utils.collate.default_collate(
                [d[key] for d in batch])

        scan_all = [
            torch.Tensor(sample['scan_dict']['points']) for sample in batch
        ]
        out['scan'] = Pointclouds(points=scan_all)

        return out
Example #20
def rasterize_mc_samples(
    xys: torch.Tensor,
    feats: torch.Tensor,
    image_size_hw: Tuple[int, int],
    radius: float = 0.03,
    topk: int = 5,
    masks: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Rasterizes Monte-Carlo sampled features back onto the image.

    Specifically, the code uses the PyTorch3D point rasterizer to render
    a z-flat point cloud composed of the xy MC locations and their features.

    Args:
        xys: B x N x 2 2D point locations in PyTorch3D NDC convention
        feats: B x N x dim tensor containing per-point rendered features.
        image_size_hw: Tuple[image_height, image_width] containing
            the size of rasterized image.
        radius: Rasterization point radius.
        topk: The maximum z-buffer size for the PyTorch3D point cloud rasterizer.
        masks: B x N x 1 tensor containing the alpha mask of the
            rendered features.
    """

    if masks is None:
        masks = torch.ones_like(xys[..., :1])

    feats = torch.cat((feats, masks), dim=-1)
    pointclouds = Pointclouds(
        points=torch.cat([xys, torch.ones_like(xys[..., :1])], dim=-1),
        features=feats,
    )

    data_rendered, render_mask, _ = render_point_cloud_pytorch3d(
        PerspectiveCameras(device=feats.device),
        pointclouds,
        render_size=image_size_hw,
        point_radius=radius,
        topk=topk,
    )

    data_rendered, masks_pt = data_rendered.split(
        [data_rendered.shape[1] - 1, 1], dim=1)
    render_mask = masks_pt * render_mask

    return data_rendered, render_mask
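A hypothetical call (shapes are illustrative; this relies on the `render_point_cloud_pytorch3d` helper imported by the surrounding module):

import torch

xys = torch.rand(2, 128, 2) * 2 - 1  # MC sample locations in NDC
feats = torch.rand(2, 128, 16)       # per-sample features
rendered, mask = rasterize_mc_samples(xys, feats, image_size_hw=(64, 64), radius=0.02)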
Example #21
def get_rgbd_point_cloud(
    camera: CamerasBase,
    image_rgb: torch.Tensor,
    depth_map: torch.Tensor,
    mask: Optional[torch.Tensor] = None,
    mask_thr: float = 0.5,
    mask_points: bool = True,
) -> Pointclouds:
    """
    Given a batch of images, depths, masks and cameras, generate a colored
    point cloud by unprojecting the depth maps and coloring the resulting
    points with the source pixel colors.
    """
    imh, imw = image_rgb.shape[2:]

    # convert the depth maps to point clouds using the grid ray sampler
    pts_3d = ray_bundle_to_ray_points(
        NDCMultinomialRaysampler(
            image_width=imw,
            image_height=imh,
            n_pts_per_ray=1,
            min_depth=1.0,
            max_depth=1.0,
        )(camera)._replace(lengths=depth_map[:, 0, ..., None])
    )

    pts_mask = depth_map > 0.0
    if mask is not None:
        pts_mask *= mask > mask_thr
    pts_mask = pts_mask.reshape(-1)

    pts_3d = pts_3d.reshape(-1, 3)[pts_mask]

    pts_colors = torch.nn.functional.interpolate(
        image_rgb,
        # pyre-fixme[6]: Expected `Optional[int]` for 2nd param but got
        #  `List[typing.Any]`.
        size=[imh, imw],
        mode="bilinear",
        align_corners=False,
    )
    pts_colors = pts_colors.permute(0, 2, 3, 1).reshape(-1, 3)[pts_mask]

    return Pointclouds(points=pts_3d[None], features=pts_colors[None])
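A hypothetical usage sketch: unproject a constant-depth RGB image into a colored cloud, with the camera intrinsics left at their defaults:

import torch
from pytorch3d.renderer import PerspectiveCameras

camera = PerspectiveCameras()
image_rgb = torch.rand(1, 3, 32, 32)
depth_map = torch.ones(1, 1, 32, 32)
cloud = get_rgbd_point_cloud(camera, image_rgb, depth_map)
print(cloud.points_padded().shape)  # torch.Size([1, 1024, 3])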
Example #22
    def read(
        self,
        path: Union[str, Path],
        device,
        path_manager: PathManager,
        **kwargs,
    ) -> Optional[Pointclouds]:
        if not endswith(path, self.known_suffixes):
            return None

        verts, faces, features, normals = _load_ply(f=path, path_manager=path_manager)
        verts = verts.to(device)
        if features is not None:
            features = [features.to(device)]
        if normals is not None:
            normals = [normals.to(device)]

        pointcloud = Pointclouds(points=[verts], features=features, normals=normals)
        return pointcloud
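This reader pairs with the writer from Example #1; through the high-level IO API the dispatch happens automatically. A minimal sketch (the file name is illustrative):

from pytorch3d.io import IO

cloud = IO().load_pointcloud("cloud.ply", device="cuda:0")
print(cloud.points_list()[0].shape)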
Example #23
    def __init__(self, dir):
        print("Loading scene ", dir)
        self.ReadConfig(dir)
        a = numpy.loadtxt(dir + "poses_view_matrix.txt")
        self.num_images = a.shape[0] // 4
        self.view_matrices = []
        for i in range(self.num_images):
            V = a[i * 4:(i + 1) * 4, :]
            self.view_matrices.append(V)

        # convert view matrix to R,t
        self.camera_rs = []
        self.camera_ts = []

        for V in self.view_matrices:
            R = V[0:3, 0:3]
            t = V[0:3, 3:4]
            self.camera_rs.append(R)
            self.camera_ts.append(t.transpose())

        self.R = torch.tensor(self.camera_rs).float().to(device)
        self.T = torch.tensor(self.camera_ts).squeeze(1).float().to(device)

        self.image_size = (self.h, self.w)
        self.image_size_tensor = torch.Tensor(
            self.image_size).unsqueeze(0).to(device).int()

        pc = load_ply(
            "/home/dari/Projects/pointrendering2/Code/scenes/tt_train_colmap/point_cloud.ply"
        )
        verts = torch.Tensor(pc[0]).to(device).unsqueeze(0)
        rgb = verts.clone()
        rgb = torch.ones_like(verts)
        # PrintTensorInfo(verts)
        self.point_cloud = Pointclouds(points=verts, features=rgb)

        print("Scene loaded: R, T , K , size")
        PrintTensorInfo(self.R)
        PrintTensorInfo(self.T)
        PrintTensorInfo(self.K)
        PrintTensorInfo(self.image_size_tensor)
Example #24
    def test_small_faces_case(self):
        for device in [torch.device("cpu"), torch.device("cuda:0")]:
            mesh_vertices = torch.tensor(
                [
                    [-0.0021, -0.3769, 0.7146],
                    [-0.0161, -0.3771, 0.7146],
                    [-0.0021, -0.3771, 0.7147],
                ],
                dtype=torch.float32,
                device=device,
            )
            mesh1_faces = torch.tensor([[0, 2, 1]], device=device)
            mesh2_faces = torch.tensor([[2, 0, 1]], device=device)
            pcd_points = torch.tensor([[-0.3623, -0.5340, 0.7727]],
                                      device=device)
            mesh1 = Meshes(verts=[mesh_vertices], faces=[mesh1_faces])
            mesh2 = Meshes(verts=[mesh_vertices], faces=[mesh2_faces])
            pcd = Pointclouds(points=[pcd_points])

            loss1 = point_mesh_face_distance(mesh1, pcd)
            loss2 = point_mesh_face_distance(mesh2, pcd)
            self.assertClose(loss1, loss2)
Example #25
    def test_save_pointcloud(self):
        header = "\n".join([
            "ply",
            "format binary_little_endian 1.0",
            "element vertex 8",
            "property float x",
            "property float y",
            "property float z",
            "property float red",
            "property float green",
            "property float blue",
            "end_header",
            "",
        ]).encode("ascii")
        data = struct.pack("<" + "f" * 48, *range(48))
        points = torch.FloatTensor([0, 1, 2]) + 6 * torch.arange(8)[:, None]
        features = torch.FloatTensor([3, 4, 5]) + 6 * torch.arange(8)[:, None]
        pointcloud = Pointclouds(points=[points], features=[features])

        io = IO()
        with NamedTemporaryFile(mode="rb", suffix=".ply") as f:
            io.save_pointcloud(data=pointcloud, path=f.name)
            f.flush()
            f.seek(0)
            actual_data = f.read()
            reloaded_pointcloud = io.load_pointcloud(f.name)

        self.assertEqual(header + data, actual_data)
        self.assertClose(reloaded_pointcloud.points_list()[0], points)
        self.assertClose(reloaded_pointcloud.features_list()[0], features)

        with NamedTemporaryFile(mode="r", suffix=".ply") as f:
            io.save_pointcloud(data=pointcloud, path=f.name, binary=False)
            reloaded_pointcloud2 = io.load_pointcloud(f.name)
            self.assertEqual(f.readline(), "ply\n")
            self.assertEqual(f.readline(), "format ascii 1.0\n")
        self.assertClose(reloaded_pointcloud2.points_list()[0], points)
        self.assertClose(reloaded_pointcloud2.features_list()[0], features)
Example #26
def eval_one_dir(exp_dir, n_pts=50000):
    """
    Run evaluation for a single experiment directory.
    """
    device = torch.device('cuda:0')
    cfg = config.load_config(os.path.join(exp_dir, 'config.yaml'))
    dataset = config.create_dataset(cfg.data, mode='val')
    meshes_gt = dataset.get_meshes().to(device)
    val_gt_pts_file = os.path.join(cfg.data.data_dir, 'val%d.ply' % n_pts)
    if os.path.isfile(val_gt_pts_file):
        points, normals = np.split(read_ply(val_gt_pts_file), 2, axis=1)
        pcl_gt = Pointclouds(
            torch.from_numpy(points[None, ...]).float(),
            torch.from_numpy(normals[None, ...]).float()).to(device)
    else:
        pcl_gt = dataset.get_pointclouds(n_pts).to(device)
        trimesh.Trimesh(pcl_gt.points_packed().cpu().numpy(),
                        vertex_normals=pcl_gt.normals_packed().cpu().numpy(),
                        process=False).export(val_gt_pts_file,
                                              vertex_normal=True)

    # load vis directories
    vis_dir = os.path.join(exp_dir, 'vis')
    vis_files = sorted(get_filenames(vis_dir, '_mesh.ply'))
    iters = [int(os.path.basename(v).split('_')[0]) for v in vis_files]
    best_dict = defaultdict(lambda: float('inf'))
    vis_eval_csv = os.path.join(vis_dir, "evaluation_n%d.csv" % n_pts)
    if not os.path.isfile(vis_eval_csv):
        with open(os.path.join(vis_dir, "evaluation_n%d.csv" % n_pts),
                  "w") as f:
            fieldnames = ['mtime', 'it', 'chamfer_p', 'chamfer_n', 'pf_dist']
            writer = csv.DictWriter(f,
                                    fieldnames=fieldnames,
                                    restval="-",
                                    extrasaction="ignore")
            writer.writeheader()
            mtime0 = None
            for it, vis_file in zip(iters, vis_files):
                eval_dict = OrderedDict()
                mtime = os.path.getmtime(vis_file)
                if mtime0 is None:
                    mtime0 = mtime
                eval_dict['it'] = it
                eval_dict['mtime'] = mtime - mtime0
                val_pts_file = os.path.join(
                    vis_dir,
                    os.path.basename(vis_file).replace('_mesh',
                                                       '_val%d' % n_pts))
                if os.path.isfile(val_pts_file):
                    points, normals = np.split(read_ply(val_pts_file),
                                               2,
                                               axis=1)
                    points = torch.from_numpy(points).float().to(
                        device=device).view(1, -1, 3)
                    normals = torch.from_numpy(normals).float().to(
                        device=device).view(1, -1, 3)
                else:
                    mesh = trimesh.load(vis_file, process=False)
                    # points, normals = pcu.sample_mesh_poisson_disk(
                    #     mesh.vertices, mesh.faces,
                    #     mesh.vertex_normals.ravel().reshape(-1, 3), n_pts, use_geodesic_distance=True)
                    # p_idx = np.random.permutation(points.shape[0])[:n_pts]
                    # points = points[p_idx, ...]
                    # normals = normals[p_idx, ...]
                    # points = torch.from_numpy(points).float().to(
                    #     device=device).view(1, -1, 3)
                    # normals = torch.from_numpy(normals).float().to(
                    #     device=device).view(1, -1, 3)
                    meshes = Meshes(
                        torch.from_numpy(mesh.vertices[None, ...]).float(),
                        torch.from_numpy(mesh.faces[None,
                                                    ...]).long()).to(device)
                    points, normals = sample_points_from_meshes(
                        meshes, n_pts, return_normals=True)
                    trimesh.Trimesh(points.cpu().numpy()[0],
                                    vertex_normals=normals.cpu().numpy()[0],
                                    process=False).export(val_pts_file,
                                                          vertex_normal=True)
                pcl = Pointclouds(points, normals)
                chamfer_p, chamfer_n = chamfer_distance(
                    points,
                    pcl_gt.points_padded(),
                    x_normals=normals,
                    y_normals=pcl_gt.normals_padded(),
                )
                eval_dict['chamfer_p'] = chamfer_p.item()
                eval_dict['chamfer_n'] = chamfer_n.item()
                pf_dist = point_mesh_face_distance(meshes_gt, pcl)
                eval_dict['pf_dist'] = pf_dist.item()
                writer.writerow(eval_dict)
                for k, v in eval_dict.items():
                    if v < best_dict[k]:
                        best_dict[k] = v
                        print('best {} so far ({}): {:.4g}'.format(
                            k, vis_file, v))

    # generation directories
    gen_dir = os.path.join(exp_dir, 'generation')
    if not os.path.isdir(gen_dir):
        return

    final_file = os.path.join(gen_dir, 'mesh.ply')
    val_pts_file = final_file[:-4] + '_val%d' % n_pts + '.ply'
    if not os.path.isfile(final_file):
        return

    gen_file_csv = os.path.join(gen_dir, "evaluation_n%d.csv" % n_pts)
    if not os.path.isfile(gen_file_csv):
        with open(os.path.join(gen_dir, "evaluation_n%d.csv" % n_pts),
                  "w") as f:
            fieldnames = ['chamfer_p', 'chamfer_n', 'pf_dist']
            writer = csv.DictWriter(f,
                                    fieldnames=fieldnames,
                                    restval="-",
                                    extrasaction="ignore")
            writer.writeheader()
            eval_dict = OrderedDict()
            mesh = trimesh.load(final_file)
            # points, normals = pcu.sample_mesh_poisson_disk(
            #     mesh.vertices, mesh.faces,
            #     mesh.vertex_normals.ravel().reshape(-1, 3), n_pts, use_geodesic_distance=True)
            # p_idx = np.random.permutation(points.shape[0])[:n_pts]
            # points = points[p_idx, ...]
            # normals = normals[p_idx, ...]
            # points = torch.from_numpy(points).float().to(
            #     device=device).view(1, -1, 3)
            # normals = torch.from_numpy(normals).float().to(
            #     device=device).view(1, -1, 3)
            meshes = Meshes(
                torch.from_numpy(mesh.vertices[None, ...]).float(),
                torch.from_numpy(mesh.faces[None, ...]).long()).to(device)
            points, normals = sample_points_from_meshes(meshes,
                                                        n_pts,
                                                        return_normals=True)
            trimesh.Trimesh(points.cpu().numpy()[0],
                            vertex_normals=normals.cpu().numpy()[0],
                            process=False).export(val_pts_file,
                                                  vertex_normal=True)
            pcl = Pointclouds(points, normals)
            chamfer_p, chamfer_n = chamfer_distance(
                points,
                pcl_gt.points_padded(),
                x_normals=normals,
                y_normals=pcl_gt.normals_padded(),
            )
            eval_dict['chamfer_p'] = chamfer_p.item()
            eval_dict['chamfer_n'] = chamfer_n.item()
            pf_dist = point_mesh_face_distance(meshes_gt, pcl)
            eval_dict['pf_dist'] = pf_dist.item()
            writer.writerow(eval_dict)
            for k, v in eval_dict.items():
                if v < best_dict[k]:
                    best_dict[k] = v
                    print('best {} so far ({}): {:.4g}'.format(
                        k, final_file, v))
Example #27
    def render_yaw(self,
                   im,
                   depth,
                   v_before=None,
                   v_after=None,
                   rotations=None,
                   maxr=90,
                   nsample=9,
                   crop_mesh=None):
        b, c, h, w = im.shape
        grid_3d = self.depth_to_3d_grid(depth)

        if crop_mesh is not None:
            top, bottom, left, right = crop_mesh  # pixels from border to be cropped
            if top > 0:
                grid_3d[:, :top, :, 1] = grid_3d[:, top:top + 1, :,
                                                 1].repeat(1, top, 1)
                grid_3d[:, :top, :, 2] = grid_3d[:, top:top + 1, :,
                                                 2].repeat(1, top, 1)
            if bottom > 0:
                grid_3d[:, -bottom:, :, 1] = grid_3d[:, -bottom - 1:-bottom, :,
                                                     1].repeat(1, bottom, 1)
                grid_3d[:, -bottom:, :, 2] = grid_3d[:, -bottom - 1:-bottom, :,
                                                     2].repeat(1, bottom, 1)
            if left > 0:
                grid_3d[:, :, :left, 0] = grid_3d[:, :, left:left + 1,
                                                  0].repeat(1, 1, left)
                grid_3d[:, :, :left, 2] = grid_3d[:, :, left:left + 1,
                                                  2].repeat(1, 1, left)
            if right > 0:
                grid_3d[:, :, -right:, 0] = grid_3d[:, :, -right - 1:-right,
                                                    0].repeat(1, 1, right)
                grid_3d[:, :, -right:, 2] = grid_3d[:, :, -right - 1:-right,
                                                    2].repeat(1, 1, right)

        grid_3d = grid_3d.reshape(b, -1, 3)
        im_trans = []

        # inverse warp
        if v_before is not None:
            rot_mat, trans_xyz = get_transform_matrices(v_before)
            grid_3d = self.translate_pts(grid_3d, -trans_xyz)
            grid_3d = self.rotate_pts(grid_3d, rot_mat.transpose(2, 1))

        if rotations is None:
            rotations = torch.linspace(-math.pi / 180 * maxr,
                                       math.pi / 180 * maxr, nsample)
        for i, ri in enumerate(rotations):
            ri = torch.FloatTensor([0, ri, 0]).to(im.device).view(1, 3)
            rot_mat_i, _ = get_transform_matrices(ri)
            grid_3d_i = self.rotate_pts(grid_3d, rot_mat_i.repeat(b, 1, 1))

            if v_after is not None:
                if len(v_after.shape) == 3:
                    v_after_i = v_after[i]
                else:
                    v_after_i = v_after
                rot_mat, trans_xyz = get_transform_matrices(v_after_i)
                grid_3d_i = self.rotate_pts(grid_3d_i, rot_mat)
                grid_3d_i = self.translate_pts(grid_3d_i, trans_xyz)

            textures = get_textures_from_im(im, tx_size=self.tex_cube_size)
            grid_3d_i = Pointclouds(points=grid_3d_i, features=textures)
            warped_images = self.renderer(grid_3d_i).clamp(min=-1., max=1.)
            # print("扭过来的图像大小", warped_images.shape)  torch.Size([8, 64, 64, 3])
            im_trans += [warped_images]
        return torch.stack(im_trans, 1)  # b x t x c x h x w
Example #28
def _add_pointcloud_trace(
    fig: go.Figure,
    pointclouds: Pointclouds,
    trace_name: str,
    subplot_idx: int,
    ncols: int,
    max_points_per_pointcloud: int,
    marker_size: int,
):  # pragma: no cover
    """
    Adds a trace rendering a Pointclouds object to the passed in figure, with
    a given name and in a specific subplot.

    Args:
        fig: plotly figure to add the trace within.
        pointclouds: Pointclouds object to render. It can be batched.
        trace_name: name to label the trace with.
        subplot_idx: identifies the subplot, with 0 being the top left.
        ncols: the number of subplots per row.
        max_points_per_pointcloud: the number of points to render, which are randomly sampled.
        marker_size: the size of the rendered points
    """
    pointclouds = pointclouds.detach().cpu().subsample(
        max_points_per_pointcloud)
    verts = pointclouds.points_packed()
    features = pointclouds.features_packed()

    color = None
    if features is not None:
        if features.shape[1] == 4:  # rgba
            template = "rgba(%d, %d, %d, %f)"
            rgb = (features[:, :3].clamp(0.0, 1.0) * 255).int()
            color = [
                template % (*rgb_, a_)
                for rgb_, a_ in zip(rgb, features[:, 3])
            ]

        if features.shape[1] == 3:
            template = "rgb(%d, %d, %d)"
            rgb = (features.clamp(0.0, 1.0) * 255).int()
            color = [template % (r, g, b) for r, g, b in rgb]

    row = subplot_idx // ncols + 1
    col = subplot_idx % ncols + 1
    fig.add_trace(
        go.Scatter3d(
            x=verts[:, 0],
            y=verts[:, 1],
            z=verts[:, 2],
            marker={
                "color": color,
                "size": marker_size
            },
            mode="markers",
            name=trace_name,
        ),
        row=row,
        col=col,
    )

    # Access the current subplot's scene configuration
    plot_scene = "scene" + str(subplot_idx + 1)
    current_layout = fig["layout"][plot_scene]

    # update the bounds of the axes for the current trace
    verts_center = verts.mean(0)
    max_expand = (verts.max(0)[0] - verts.min(0)[0]).max()
    _update_axes_bounds(verts_center, max_expand, current_layout)
Example #29
    def test_simple_sphere(self):
        device = torch.device("cuda:0")

        # Load reference image
        ref_filename = "test_simple_pointcloud_sphere.png"
        image_ref_filename = DATA_DIR / ref_filename

        # Rescale image_ref to the 0 - 1 range and convert to a binary mask.
        image_ref = convert_image_to_binary_mask(image_ref_filename).to(
            torch.int32)

        sphere_mesh = ico_sphere(1, device)
        verts_padded = sphere_mesh.verts_padded()
        verts_padded[..., 1] += 0.2
        verts_padded[..., 0] += 0.2
        pointclouds = Pointclouds(points=verts_padded)
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(image_size=256,
                                                      radius=5e-2,
                                                      points_per_pixel=1)

        ##################################
        #  1. Test init without cameras.
        ##################################

        # Initialize without passing in the cameras
        rasterizer = PointsRasterizer()

        # Check that omitting the cameras in both initialization
        # and the forward pass throws an error:
        with self.assertRaisesRegex(ValueError, "Cameras must be specified"):
            rasterizer(pointclouds)

        ##########################################
        # 2. Test rasterizing a single pointcloud
        ##########################################

        fragments = rasterizer(pointclouds,
                               cameras=cameras,
                               raster_settings=raster_settings)

        # Convert idx to a binary mask
        image = fragments.idx[0, ..., 0].squeeze().cpu()
        image[image >= 0] = 1.0
        image[image < 0] = 0.0

        if DEBUG:
            Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_test_rasterized_sphere_points.png")

        self.assertTrue(torch.allclose(image, image_ref[..., 0]))

        ########################################
        #  3. Test with a batch of pointclouds
        ########################################

        batch_size = 10
        pointclouds = pointclouds.extend(batch_size)
        fragments = rasterizer(pointclouds,
                               cameras=cameras,
                               raster_settings=raster_settings)
        for i in range(batch_size):
            image = fragments.idx[i, ..., 0].squeeze().cpu()
            image[image >= 0] = 1.0
            image[image < 0] = 0.0
            self.assertTrue(torch.allclose(image, image_ref[..., 0]))
Example #30
def point_mesh_face_distance(meshes: Meshes, pcls: Pointclouds):
    """
    Computes the distance between a pointcloud and a mesh within a batch.
    Given a pair `(mesh, pcl)` in the batch, we define the distance to be the
    sum of two distances, namely `point_face(mesh, pcl) + face_point(mesh, pcl)`

    `point_face(mesh, pcl)`: Computes the squared distance of each point p in pcl
        to the closest triangular face in mesh and averages across all points in pcl
    `face_point(mesh, pcl)`: Computes the squared distance of each triangular face in
        mesh to the closest point in pcl and averages across all faces in mesh.

    The above distance functions are applied for all `(mesh, pcl)` pairs in the batch
    and then averaged across the batch.

    Args:
        meshes: A Meshes data structure containing N meshes
        pcls: A Pointclouds data structure containing N pointclouds

    Returns:
        loss: The `point_face(mesh, pcl) + face_point(mesh, pcl)` distance
            between all `(mesh, pcl)` in a batch averaged across the batch.
    """

    if len(meshes) != len(pcls):
        raise ValueError("meshes and pointclouds must be equal sized batches")
    N = len(meshes)

    # packed representation for pointclouds
    points = pcls.points_packed()  # (P, 3)
    points_first_idx = pcls.cloud_to_packed_first_idx()
    max_points = pcls.num_points_per_cloud().max().item()

    # packed representation for faces
    verts_packed = meshes.verts_packed()
    faces_packed = meshes.faces_packed()
    tris = verts_packed[faces_packed]  # (T, 3, 3)
    tris_first_idx = meshes.mesh_to_faces_packed_first_idx()
    max_tris = meshes.num_faces_per_mesh().max().item()

    # point to face distance: shape (P,)
    point_to_face = point_face_distance(points, points_first_idx, tris,
                                        tris_first_idx, max_points)

    # weight each example by the inverse of number of points in the example
    point_to_cloud_idx = pcls.packed_to_cloud_idx()  # (sum(P_i),)
    num_points_per_cloud = pcls.num_points_per_cloud()  # (N,)
    weights_p = num_points_per_cloud.gather(0, point_to_cloud_idx)
    weights_p = 1.0 / weights_p.float()
    point_to_face = point_to_face * weights_p
    point_dist = point_to_face.sum() / N

    # face to point distance: shape (T,)
    face_to_point = face_point_distance(points, points_first_idx, tris,
                                        tris_first_idx, max_tris)

    # weight each example by the inverse of number of faces in the example
    tri_to_mesh_idx = meshes.faces_packed_to_mesh_idx()  # (sum(T_n),)
    num_tris_per_mesh = meshes.num_faces_per_mesh()  # (N, )
    weights_t = num_tris_per_mesh.gather(0, tri_to_mesh_idx)
    weights_t = 1.0 / weights_t.float()
    face_to_point = face_to_point * weights_t
    face_dist = face_to_point.sum() / N

    return point_dist + face_dist
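A small usage sketch of this loss with a single triangle and a random cloud (values are illustrative):

import torch
from pytorch3d.loss import point_mesh_face_distance
from pytorch3d.structures import Meshes, Pointclouds

verts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
faces = torch.tensor([[0, 1, 2]])
meshes = Meshes(verts=[verts], faces=[faces])
pcls = Pointclouds(points=[torch.rand(50, 3)])
loss = point_mesh_face_distance(meshes, pcls)  # scalar tensor
print(loss)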