Example #1
from typing import List

import torch
from pytorch3d.renderer import ray_bundle_to_ray_points
from pytorch3d.structures import Pointclouds
from pytorch3d.vis.plotly_vis import plot_scene
from visdom import Visdom


def visualize_nerf_outputs(nerf_out: dict, output_cache: List, viz: Visdom,
                           visdom_env: str):
    """
    Visualizes the outputs of the `RadianceFieldRenderer`.

    Args:
        nerf_out: An output of the validation rendering pass.
        output_cache: A list with outputs of several training render passes.
        viz: A visdom connection object.
        visdom_env: The name of visdom environment for visualization.
    """

    # Show the training images.
    ims = torch.stack([o["image"] for o in output_cache])
    ims = torch.cat(list(ims), dim=1)
    viz.image(
        ims.permute(2, 0, 1),
        env=visdom_env,
        win="images",
        opts={"title": "train_images"},
    )

    # Show the coarse and fine renders together with the ground truth images.
    ims_full = torch.cat(
        [
            nerf_out[imvar][0].permute(2, 0, 1).detach().cpu().clamp(0.0, 1.0)
            for imvar in ("rgb_coarse", "rgb_fine", "rgb_gt")
        ],
        dim=2,
    )
    viz.image(
        ims_full,
        env=visdom_env,
        win="images_full",
        opts={"title": "coarse | fine | target"},
    )

    # Make a 3D plot of training cameras and their emitted rays.
    camera_trace = {
        f"camera_{ci:03d}": o["camera"].cpu()
        for ci, o in enumerate(output_cache)
    }
    ray_pts_trace = {
        f"ray_pts_{ci:03d}": Pointclouds(
            ray_bundle_to_ray_points(
                o["coarse_ray_bundle"]).detach().cpu().view(1, -1, 3))
        for ci, o in enumerate(output_cache)
    }
    plotly_plot = plot_scene(
        {
            "training_scene": {
                **camera_trace,
                **ray_pts_trace,
            },
        },
        pointcloud_max_points=5000,
        pointcloud_marker_size=1,
        camera_scale=0.3,
    )
    viz.plotlyplot(plotly_plot, env=visdom_env, win="scenes")
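
The function above expects `output_cache` entries with `image`, `camera`, and `coarse_ray_bundle` keys, plus a `nerf_out` dict of batched renders. Below is a minimal invocation sketch using random stand-in data; all shapes, the placeholder cameras, and the `nerf_debug` environment name are assumptions rather than part of the original source, and a visdom server must be running.

# Minimal usage sketch with random stand-in data (shapes are assumptions).
import torch
from pytorch3d.renderer import FoVPerspectiveCameras, RayBundle
from visdom import Visdom

H, W, n_pts = 32, 32, 16
output_cache = [
    {
        "image": torch.rand(H, W, 3),        # one training RGB image
        "camera": FoVPerspectiveCameras(),   # placeholder identity camera
        "coarse_ray_bundle": RayBundle(
            origins=torch.zeros(H * W, 3),
            directions=torch.randn(H * W, 3),
            lengths=torch.linspace(0.1, 2.0, n_pts).expand(H * W, n_pts),
            xys=torch.zeros(H * W, 2),
        ),
    }
    for _ in range(3)
]
nerf_out = {
    k: torch.rand(1, H, W, 3) for k in ("rgb_coarse", "rgb_fine", "rgb_gt")
}
visualize_nerf_outputs(nerf_out, output_cache, Visdom(), "nerf_debug")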
Example #2
def _visdom_plot_scene(
    train_cameras,
    test_cameras,
) -> None:
    from pytorch3d.vis.plotly_vis import plot_scene

    p = plot_scene(
        {"scene": {
            "train_cams": train_cameras,
            "test_cams": test_cameras,
        }})
    from visdom import Visdom

    viz = Visdom()
    viz.plotlyplot(p, env="cam_traj_dbg", win="cam_trajs")
    import pdb

    pdb.set_trace()
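
This is a debugging helper: it plots the two camera batches and then intentionally drops into pdb. A hypothetical call is sketched below; the camera objects are placeholders and any PyTorch3D camera batches would work.

import torch
from pytorch3d.renderer import PerspectiveCameras

# Placeholder camera batches of 4 train and 2 test identity cameras.
train_cameras = PerspectiveCameras(R=torch.eye(3)[None].repeat(4, 1, 1))
test_cameras = PerspectiveCameras(R=torch.eye(3)[None].repeat(2, 1, 1))
_visdom_plot_scene(train_cameras, test_cameras)  # halts in pdb after plotting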
Example #3
    def show_depth(self, depth_loss: float, name_postfix: str,
                   loss_mask_now: torch.Tensor):
        self._viz.images(
            torch.cat(
                (
                    make_depth_image(self.depth_render, loss_mask_now),
                    make_depth_image(self.depth_map, loss_mask_now),
                ),
                dim=3,
            ),
            env=self.visdom_env,
            win="depth_abs" + name_postfix,
            opts={"title": f"depth_abs_{name_postfix}_{depth_loss:1.2f}"},
        )
        self._viz.images(
            loss_mask_now,
            env=self.visdom_env,
            win="depth_abs" + name_postfix + "_mask",
            opts={"title": f"depth_abs_{name_postfix}_{depth_loss:1.2f}_mask"},
        )
        self._viz.images(
            self.depth_mask,
            env=self.visdom_env,
            win="depth_abs" + name_postfix + "_maskd",
            opts={
                "title": f"depth_abs_{name_postfix}_{depth_loss:1.2f}_maskd"
            },
        )

        # show the 3D plot
        # pyre-fixme[9]: viewpoint_trivial has type `PerspectiveCameras`; used as
        #  `TensorProperties`.
        viewpoint_trivial: PerspectiveCameras = PerspectiveCameras().to(
            loss_mask_now.device)
        pcl_pred = get_rgbd_point_cloud(
            viewpoint_trivial,
            self.image_render,
            self.depth_render,
            # mask_crop,
            torch.ones_like(self.depth_render),
            # loss_mask_now,
        )
        pcl_gt = get_rgbd_point_cloud(
            viewpoint_trivial,
            self.image_rgb_masked,
            self.depth_map,
            # mask_crop,
            torch.ones_like(self.depth_map),
            # loss_mask_now,
        )
        _pcls = {
            pn: p
            for pn, p in zip(("pred_depth", "gt_depth"), (pcl_pred, pcl_gt))
            if int(p.num_points_per_cloud()) > 0
        }
        plotlyplot = plot_scene(
            {f"pcl{name_postfix}": _pcls},
            camera_scale=1.0,
            pointcloud_max_points=10000,
            pointcloud_marker_size=1,
        )
        self._viz.plotlyplot(
            plotlyplot,
            env=self.visdom_env,
            win=f"pcl{name_postfix}",
        )
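
The core step in this example is unprojecting an RGB image and a depth map into a colored point cloud before handing it to `plot_scene`. A self-contained sketch of that step follows; the random tensors, sizes, and output file name are assumptions, with shapes following the `get_rgbd_point_cloud` convention.

import torch
from pytorch3d.implicitron.tools.point_cloud_utils import get_rgbd_point_cloud
from pytorch3d.renderer import PerspectiveCameras
from pytorch3d.vis.plotly_vis import plot_scene

camera = PerspectiveCameras()               # identity viewpoint, batch of 1
image_rgb = torch.rand(1, 3, 64, 64)        # (B, 3, H, W) colors
depth_map = 1.0 + torch.rand(1, 1, 64, 64)  # (B, 1, H, W) positive depths
pcl = get_rgbd_point_cloud(
    camera, image_rgb, depth_map, torch.ones_like(depth_map)
)
fig = plot_scene({"rgbd": {"points": pcl}}, pointcloud_marker_size=1)
fig.write_html("rgbd_pcl.html")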
Example #4
    def _gen_and_render_pointcloud(self, max_frames, load_dataset_point_cloud,
                                   dataset_key):
        dataset = self.datasets[dataset_key]
        # load the point cloud of the first sequence
        sequence_show = list(dataset.seq_annots.keys())[0]
        device = torch.device("cuda:0")

        point_cloud, sequence_frame_data = get_implicitron_sequence_pointcloud(
            dataset,
            sequence_name=sequence_show,
            mask_points=True,
            max_frames=max_frames,
            num_workers=10,
            load_dataset_point_cloud=load_dataset_point_cloud,
        )

        # render on gpu
        point_cloud = point_cloud.to(device)
        cameras = sequence_frame_data.camera.to(device)

        # render the point_cloud from the viewpoint of loaded cameras
        images_render = torch.cat([
            self._render_one_pointcloud(
                point_cloud,
                cameras[frame_i],
                (
                    dataset.image_height,
                    dataset.image_width,
                ),
            ) for frame_i in range(len(cameras))
        ]).cpu()
        images_gt_and_render = torch.cat(
            [sequence_frame_data.image_rgb, images_render], dim=3)

        imfile = os.path.join(
            os.path.split(os.path.abspath(__file__))[0],
            "test_dataset_visualize" + f"_max_frames={max_frames}" +
            f"_load_pcl={load_dataset_point_cloud}.png",
        )
        print(f"Exporting image {imfile}.")
        torchvision.utils.save_image(images_gt_and_render, imfile, nrow=2)

        if self.visdom is not None:
            test_name = f"{max_frames}_{load_dataset_point_cloud}_{dataset_key}"
            self.visdom.images(
                images_gt_and_render,
                env="test_dataset_visualize",
                win=f"pcl_renders_{test_name}",
                opts={"title": f"pcl_renders_{test_name}"},
            )
            plotlyplot = plot_scene(
                {
                    "scene_batch": {
                        "cameras": cameras,
                        "point_cloud": point_cloud,
                    }
                },
                camera_scale=1.0,
                pointcloud_max_points=10000,
                pointcloud_marker_size=1.0,
            )
            self.visdom.plotlyplot(
                plotlyplot,
                env="test_dataset_visualize",
                win=f"pcl_{test_name}",
            )
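
The `_render_one_pointcloud` helper is defined elsewhere in the test class. One plausible sketch of it, assuming a standard PyTorch3D alpha-compositing points renderer (the raster settings below are assumptions, not the values used by the real helper):

import torch
from pytorch3d.renderer import (
    AlphaCompositor,
    PointsRasterizationSettings,
    PointsRasterizer,
    PointsRenderer,
)

def _render_one_pointcloud(point_cloud, camera, render_size):
    renderer = PointsRenderer(
        rasterizer=PointsRasterizer(
            cameras=camera,
            raster_settings=PointsRasterizationSettings(
                image_size=render_size, radius=0.01, points_per_pixel=10
            ),
        ),
        compositor=AlphaCompositor(),
    )
    # (1, H, W, C) rendered features -> (1, 3, H, W) image, matching the
    # torch.cat along the batch dimension in the method above.
    return renderer(point_cloud)[..., :3].permute(0, 3, 1, 2)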
Example #5
import torch
from pytorch3d.structures import Meshes
from pytorch3d.vis.plotly_vis import plot_scene
from pytorch3d.renderer import TexturesVertex


def orthogonal_procrustes(M):
    u, s, v = M.svd()
    return u.mm(v.T), s.sum()


def align(A, M):
    R, s = orthogonal_procrustes(M)
    return A.mm(R) * s


vis_file = 'outputs/manogcnx3_1x_freihand_aligned+uvd/inference/freihand_test/visualize.pth'
results = torch.load(vis_file)

idxs = [1, 2, 3, 4, 5]
for idx in idxs:
    verts, faces = results[idx]['uvds'], results[idx]['faces']
    # Initialize each vertex to a uniform light gray color.
    verts_rgb = torch.ones_like(verts)[None] - 0.35  # (1, V, 3)
    textures = TexturesVertex(verts_features=verts_rgb)

    mesh = Meshes(verts=[verts], faces=[faces], textures=textures)

    # Render the plotly figure
    fig = plot_scene({"subplot1": {"cow_mesh": mesh}})

    fig.write_html(f'{idx}.html')
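
`plot_scene` can also lay out several subplots in a single figure instead of writing one HTML file per mesh. A minimal sketch with a dummy single-triangle mesh follows; the `ncols` layout and the output file name are illustrative choices.

import torch
from pytorch3d.structures import Meshes
from pytorch3d.vis.plotly_vis import plot_scene

# A single-triangle dummy mesh.
verts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
faces = torch.tensor([[0, 1, 2]])
mesh = Meshes(verts=[verts], faces=[faces])

# Two subplots rendered side by side in one figure.
fig = plot_scene(
    {"left": {"tri": mesh}, "right": {"tri_again": mesh}},
    ncols=2,
)
fig.write_html("two_subplots.html")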