    def test_cpu_vs_cuda_naive(self):
        """
        Compare the naive CUDA and naive CPU (C++) implementations.
        """

        torch.manual_seed(231)
        image_size = 64
        radius = 0.1**2
        faces_per_pixel = 3
        device = torch.device("cpu")
        meshes_cpu = ico_sphere(0, device)
        verts1, faces1 = meshes_cpu.get_mesh_verts_faces(0)
        verts1.requires_grad = True
        meshes_cpu = Meshes(verts=[verts1], faces=[faces1])

        device = torch.device("cuda:0")
        meshes_cuda = ico_sphere(0, device)
        verts2, faces2 = meshes_cuda.get_mesh_verts_faces(0)
        verts2.requires_grad = True
        meshes_cuda = Meshes(verts=[verts2], faces=[faces2])

        args_cpu = (meshes_cpu, image_size, radius, faces_per_pixel)
        args_cuda = (meshes_cuda, image_size, radius, faces_per_pixel, 0, 0)
        self._compare_impls(
            rasterize_meshes,
            rasterize_meshes,
            args_cpu,
            args_cuda,
            verts1,
            verts2,
            compare_grads=True,
        )
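
The `_compare_impls` helper is defined elsewhere in the test class; a minimal sketch of what it presumably does, inferred from the call sites above (names and the gradient tolerance are assumptions), is:

    def _compare_impls(self, fn1, fn2, args1, args2, grad_var1, grad_var2,
                       compare_grads=False):
        # Run both rasterizer implementations and compare forward outputs.
        idx1, zbuf1, bary1, dist1 = fn1(*args1)
        idx2, zbuf2, bary2, dist2 = fn2(*args2)
        self.assertClose(idx1.cpu(), idx2.cpu())
        self.assertClose(zbuf1.cpu(), zbuf2.cpu())
        self.assertClose(bary1.cpu(), bary2.cpu())
        self.assertClose(dist1.cpu(), dist2.cpu())
        if compare_grads:
            # Backprop the same upstream gradient through both versions
            # and compare the resulting vertex gradients.
            grad_zbuf = torch.randn_like(zbuf1)
            (zbuf1 * grad_zbuf).sum().backward()
            (zbuf2 * grad_zbuf.to(zbuf2.device)).sum().backward()
            self.assertClose(grad_var1.grad.cpu(), grad_var2.grad.cpu(),
                             atol=2e-6)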
    def test_compare_coarse_cpu_vs_cuda(self):
        torch.manual_seed(231)
        N = 1
        image_size = 512
        blur_radius = 0.0
        bin_size = 32
        max_faces_per_bin = 20

        device = torch.device("cpu")
        meshes = ico_sphere(2, device)

        faces = meshes.faces_packed()
        verts = meshes.verts_packed()
        faces_verts = verts[faces]
        num_faces_per_mesh = meshes.num_faces_per_mesh()
        mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx()
        args = (
            faces_verts,
            mesh_to_face_first_idx,
            num_faces_per_mesh,
            image_size,
            blur_radius,
            bin_size,
            max_faces_per_bin,
        )
        bin_faces_cpu = _C._rasterize_meshes_coarse(*args)

        device = torch.device("cuda:0")
        meshes = ico_sphere(2, device)

        faces = meshes.faces_packed()
        verts = meshes.verts_packed()
        faces_verts = verts[faces]
        num_faces_per_mesh = meshes.num_faces_per_mesh()
        mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx()
        args = (
            faces_verts,
            mesh_to_face_first_idx,
            num_faces_per_mesh,
            image_size,
            blur_radius,
            bin_size,
            max_faces_per_bin,
        )
        bin_faces_cuda = _C._rasterize_meshes_coarse(*args)

        # Bin faces might not be the same: CUDA version might write them in
        # any order. But if we sort the non-(-1) elements of the CUDA output
        # then they should be the same.
        for n in range(N):
            for by in range(bin_faces_cpu.shape[1]):
                for bx in range(bin_faces_cpu.shape[2]):
                    K = (bin_faces_cuda[n, by, bx] != -1).sum().item()
                    idxs_cpu = bin_faces_cpu[n, by, bx].tolist()
                    idxs_cuda = bin_faces_cuda[n, by, bx].tolist()
                    idxs_cuda[:K] = sorted(idxs_cuda[:K])
                    self.assertEqual(idxs_cpu, idxs_cuda)
    @staticmethod
    def rasterize_meshes_cuda_with_init(
        num_meshes: int,
        ico_level: int,
        image_size: int,
        blur_radius: float,
        bin_size: int,
        max_faces_per_bin: int,
    ):

        meshes = ico_sphere(ico_level, torch.device("cuda:0"))
        meshes_batch = meshes.extend(num_meshes)
        torch.cuda.synchronize()

        def rasterize():
            rasterize_meshes(
                meshes_batch,
                image_size,
                blur_radius,
                8,
                bin_size,
                max_faces_per_bin,
            )
            torch.cuda.synchronize()

        return rasterize
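
The factory above returns a closure for a benchmark harness; purely for illustration (assuming the factory is reachable at module scope and a GPU is available), it could be timed with the standard library:

import timeit

rasterize = rasterize_meshes_cuda_with_init(
    num_meshes=8, ico_level=1, image_size=64,
    blur_radius=0.0, bin_size=0, max_faces_per_bin=0,
)
# Each call rasterizes the whole batch and synchronizes, so the measured
# wall-clock time reflects the actual GPU work.
print(timeit.timeit(rasterize, number=10) / 10, "seconds per call")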


Example #4
    def test_gather_scatter(self):
        """
        Check gather_scatter cuda and python versions give the same results.
        Check that gather_scatter cuda version throws an error if cpu tensors
        are given as input.
        """
        device = get_random_cuda_device()
        mesh = ico_sphere()
        verts = mesh.verts_packed()
        edges = mesh.edges_packed()
        w0 = nn.Linear(3, 1)
        input = w0(verts)

        # undirected
        output_python = gather_scatter_python(input, edges, False)
        output_cuda = _C.gather_scatter(input.to(device=device),
                                        edges.to(device=device), False, False)
        self.assertClose(output_cuda.cpu(), output_python)

        output_cpu = _C.gather_scatter(input.cpu(), edges.cpu(), False, False)
        self.assertClose(output_cpu, output_python)

        # directed
        output_python = gather_scatter_python(input, edges, True)
        output_cuda = _C.gather_scatter(input.to(device=device),
                                        edges.to(device=device), True, False)
        self.assertClose(output_cuda.cpu(), output_python)
        output_cpu = _C.gather_scatter(input.cpu(), edges.cpu(), True, False)
        self.assertClose(output_cpu, output_python)
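
`gather_scatter_python` is defined elsewhere in the test module; a plausible reference implementation (an assumption, not the library's exact code) sums each vertex's neighbor features over the edge list:

def gather_scatter_ref(input, edges, directed=False):
    # input: (V, D) per-vertex features; edges: (E, 2) vertex-index pairs.
    output = torch.zeros_like(input)
    v0, v1 = edges[:, 0], edges[:, 1]
    # Every vertex accumulates the features of its neighbors.
    output.index_add_(0, v0, input[v1])
    if not directed:
        output.index_add_(0, v1, input[v0])
    return output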
Example #5
    def test_gather_scatter(self):
        """
        Check gather_scatter cuda and python versions give the same results.
        Check that gather_scatter cuda version throws an error if cpu tensors
        are given as input.
        """
        device = torch.device("cuda:0")
        mesh = ico_sphere()
        verts = mesh.verts_packed()
        edges = mesh.edges_packed()
        w0 = nn.Linear(3, 1)
        input = w0(verts)

        # undirected
        output_cpu = gather_scatter_python(input, edges, False)
        output_cuda = _C.gather_scatter(input.to(device=device),
                                        edges.to(device=device), False, False)
        self.assertClose(output_cuda.cpu(), output_cpu)
        with self.assertRaises(Exception) as err:
            _C.gather_scatter(input.cpu(), edges.cpu(), False, False)
        self.assertTrue("Not implemented on the CPU" in str(err.exception))

        # directed
        output_cpu = gather_scatter_python(input, edges, True)
        output_cuda = _C.gather_scatter(input.to(device=device),
                                        edges.to(device=device), True, False)
        self.assertClose(output_cuda.cpu(), output_cpu)
Example #6
    def test_ball_query_output_simple(self):
        device = get_random_cuda_device()
        N, P1, P2, K = 5, 8, 16, 4
        sphere = ico_sphere(level=2, device=device).extend(N)
        points_1 = sample_points_from_meshes(sphere, P1)
        points_2 = sample_points_from_meshes(sphere, P2) * 5.0
        radius = 6.0

        naive_out = self._ball_query_naive(
            points_1, points_2, lengths1=None, lengths2=None, K=K, radius=radius
        )
        cuda_out = ball_query(points_1, points_2, K=K, radius=radius)

        # All points should have K sample neighbors as radius is large
        # Zero is a valid index but can only be present once (i.e. no zero padding)
        naive_out_zeros = (naive_out.idx == 0).sum(dim=-1).max()
        cuda_out_zeros = (cuda_out.idx == 0).sum(dim=-1).max()
        self.assertTrue(naive_out_zeros == 0 or naive_out_zeros == 1)
        self.assertTrue(cuda_out_zeros == 0 or cuda_out_zeros == 1)

        # All points should now have zero sample neighbors as radius is small
        radius = 0.5
        naive_out = self._ball_query_naive(
            points_1, points_2, lengths1=None, lengths2=None, K=K, radius=radius
        )
        cuda_out = ball_query(points_1, points_2, K=K, radius=radius)
        naive_out_allzeros = (naive_out.idx == -1).all()
        cuda_out_allzeros = (cuda_out.idx == -1).all()
        self.assertTrue(naive_out_allzeros)
        self.assertTrue(cuda_out_allzeros)
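
`_ball_query_naive` is defined elsewhere in the test class and returns a namedtuple with an `idx` field; a minimal sketch of the index computation (hypothetical, ignoring the `lengths1`/`lengths2` padding arguments) keeps the first K neighbors within the radius and pads with -1:

    def _ball_query_naive_idx(self, p1, p2, K, radius):
        # p1: (N, P1, 3) query points, p2: (N, P2, 3) candidate points.
        N, P1, _ = p1.shape
        dists2 = torch.cdist(p1, p2) ** 2  # (N, P1, P2) squared distances
        idx = torch.full((N, P1, K), -1, dtype=torch.int64, device=p1.device)
        for n in range(N):
            for i in range(P1):
                hits = (dists2[n, i] < radius ** 2).nonzero(as_tuple=True)[0]
                hits = hits[:K]
                idx[n, i, : hits.numel()] = hits
        return idx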
    def test_cuda_naive_vs_binned_perspective_correct(self):
        meshes = ico_sphere(2, device=torch.device("cuda"))
        verts1, faces1 = meshes.get_mesh_verts_faces(0)
        verts1.requires_grad = True
        meshes1 = Meshes(verts=[verts1], faces=[faces1])
        verts2 = verts1.detach().clone().requires_grad_(True)
        faces2 = faces1.detach().clone()
        meshes2 = Meshes(verts=[verts2], faces=[faces2])

        kwargs = {"image_size": 64, "perspective_correct": True}
        fn1 = functools.partial(rasterize_meshes,
                                meshes1,
                                bin_size=0,
                                **kwargs)
        fn2 = functools.partial(rasterize_meshes,
                                meshes2,
                                bin_size=8,
                                **kwargs)
        args = ()
        self._compare_impls(fn1,
                            fn2,
                            args,
                            args,
                            verts1,
                            verts2,
                            compare_grads=True)
def transform_meshes_to_camera_coord_system(meshes, boxes, zranges, Ks, imsize):
    device = meshes.device
    new_verts, new_faces = [], []
    h, w = imsize
    im_size = torch.tensor([w, h], device=device).view(1, 2)
    assert len(meshes) == len(zranges)
    for i in range(len(meshes)):
        verts, faces = meshes.get_mesh_verts_faces(i)
        if verts.numel() == 0:
            verts, faces = ico_sphere(level=3, device=device).get_mesh_verts_faces(0)
        assert not torch.isnan(verts).any()
        assert not torch.isnan(faces).any()
        roi = boxes[i].view(1, 4)
        zrange = zranges[i].view(1, 2)
        K = Ks[i].view(1, 3)
        cub3D = shape_utils.box2D_to_cuboid3D(zrange, K, roi, im_size)
        txz, tyz = shape_utils.cuboid3D_to_unitbox3D(cub3D)

        # image to camera coords
        verts[:, 0] = -verts[:, 0]
        verts[:, 1] = -verts[:, 1]

        # transform to destination size
        xz = verts[:, [0, 2]]
        yz = verts[:, [1, 2]]
        pxz = txz.inverse(xz.view(1, -1, 2)).squeeze(0)
        pyz = tyz.inverse(yz.view(1, -1, 2)).squeeze(0)
        verts = torch.stack([pxz[:, 0], pyz[:, 0], pxz[:, 1]], dim=1).to(
            device, dtype=torch.float32
        )

        new_verts.append(verts)
        new_faces.append(faces)

    return Meshes(verts=new_verts, faces=new_faces)
    @staticmethod
    def rasterize_meshes_cpu_with_init(num_meshes: int, ico_level: int,
                                       image_size: int, blur_radius: float):
        meshes = ico_sphere(ico_level, torch.device("cpu"))
        meshes_batch = meshes.extend(num_meshes)

        def rasterize():
            rasterize_meshes(meshes_batch, image_size, blur_radius, bin_size=0)

        return rasterize
Example #10
def deform_sphere():

    meshes = ico_sphere(3)
    meshes = ARAP_from_meshes(meshes)  # convert to ARAP object
    N = meshes.num_verts_per_mesh()[0]

    handle_verts = [26]
    handle_pos = meshes.verts_padded()[0][handle_verts]
    handle_pos_shifted = handle_pos.clone()

    # static as furthest vert
    static_verts = [
        max(range(N),
            key=lambda x: torch.norm(meshes.verts_padded()[0][x] - handle_pos[
                0]))
    ]
    static_verts = add_n_ring_neighbours(meshes, static_verts, n=5)

    trisurfs = plot_meshes(ax,
                           meshes,
                           handle_verts=handle_verts,
                           static_verts=static_verts,
                           prop=False,
                           change_lims=True,
                           color="gray")

    disp_vec = meshes.C[0] - handle_pos[0]  # displace towards centre of mass

    n_frames = 100
    disp_frac = 1.2  # fraction of full disp_vec to move in animation
    step = disp_frac * 4 / n_frames  # per-frame step; each quarter of the animation covers disp_frac

    def anim(i):
        [x.remove() for x in trisurfs]  # remove previous frame's mesh

        if i < n_frames / 4 or i > 3 * n_frames / 4:
            direction = 1
        else:
            direction = -1

        handle_pos_shifted[0] += direction * step * disp_vec

        ## deform, replot
        meshes.solve(static_verts=static_verts,
                     handle_verts=handle_verts,
                     handle_verts_pos=handle_pos_shifted,
                     n_its=1)  ## run ARAP

        trisurfs[:] = plot_meshes(ax,
                                  meshes,
                                  handle_verts=handle_verts,
                                  static_verts=static_verts,
                                  prop=False,
                                  color="gray")

    ax.axis("off")
    save_animation(fig, anim, n_frames=n_frames, title="sphere", fps=30)
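
`fig`, `ax`, `plot_meshes`, `add_n_ring_neighbours` and `save_animation` are module-level helpers not shown in this snippet; the matplotlib globals would presumably be created along these lines:

import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(projection="3d")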
    @staticmethod
    def rasterize_meshes_python_with_init(num_meshes: int, ico_level: int,
                                          image_size: int, blur_radius: float):
        device = torch.device("cpu")
        meshes = ico_sphere(ico_level, device)
        meshes_batch = meshes.extend(num_meshes)

        def rasterize():
            rasterize_meshes_python(meshes_batch, image_size, blur_radius)

        return rasterize
    def forward(self, imgs):
        N = imgs.shape[0]
        device = imgs.device

        img_feats = self.backbone(imgs)
        P = self._get_projection_matrix(N, device)

        init_meshes = ico_sphere(self.ico_sphere_level, device).extend(N)
        refined_meshes = self.mesh_head(img_feats, init_meshes, P, subdivide=True)
        return None, refined_meshes
Example #13
def create_model(cfg, device, mode="train", camera_model=None, **kwargs):
    ''' Returns model

    Args:
        cfg (edict): imported yaml config
        device (device): pytorch device
    '''
    if cfg.model.type == 'point':
        decoder = None

    texture = None
    use_lighting = (cfg.renderer is not None
                    and not cfg.renderer.get('is_neural_texture', True))
    if use_lighting:
        texture = LightingTexture()
    else:
        if 'rgb' not in cfg.model.decoder_kwargs.out_dims:
            Texture = get_class_from_string(cfg.model.texture_type)
            cfg.model.texture_kwargs[
                'c_dim'] = cfg.model.decoder_kwargs.out_dims.get('latent', 0)
            texture_decoder = Texture(**cfg.model.texture_kwargs)
        else:
            texture_decoder = decoder
            logger_py.info("Decoder used as NeuralTexture")

        texture = NeuralTexture(
            view_dependent=cfg.model.texture_kwargs.view_dependent,
            decoder=texture_decoder).to(device=device)
        logger_py.info("Created NeuralTexture {}".format(texture.__class__))
        logger_py.info(texture)

    Model = get_class_from_string("DSS.models.{}_modeling.Model".format(
        cfg.model.type))

    # if not using decoder, then use non-parameterized point renderer
    # create icosphere as initial point cloud
    sphere_mesh = ico_sphere(level=4)
    sphere_mesh.scale_verts_(0.5)
    points, normals = sample_points_from_meshes(
        sphere_mesh,
        num_samples=int(cfg['model']['model_kwargs']['n_points_per_cloud']),
        return_normals=True)
    colors = torch.ones_like(points)
    renderer = create_renderer(cfg.renderer).to(device)
    model = Model(
        points,
        normals,
        colors,
        renderer,
        device=device,
        texture=texture,
        **cfg.model.model_kwargs,
    ).to(device=device)

    return model
Example #14
    def __init__(
        self,
        batch_size=2,
        radius=0.2,
        camextr=None,
        hand_links=None,
        debug=True,
        random_rot=True,
        z_off=0.5,
        y_off=0,
        x_off=0,
        mesh_type="box",
    ):
        super().__init__()
        self.batch_size = batch_size
        if mesh_type == "box":
            box = tricreation.box([1, 1, 1])
            faces = torch.Tensor(np.array(box.faces))
            verts_loc = torch.Tensor(np.array(box.vertices))
        elif mesh_type == "sphere":
            icomesh = py3dutils.ico_sphere(2)
            verts_loc = icomesh.verts_padded()[0]
            faces = icomesh.faces_padded()[0]
        else:
            raise ValueError(f"{mesh_type} not in [sphere|box]")
        # Normalize and save verts
        norm_verts = normalize.normalize_verts(verts_loc, 1)
        self.register_buffer(
            "verts", norm_verts.unsqueeze(0).repeat(batch_size, 1, 1)
        )
        self.register_buffer(
            "faces", faces.unsqueeze(0).repeat(batch_size, 1, 1)
        )

        # Initialize translation and rotation
        rot_vecs = torch.stack(
            [rotutils.get_rotvec6d(random_rot) for _ in range(batch_size)]
        )

        # Initialize rotation parameters
        self.rot_vecs = torch.nn.Parameter(rot_vecs, requires_grad=True)

        # Initialize scale and translation
        self.trans = torch.nn.Parameter(
            norm_verts.new_zeros(batch_size, 3)
            + norm_verts.new([x_off, y_off, z_off]).view(1, 3),
            requires_grad=True,
        )
        # Scale is shared for all object views
        self.scale = torch.nn.Parameter(
            torch.Tensor([radius]), requires_grad=True
        )
Example #15
def get_initial_sphere_meshes(level, device):
    """
    wrapper around pytorch3d.utils.ico_sphere
    returns mesh at proper position/scale
    """
    init_meshes = ico_sphere(level, device)
    # TODO: Don't use these magic numbers
    init_meshes.scale_verts_(0.25)
    offset = init_meshes.verts_packed().clone()
    offset[:, :] = 0
    # average depth of objects is 1.4m
    # z is negative because of the coordinate frame convention
    offset[:, 2] = -1.4
    init_meshes.offset_verts_(offset)
    return init_meshes
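
For illustration, the returned sphere has radius 0.25 and is centered 1.4 m in front of the camera along -z:

init = get_initial_sphere_meshes(level=2, device=torch.device("cpu"))
centroid = init.verts_packed().mean(dim=0)
print(centroid)  # approximately tensor([0., 0., -1.4])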
Example #16
    def test_backward(self):
        device = torch.device("cuda:0")
        mesh = ico_sphere()
        verts = mesh.verts_packed()
        edges = mesh.edges_packed()
        verts_cuda = verts.clone().to(device)
        edges_cuda = edges.clone().to(device)
        verts.requires_grad = True
        verts_cuda.requires_grad = True

        neighbor_sums_cuda = gather_scatter(verts_cuda, edges_cuda, False)
        neighbor_sums = gather_scatter_python(verts, edges, False)
        neighbor_sums_cuda.sum().backward()
        neighbor_sums.sum().backward()

        self.assertClose(verts.grad.cpu(), verts_cuda.grad.cpu())
Example #17
    def forward(self, imgs):
        N = imgs.shape[0]
        device = imgs.device

        img_feats = self.backbone(imgs)
        # add view dimension (single view)
        img_feats = [i.unsqueeze(1) for i in img_feats]

        P = [self._get_projection_matrix(N, device)]

        init_meshes = ico_sphere(self.ico_sphere_level, device).extend(N)
        refined_meshes = self.mesh_head(img_feats, init_meshes, P)
        return {
            "voxel_scores": None,
            "meshes_pred": refined_meshes,
        }
Example #18
def create_data(folder_path='meshes/',
                nb_of_pointclouds=50,
                nb_of_points=5000,
                sphere_level=4,
                normalize_data=True):
    device = torch.device("cuda:0")

    data_path = os.path.join(os.getcwd(), folder_path)
    src_mesh = ico_sphere(sphere_level, device)

    for filename in os.listdir(data_path):
        print(f"{datetime.now()} Starting:{filename}")
        file_path = os.path.join(data_path, filename)
        cur_mesh = utils.load_mesh(file_path)
        cur_deform_verts = deformation.get_deform_verts(
            cur_mesh, nb_of_points, sphere_level)
        data_verts = np.expand_dims(cur_deform_verts.detach().cpu().numpy(),
                                    axis=0)
        data_input = None
        data_output = None
        for _ in range(nb_of_pointclouds):
            data_a = sample_points_from_meshes(
                cur_mesh, nb_of_points).squeeze().cpu().numpy()
            if normalize_data:
                data_a = data_a - np.mean(data_a, axis=0)
                data_a = data_a / np.max(data_a, axis=0)
                data_a_sort_indices = np.argsort(np.linalg.norm(data_a,
                                                                axis=1))
                data_a = data_a[data_a_sort_indices]
            data_a = np.expand_dims(data_a, axis=0)
            data_input = data_a if data_input is None else np.concatenate(
                (data_input, data_a))
            data_output = data_verts if data_output is None else np.concatenate(
                (data_output, data_verts))
        np.save(f'data/{os.path.splitext(filename)[0]}_input.npy', data_input)
        np.save(f'data/{os.path.splitext(filename)[0]}_output.npy',
                data_output)
        deformed_mesh = src_mesh.offset_verts(cur_deform_verts)
        final_verts, final_faces = deformed_mesh.get_mesh_verts_faces(0)
        final_obj = os.path.join(
            'deformed_meshes/',
            f'{os.path.splitext(filename)[0]}_deformed.obj')
        save_obj(final_obj, final_verts, final_faces)
        print(
            f"{datetime.now()} Finished:{filename}, Point Cloud Shape:{data_input.shape} Deform Verts Shape:{data_output.shape}"
        )
Example #19
    def forward(self,
                imgs,
                z=None):  # z is the latent vector sampled from P(z|x)
        N = imgs.shape[0]
        device = imgs.device

        img_feats = self.backbone(imgs)
        # concat_feats = torch.cat((img_feats,z),dim=1)
        P = self._get_projection_matrix(N, device)
        #         print(P)

        init_meshes = ico_sphere(self.ico_sphere_level, device).extend(N)
        refined_meshes = self.mesh_head(img_feats,
                                        z,
                                        init_meshes,
                                        P,
                                        subdivide=True)
        return None, refined_meshes
def get_deform_verts(target_mesh, points_to_sample=5000, sphere_level=4):
    device = torch.device("cuda:0")

    src_mesh = ico_sphere(sphere_level, device)

    deform_verts = torch.full(src_mesh.verts_packed().shape,
                              0.0,
                              device=device,
                              requires_grad=True)

    learning_rate = 0.01
    num_iter = 500
    w_chamfer = 1.0
    w_edge = 0.05
    w_normal = 0.0005
    w_laplacian = 0.005

    optimizer = torch.optim.Adam([deform_verts],
                                 lr=learning_rate,
                                 betas=(0.5, 0.999))

    for _ in range(num_iter):
        optimizer.zero_grad()

        new_src_mesh = src_mesh.offset_verts(deform_verts)

        sample_trg = sample_points_from_meshes(target_mesh, points_to_sample)
        sample_src = sample_points_from_meshes(new_src_mesh, points_to_sample)

        loss_chamfer, _ = chamfer_distance(sample_trg, sample_src)
        loss_edge = mesh_edge_loss(new_src_mesh)
        loss_normal = mesh_normal_consistency(new_src_mesh)
        loss_laplacian = mesh_laplacian_smoothing(new_src_mesh,
                                                  method="uniform")
        loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian

        loss.backward()
        optimizer.step()
    print(
        f"{datetime.now()} Loss Chamfer:{loss_chamfer * w_chamfer}, Loss Edge:{loss_edge * w_edge}, Loss Normal:{loss_normal * w_normal}, Loss Laplacian:{loss_laplacian * w_laplacian}"
    )

    return deform_verts
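
A hypothetical usage, assuming `target` is a `Meshes` object on the GPU: fit the sphere, then apply the learned offsets to recover the deformed mesh.

deform_verts = get_deform_verts(target, points_to_sample=5000, sphere_level=4)
deformed_mesh = ico_sphere(4, deform_verts.device).offset_verts(deform_verts)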
    def test_taubin(self):
        N = 3
        device = get_random_cuda_device()

        mesh = ico_sphere(4, device).extend(N)
        ico_verts = mesh.verts_padded()
        ico_faces = mesh.faces_padded()

        rand_noise = torch.rand_like(ico_verts) * 0.2 - 0.1
        z_mask = (ico_verts[:, :, -1] > 0).view(N, -1, 1)
        rand_noise = rand_noise * z_mask
        verts = ico_verts + rand_noise
        mesh = Meshes(verts=verts, faces=ico_faces)

        smooth_mesh = taubin_smoothing(mesh, num_iter=50)
        smooth_verts = smooth_mesh.verts_padded()

        smooth_dist = (smooth_verts - ico_verts).norm(dim=-1).mean()
        dist = (verts - ico_verts).norm(dim=-1).mean()
        self.assertTrue(smooth_dist < dist)
Example #22
    def test_backward(self):
        device = get_random_cuda_device()
        mesh = ico_sphere()
        verts = mesh.verts_packed()
        edges = mesh.edges_packed()
        verts_cpu = verts.clone()
        edges_cpu = edges.clone()
        verts_cuda = verts.clone().to(device)
        edges_cuda = edges.clone().to(device)
        verts.requires_grad = True
        verts_cpu.requires_grad = True
        verts_cuda.requires_grad = True

        neighbor_sums_cuda = gather_scatter(verts_cuda, edges_cuda, False)
        neighbor_sums_cpu = gather_scatter(verts_cpu, edges_cpu, False)
        neighbor_sums = gather_scatter_python(verts, edges, False)
        randoms = torch.rand_like(neighbor_sums)
        (neighbor_sums_cuda * randoms.to(device)).sum().backward()
        (neighbor_sums_cpu * randoms).sum().backward()
        (neighbor_sums * randoms).sum().backward()

        self.assertClose(verts.grad, verts_cuda.grad.cpu())
        self.assertClose(verts.grad, verts_cpu.grad)
Example #23
    def __init__(self,
                 batch_size=100,
                 latent_size=128,
                 img_size=128,
                 seed_sphere_divisions=3):
        super(Generator, self).__init__()
        src_mesh = ico_sphere(seed_sphere_divisions)
        output_shape = src_mesh.verts_packed().shape
        self.src_meshes = src_mesh.extend(batch_size).cuda()
        self.layers = LinearNet(latent_size, output_shape).cuda()
        """ Setup rendering. """
        num_views = batch_size
        self.lights = PointLights(location=[[0.0, 0.0, -3.0]], device=device)

        sigma = 1e-4
        raster_settings_silhouette = RasterizationSettings(
            image_size=128,
            blur_radius=np.log(1. / 1e-4 - 1.) * sigma,
            faces_per_pixel=50,
        )
        self.renderer_silhouette = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=None, raster_settings=raster_settings_silhouette),
            shader=SoftSilhouetteShader()).cuda()
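
The `blur_radius` above appears to follow the SoftRasterizer heuristic: with per-face influence sigmoid(-d^2 / sigma), the squared distance at which the influence decays to 1e-4 is sigma * log(1 / 1e-4 - 1). A quick numerical check:

import numpy as np

sigma = 1e-4
blur_radius = np.log(1.0 / 1e-4 - 1.0) * sigma
# sigmoid(-blur_radius / sigma) recovers the 1e-4 influence cutoff.
assert abs(1.0 / (1.0 + np.exp(blur_radius / sigma)) - 1e-4) < 1e-12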
faces_idx = faces.verts_idx.to(device)
verts = verts.to(device)

# We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0).
# (scale, center) will be used to bring the predicted mesh to its original center and scale
# Note that normalizing the target mesh, speeds up the optimization but is not necessary!
center = verts.mean(0)
verts = verts - center
scale = max(verts.abs().max(0)[0])
verts = verts / scale

# We construct a Meshes structure for the target mesh
trg_mesh = Meshes(verts=[verts], faces=[faces_idx])

# We initialize the source shape to be a sphere of radius 1
block1 = ico_sphere(2, device)
uppooling1 = SubdivideMeshes(block1)
block2 = uppooling1(block1)
uppooling2 = SubdivideMeshes(block2)
block3 = uppooling2(block2)

#TODO batch size, features

data1 = Data(x=block1.verts_packed().reshape(-1, 3),
             edge_index=block1.edges_packed().reshape(2, -1),
             face=block1.faces_packed().reshape(3, -1))
data1.pos = data1.x

data2 = Data(x=block2.verts_packed().reshape(-1, 3),
             edge_index=block2.edges_packed().reshape(2, -1),
             face=block2.faces_packed().reshape(3, -1))
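
If the pattern continues to the third subdivision level (an assumption; the snippet stops at data2), the corresponding graph would be:

data3 = Data(x=block3.verts_packed().reshape(-1, 3),
             edge_index=block3.edges_packed().reshape(2, -1),
             face=block3.faces_packed().reshape(3, -1))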
faces_idx = faces.verts_idx.to(device)
verts = verts.to(device)

# We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0).
# (scale, center) will be used to bring the predicted mesh to its original center and scale
# Note that normalizing the target mesh, speeds up the optimization but is not necessary!
center = verts.mean(0)
verts = verts - center
scale = max(verts.abs().max(0)[0])
verts = verts / scale

# We construct a Meshes structure for the target mesh
trg_mesh = Meshes(verts=[verts], faces=[faces_idx])

# We initialize the source shape to be a sphere of radius 1
src_mesh = ico_sphere(4, device)

###  Visualize the source and target meshes


def plot_pointcloud(mesh, title=""):
    # Sample points uniformly from the surface of the mesh.
    points = sample_points_from_meshes(mesh, 5000)
    x, y, z = points.clone().detach().cpu().squeeze().unbind(1)
    fig = plt.figure(figsize=(5, 5))
    ax = fig.add_subplot(projection="3d")
    ax.scatter3D(x, z, -y)
    ax.set_xlabel('x')
    ax.set_ylabel('z')
    ax.set_zlabel('y')
    ax.set_title(title)
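
The helper would then be used to visualize both meshes, e.g.:

plot_pointcloud(trg_mesh, "Target mesh")
plot_pointcloud(src_mesh, "Source mesh")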
    def test_python_vs_cpu_vs_cuda(self):
        torch.manual_seed(231)
        device = torch.device("cpu")
        image_size = 32
        blur_radius = 0.1**2
        faces_per_pixel = 3

        for d in ["cpu", "cuda"]:
            device = torch.device(d)
            compare_grads = True
            # Mesh with a single face.
            verts1 = torch.tensor(
                [[0.0, 0.6, 0.1], [-0.7, -0.4, 0.5], [0.7, -0.4, 0.7]],
                dtype=torch.float32,
                requires_grad=True,
                device=device,
            )
            faces1 = torch.tensor([[0, 1, 2]],
                                  dtype=torch.int64,
                                  device=device)
            meshes1 = Meshes(verts=[verts1], faces=[faces1])
            args1 = (meshes1, image_size, blur_radius, faces_per_pixel)
            verts2 = verts1.detach().clone()
            verts2.requires_grad = True
            meshes2 = Meshes(verts=[verts2], faces=[faces1])
            args2 = (meshes2, image_size, blur_radius, faces_per_pixel)
            self._compare_impls(
                rasterize_meshes_python,
                rasterize_meshes,
                args1,
                args2,
                verts1,
                verts2,
                compare_grads=compare_grads,
            )

            # Mesh with multiple faces.
            # fmt: off
            verts1 = torch.tensor(
                [
                    [-0.5, 0.0, 0.1],  # noqa: E241, E201
                    [0.0, 0.6, 0.5],  # noqa: E241, E201
                    [0.5, 0.0, 0.7],  # noqa: E241, E201
                    [-0.25, 0.0, 0.9],  # noqa: E241, E201
                    [0.26, 0.5, 0.8],  # noqa: E241, E201
                    [0.76, 0.0, 0.8],  # noqa: E241, E201
                    [-0.41, 0.0, 0.5],  # noqa: E241, E201
                    [0.61, 0.6, 0.6],  # noqa: E241, E201
                    [0.41, 0.0, 0.5],  # noqa: E241, E201
                    [-0.2, 0.0, -0.5],  # noqa: E241, E201
                    [0.3, 0.6, -0.5],  # noqa: E241, E201
                    [0.4, 0.0, -0.5],  # noqa: E241, E201
                ],
                dtype=torch.float32,
                device=device,
                requires_grad=True)
            faces1 = torch.tensor(
                [
                    [1, 0, 2],  # noqa: E241, E201
                    [4, 3, 5],  # noqa: E241, E201
                    [7, 6, 8],  # noqa: E241, E201
                    [10, 9, 11]  # noqa: E241, E201
                ],
                dtype=torch.int64,
                device=device,
            )
            # fmt: on
            meshes = Meshes(verts=[verts1], faces=[faces1])
            args1 = (meshes, image_size, blur_radius, faces_per_pixel)
            verts2 = verts1.clone().detach()
            verts2.requires_grad = True
            meshes2 = Meshes(verts=[verts2], faces=[faces1])
            args2 = (meshes2, image_size, blur_radius, faces_per_pixel)
            self._compare_impls(
                rasterize_meshes_python,
                rasterize_meshes,
                args1,
                args2,
                verts1,
                verts2,
                compare_grads=compare_grads,
            )

            # Icosphere
            meshes = ico_sphere(device=device)
            verts1, faces1 = meshes.get_mesh_verts_faces(0)
            verts1.requires_grad = True
            meshes = Meshes(verts=[verts1], faces=[faces1])
            args1 = (meshes, image_size, blur_radius, faces_per_pixel)
            verts2 = verts1.detach().clone()
            verts2.requires_grad = True
            meshes2 = Meshes(verts=[verts2], faces=[faces1])
            args2 = (meshes2, image_size, blur_radius, faces_per_pixel)
            self._compare_impls(
                rasterize_meshes_python,
                rasterize_meshes,
                args1,
                args2,
                verts1,
                verts2,
                compare_grads=compare_grads,
            )
    def test_cpp_vs_cuda_naive_vs_cuda_binned(self):
        # Make sure that the backward pass runs for all pathways
        image_size = 64  # test is too slow for very large images.
        N = 1
        radius = 0.1**2
        faces_per_pixel = 3

        grad_zbuf = torch.randn(N, image_size, image_size, faces_per_pixel)
        grad_dist = torch.randn(N, image_size, image_size, faces_per_pixel)
        grad_bary = torch.randn(N, image_size, image_size, faces_per_pixel, 3)

        device = torch.device("cpu")
        meshes = ico_sphere(0, device)
        verts, faces = meshes.get_mesh_verts_faces(0)
        verts.requires_grad = True
        meshes = Meshes(verts=[verts], faces=[faces])

        # Option I: CPU, naive
        args = (meshes, image_size, radius, faces_per_pixel)
        idx1, zbuf1, bary1, dist1 = rasterize_meshes(*args)

        loss = ((zbuf1 * grad_zbuf).sum() + (dist1 * grad_dist).sum() +
                (bary1 * grad_bary).sum())
        loss.backward()
        idx1 = idx1.data.cpu().clone()
        zbuf1 = zbuf1.data.cpu().clone()
        dist1 = dist1.data.cpu().clone()
        grad1 = verts.grad.data.cpu().clone()

        # Option II: CUDA, naive
        device = torch.device("cuda:0")
        meshes = ico_sphere(0, device)
        verts, faces = meshes.get_mesh_verts_faces(0)
        verts.requires_grad = True
        meshes = Meshes(verts=[verts], faces=[faces])

        args = (meshes, image_size, radius, faces_per_pixel, 0, 0)
        idx2, zbuf2, bary2, dist2 = rasterize_meshes(*args)
        grad_zbuf = grad_zbuf.cuda()
        grad_dist = grad_dist.cuda()
        grad_bary = grad_bary.cuda()
        loss = ((zbuf2 * grad_zbuf).sum() + (dist2 * grad_dist).sum() +
                (bary2 * grad_bary).sum())
        loss.backward()
        idx2 = idx2.data.cpu().clone()
        zbuf2 = zbuf2.data.cpu().clone()
        dist2 = dist2.data.cpu().clone()
        grad2 = verts.grad.data.cpu().clone()

        # Option III: CUDA, binned
        device = torch.device("cuda:0")
        meshes = ico_sphere(0, device)
        verts, faces = meshes.get_mesh_verts_faces(0)
        verts.requires_grad = True
        meshes = Meshes(verts=[verts], faces=[faces])

        args = (meshes, image_size, radius, faces_per_pixel, 32, 500)
        idx3, zbuf3, bary3, dist3 = rasterize_meshes(*args)

        loss = ((zbuf3 * grad_zbuf).sum() + (dist3 * grad_dist).sum() +
                (bary3 * grad_bary).sum())
        loss.backward()
        idx3 = idx3.data.cpu().clone()
        zbuf3 = zbuf3.data.cpu().clone()
        dist3 = dist3.data.cpu().clone()
        grad3 = verts.grad.data.cpu().clone()

        # Make sure everything was the same
        self.assertTrue((idx1 == idx2).all().item())
        self.assertTrue((idx1 == idx3).all().item())
        self.assertTrue(torch.allclose(zbuf1, zbuf2, atol=1e-6))
        self.assertTrue(torch.allclose(zbuf1, zbuf3, atol=1e-6))
        self.assertTrue(torch.allclose(dist1, dist2, atol=1e-6))
        self.assertTrue(torch.allclose(dist1, dist3, atol=1e-6))

        self.assertTrue(torch.allclose(grad1, grad2, rtol=5e-3))  # flaky test
        self.assertTrue(torch.allclose(grad1, grad3, rtol=5e-3))
        self.assertTrue(torch.allclose(grad2, grad3, rtol=5e-3))
Example #28
    def _compare_with_meshes_renderer(self,
                                      image_size,
                                      batch_size=11,
                                      sphere_diameter=0.6):
        """
        Generate a spherical RGB volumetric function and its corresponding mesh
        and check whether MeshesRenderer returns the same images as the
        corresponding ImplicitRenderer.
        """

        # generate NDC camera extrinsics and intrinsics
        cameras = init_cameras(batch_size, image_size=image_size, ndc=True)

        # get rand offset of the volume
        sphere_centroid = torch.randn(batch_size, 3,
                                      device=cameras.device) * 0.1
        sphere_centroid.requires_grad = True

        # init the grid raysampler with the ndc grid
        raysampler = NDCMultinomialRaysampler(
            image_width=image_size[1],
            image_height=image_size[0],
            n_pts_per_ray=256,
            min_depth=0.1,
            max_depth=2.0,
        )

        # get the EA raymarcher
        raymarcher = EmissionAbsorptionRaymarcher()

        # jitter the camera intrinsics a bit for each render
        cameras_randomized = cameras.clone()
        cameras_randomized.principal_point = (
            torch.randn_like(cameras.principal_point) * 0.3)
        cameras_randomized.focal_length = (
            cameras.focal_length +
            torch.randn_like(cameras.focal_length) * 0.2)

        # the list of differentiable camera vars
        cam_vars = ("R", "T", "focal_length", "principal_point")
        # enable the gradient caching for the camera variables
        for cam_var in cam_vars:
            getattr(cameras_randomized, cam_var).requires_grad = True

        # get the implicit renderer
        images_opacities = ImplicitRenderer(
            raysampler=raysampler, raymarcher=raymarcher)(
                cameras=cameras_randomized,
                volumetric_function=spherical_volumetric_function,
                sphere_centroid=sphere_centroid,
                sphere_diameter=sphere_diameter,
            )[0]

        # check that the renderer does not erase gradients
        loss = images_opacities.sum()
        loss.backward()
        for check_var in (
                *[
                    getattr(cameras_randomized, cam_var)
                    for cam_var in cam_vars
                ],
                sphere_centroid,
        ):
            self.assertIsNotNone(check_var.grad)

        # instantiate the corresponding spherical mesh
        ico = ico_sphere(level=4, device=cameras.device).extend(batch_size)
        verts = (torch.nn.functional.normalize(ico.verts_padded(), dim=-1) *
                 sphere_diameter + sphere_centroid[:, None])
        meshes = Meshes(
            verts=verts,
            faces=ico.faces_padded(),
            textures=TexturesVertex(verts_features=(
                torch.nn.functional.normalize(verts, dim=-1) * 0.5 + 0.5)),
        )

        # instantiate the corresponding mesh renderer
        lights = PointLights(device=cameras.device, location=[[0.0, 0.0, 0.0]])
        renderer_textured = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras_randomized,
                raster_settings=RasterizationSettings(
                    image_size=image_size,
                    blur_radius=1e-3,
                    faces_per_pixel=10,
                    z_clip_value=None,
                    perspective_correct=False,
                ),
            ),
            shader=SoftPhongShader(
                device=cameras.device,
                cameras=cameras_randomized,
                lights=lights,
                materials=Materials(
                    ambient_color=((2.0, 2.0, 2.0), ),
                    diffuse_color=((0.0, 0.0, 0.0), ),
                    specular_color=((0.0, 0.0, 0.0), ),
                    shininess=64,
                    device=cameras.device,
                ),
                blend_params=BlendParams(sigma=1e-3,
                                         gamma=1e-4,
                                         background_color=(0.0, 0.0, 0.0)),
            ),
        )

        # get the mesh render
        images_opacities_meshes = renderer_textured(meshes,
                                                    cameras=cameras_randomized,
                                                    lights=lights)

        if DEBUG:
            outdir = tempfile.gettempdir() + "/test_implicit_vs_mesh_renderer"
            os.makedirs(outdir, exist_ok=True)

            frames = []
            for (image_opacity,
                 image_opacity_mesh) in zip(images_opacities,
                                            images_opacities_meshes):
                image, opacity = image_opacity.split([3, 1], dim=-1)
                image_mesh, opacity_mesh = image_opacity_mesh.split([3, 1],
                                                                    dim=-1)
                diff_image = (((image - image_mesh) * 0.5 + 0.5).mean(
                    dim=2, keepdim=True).repeat(1, 1, 3))
                image_pil = Image.fromarray((torch.cat(
                    (
                        image,
                        image_mesh,
                        diff_image,
                        opacity.repeat(1, 1, 3),
                        opacity_mesh.repeat(1, 1, 3),
                    ),
                    dim=1,
                ).detach().cpu().numpy() * 255.0).astype(np.uint8))
                frames.append(image_pil)

            # export gif
            outfile = os.path.join(outdir, "implicit_vs_mesh_render.gif")
            frames[0].save(
                outfile,
                save_all=True,
                append_images=frames[1:],
                duration=batch_size // 15,
                loop=0,
            )
            print(f"exported {outfile}")

            # export concatenated frames
            outfile_cat = os.path.join(outdir, "implicit_vs_mesh_render.png")
            Image.fromarray(
                np.concatenate([np.array(f) for f in frames],
                               axis=0)).save(outfile_cat)
            print(f"exported {outfile_cat}")

        # compare the renders
        diff = (images_opacities - images_opacities_meshes).abs().mean(dim=-1)
        mu_diff = diff.mean(dim=(1, 2))
        std_diff = diff.std(dim=(1, 2))
        self.assertClose(mu_diff, torch.zeros_like(mu_diff), atol=5e-2)
        self.assertClose(std_diff, torch.zeros_like(std_diff), atol=6e-2)
Example #29
    def _forward_shape(self, features, instances):
        """
        Forward logic for the voxel and mesh refinement branch.

        Args:
            features (list[Tensor]): #level input features for voxel prediction
            instances (list[Instances]): the per-image instances to train/predict meshes.
                In training, they can be the proposals.
                In inference, they can be the predicted boxes.
        Returns:
            In training, a dict of losses.
            In inference, update `instances` with new fields "pred_voxels" & "pred_meshes" and return it.
        """
        if not self.voxel_on and not self.mesh_on:
            return {} if self.training else instances

        if self.training:
            # The loss is only defined on positive proposals.
            proposals, _ = select_foreground_proposals(instances,
                                                       self.num_classes)
            proposal_boxes = [x.proposal_boxes for x in proposals]

            losses = {}
            if self.voxel_on:
                voxel_features = self.voxel_pooler(features, proposal_boxes)
                voxel_logits = self.voxel_head(voxel_features)
                loss_voxel, target_voxels = voxel_rcnn_loss(
                    voxel_logits,
                    proposals,
                    loss_weight=self.voxel_loss_weight)
                losses.update({"loss_voxel": loss_voxel})
                if self._vis:
                    self._misc["target_voxels"] = target_voxels
                if self.cls_agnostic_voxel:
                    with torch.no_grad():
                        vox_in = voxel_logits.sigmoid().squeeze(
                            1)  # (N, V, V, V)
                        init_mesh = cubify(vox_in, self.cubify_thresh)  # 1
                else:
                    raise ValueError(
                        "No support for class specific predictions")

            if self.mesh_on:
                mesh_features = self.mesh_pooler(features, proposal_boxes)
                if not self.voxel_on:
                    if mesh_features.shape[0] > 0:
                        init_mesh = ico_sphere(self.ico_sphere_level,
                                               mesh_features.device)
                        init_mesh = init_mesh.extend(mesh_features.shape[0])
                    else:
                        init_mesh = Meshes(verts=[], faces=[])
                pred_meshes = self.mesh_head(mesh_features, init_mesh)

                # loss weights
                loss_weights = {
                    "chamfer": self.chamfer_loss_weight,
                    "normals": self.normals_loss_weight,
                    "edge": self.edge_loss_weight,
                }

                if not pred_meshes[0].isempty():
                    loss_chamfer, loss_normals, loss_edge, target_meshes = mesh_rcnn_loss(
                        pred_meshes,
                        proposals,
                        loss_weights=loss_weights,
                        gt_num_samples=self.gt_num_samples,
                        pred_num_samples=self.pred_num_samples,
                        gt_coord_thresh=self.gt_coord_thresh,
                    )
                    if self._vis:
                        self._misc["init_meshes"] = init_mesh
                        self._misc["target_meshes"] = target_meshes
                else:
                    loss_chamfer = sum(
                        k.sum() for k in self.mesh_head.parameters()) * 0.0
                    loss_normals = sum(
                        k.sum() for k in self.mesh_head.parameters()) * 0.0
                    loss_edge = sum(k.sum()
                                    for k in self.mesh_head.parameters()) * 0.0

                losses.update({
                    "loss_chamfer": loss_chamfer,
                    "loss_normals": loss_normals,
                    "loss_edge": loss_edge,
                })

            return losses
        else:
            pred_boxes = [x.pred_boxes for x in instances]

            if self.voxel_on:
                voxel_features = self.voxel_pooler(features, pred_boxes)
                voxel_logits = self.voxel_head(voxel_features)
                voxel_rcnn_inference(voxel_logits, instances)
                if self.cls_agnostic_voxel:
                    with torch.no_grad():
                        vox_in = voxel_logits.sigmoid().squeeze(
                            1)  # (N, V, V, V)
                        init_mesh = cubify(vox_in, self.cubify_thresh)  # 1
                else:
                    raise ValueError(
                        "No support for class specific predictions")

            if self.mesh_on:
                mesh_features = self.mesh_pooler(features, pred_boxes)
                if not self.voxel_on:
                    if mesh_features.shape[0] > 0:
                        init_mesh = ico_sphere(self.ico_sphere_level,
                                               mesh_features.device)
                        init_mesh = init_mesh.extend(mesh_features.shape[0])
                    else:
                        init_mesh = Meshes(verts=[], faces=[])
                pred_meshes = self.mesh_head(mesh_features, init_mesh)
                mesh_rcnn_inference(pred_meshes[-1], instances)
            else:
                assert self.voxel_on
                mesh_rcnn_inference(init_mesh, instances)

            return instances
Example #30
    def test_save_load_icosphere(self):
        # Test that saving a mesh as an off file and loading it results in the
        # same data on the correct device, for all permitted types of textures.
        # Standard test is for random colors, but also check totally white,
        # because there's a difference in OFF semantics between a "1.0"
        # color (= full) and a "1" color (= 1/255).
        sphere = ico_sphere(0)
        io = IO()
        device = torch.device("cuda:0")

        atlas_padded = torch.rand(1, sphere.faces_list()[0].shape[0], 1, 1, 3)
        atlas = TexturesAtlas(atlas_padded)

        atlas_padded_white = torch.ones(1,
                                        sphere.faces_list()[0].shape[0], 1, 1,
                                        3)
        atlas_white = TexturesAtlas(atlas_padded_white)

        verts_colors_padded = torch.rand(1, sphere.verts_list()[0].shape[0], 3)
        vertex_texture = TexturesVertex(verts_colors_padded)

        verts_colors_padded_white = torch.ones(1,
                                               sphere.verts_list()[0].shape[0],
                                               3)
        vertex_texture_white = TexturesVertex(verts_colors_padded_white)

        # No colors case
        with NamedTemporaryFile(mode="w", suffix=".off") as f:
            io.save_mesh(sphere, f.name)
            f.flush()
            mesh1 = io.load_mesh(f.name, device=device)
        self.assertEqual(mesh1.device, device)
        mesh1 = mesh1.cpu()
        self.assertClose(mesh1.verts_padded(), sphere.verts_padded())
        self.assertClose(mesh1.faces_padded(), sphere.faces_padded())
        self.assertIsNone(mesh1.textures)

        # Atlas case
        sphere.textures = atlas
        with NamedTemporaryFile(mode="w", suffix=".off") as f:
            io.save_mesh(sphere, f.name)
            f.flush()
            mesh2 = io.load_mesh(f.name, device=device)

        self.assertEqual(mesh2.device, device)
        mesh2 = mesh2.cpu()
        self.assertClose(mesh2.verts_padded(), sphere.verts_padded())
        self.assertClose(mesh2.faces_padded(), sphere.faces_padded())
        self.assertClose(mesh2.textures.atlas_padded(),
                         atlas_padded,
                         atol=1e-4)

        # White atlas case
        sphere.textures = atlas_white
        with NamedTemporaryFile(mode="w", suffix=".off") as f:
            io.save_mesh(sphere, f.name)
            f.flush()
            mesh3 = io.load_mesh(f.name)

        self.assertClose(mesh3.textures.atlas_padded(),
                         atlas_padded_white,
                         atol=1e-4)

        # TexturesVertex case
        sphere.textures = vertex_texture
        with NamedTemporaryFile(mode="w", suffix=".off") as f:
            io.save_mesh(sphere, f.name)
            f.flush()
            mesh4 = io.load_mesh(f.name, device=device)

        self.assertEqual(mesh4.device, device)
        mesh4 = mesh4.cpu()
        self.assertClose(mesh4.verts_padded(), sphere.verts_padded())
        self.assertClose(mesh4.faces_padded(), sphere.faces_padded())
        self.assertClose(mesh4.textures.verts_features_padded(),
                         verts_colors_padded,
                         atol=1e-4)

        # white TexturesVertex case
        sphere.textures = vertex_texture_white
        with NamedTemporaryFile(mode="w", suffix=".off") as f:
            io.save_mesh(sphere, f.name)
            f.flush()
            mesh5 = io.load_mesh(f.name)

        self.assertClose(mesh5.textures.verts_features_padded(),
                         verts_colors_padded_white,
                         atol=1e-4)