Example #1
    def test_incorrect_weights(self):
        N, P1, P2 = 16, 64, 128
        device = torch.device("cuda:0")
        p1 = torch.rand(
            (N, P1, 3), dtype=torch.float32, device=device, requires_grad=True
        )
        p2 = torch.rand(
            (N, P2, 3), dtype=torch.float32, device=device, requires_grad=True
        )

        weights = torch.zeros((N,), dtype=torch.float32, device=device)
        loss, loss_norm = chamfer_distance(
            p1, p2, weights=weights, batch_reduction="mean"
        )
        self.assertClose(loss.cpu(), torch.zeros(()))
        self.assertTrue(loss.requires_grad)
        self.assertClose(loss_norm.cpu(), torch.zeros(()))
        self.assertTrue(loss_norm.requires_grad)

        loss, loss_norm = chamfer_distance(
            p1, p2, weights=weights, batch_reduction="none"
        )
        self.assertClose(loss.cpu(), torch.zeros((N,)))
        self.assertTrue(loss.requires_grad)
        self.assertClose(loss_norm.cpu(), torch.zeros((N,)))
        self.assertTrue(loss_norm.requires_grad)

        weights = torch.ones((N,), dtype=torch.float32, device=device) * -1
        with self.assertRaises(ValueError):
            loss, loss_norm = chamfer_distance(p1, p2, weights=weights)

        weights = torch.zeros((N - 1,), dtype=torch.float32, device=device)
        with self.assertRaises(ValueError):
            loss, loss_norm = chamfer_distance(p1, p2, weights=weights)
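
For reference, a minimal sketch of the call pattern this test exercises (shapes are illustrative; chamfer_distance returns a (loss, loss_normals) pair):

import torch
from pytorch3d.loss import chamfer_distance

# Illustrative shapes: a batch of 4 cloud pairs with 64 and 128 points each.
p1 = torch.rand(4, 64, 3)
p2 = torch.rand(4, 128, 3)
weights = torch.ones(4)

# loss_normals is None when no normals are passed.
loss, loss_normals = chamfer_distance(p1, p2, weights=weights)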
Example #2
    def test_chamfer_pointcloud_object_withnormals(self):
        N = 5
        P1, P2 = 100, 100
        device = "cuda:0"

        reductions = [
            ("sum", "sum"),
            ("mean", "sum"),
            ("sum", "mean"),
            ("mean", "mean"),
            ("sum", None),
            ("mean", None),
        ]
        for (point_reduction, batch_reduction) in reductions:

            # Reinitialize all the tensors so that the
            # backward pass can be computed.
            points_normals = TestChamfer.init_pointclouds(N, P1, P2, device)

            # Chamfer with pointclouds as input.
            cham_cloud, norm_cloud = chamfer_distance(
                points_normals.cloud1,
                points_normals.cloud2,
                point_reduction=point_reduction,
                batch_reduction=batch_reduction,
            )

            # Chamfer with tensors as input.
            cham_tensor, norm_tensor = chamfer_distance(
                points_normals.p1,
                points_normals.p2,
                x_lengths=points_normals.p1_lengths,
                y_lengths=points_normals.p2_lengths,
                x_normals=points_normals.n1,
                y_normals=points_normals.n2,
                point_reduction=point_reduction,
                batch_reduction=batch_reduction,
            )

            self.assertClose(cham_cloud, cham_tensor)
            self.assertClose(norm_cloud, norm_tensor)
            self._check_gradients(
                cham_tensor,
                norm_tensor,
                cham_cloud,
                norm_cloud,
                points_normals.cloud1.points_list(),
                points_normals.p1,
                points_normals.cloud2.points_list(),
                points_normals.p2,
                points_normals.cloud1.normals_list(),
                points_normals.n1,
                points_normals.cloud2.normals_list(),
                points_normals.n2,
                points_normals.p1_lengths,
                points_normals.p2_lengths,
            )
Example #3
    def test_chamfer_pointcloud_object_nonormals(self):
        N = 5
        P1, P2 = 100, 100
        device = get_random_cuda_device()

        reductions = [
            ("sum", "sum"),
            ("mean", "sum"),
            ("sum", "mean"),
            ("mean", "mean"),
            ("sum", None),
            ("mean", None),
        ]
        for (point_reduction, batch_reduction) in reductions:

            # Reinitialize all the tensors so that the
            # backward pass can be computed.
            points_normals = TestChamfer.init_pointclouds(N,
                                                          P1,
                                                          P2,
                                                          device,
                                                          allow_empty=False)

            # Chamfer with pointclouds as input.
            cham_cloud, _ = chamfer_distance(
                points_normals.cloud1,
                points_normals.cloud2,
                point_reduction=point_reduction,
                batch_reduction=batch_reduction,
            )

            # Chamfer with tensors as input.
            cham_tensor, _ = chamfer_distance(
                points_normals.p1,
                points_normals.p2,
                x_lengths=points_normals.p1_lengths,
                y_lengths=points_normals.p2_lengths,
                point_reduction=point_reduction,
                batch_reduction=batch_reduction,
            )

            self.assertClose(cham_cloud, cham_tensor)
            self._check_gradients(
                cham_tensor,
                None,
                cham_cloud,
                None,
                points_normals.cloud1.points_list(),
                points_normals.p1,
                points_normals.cloud2.points_list(),
                points_normals.p2,
                lengths1=points_normals.p1_lengths,
                lengths2=points_normals.p2_lengths,
            )
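
A minimal sketch of the Pointclouds input path these two tests compare against the padded-tensor path (point counts are illustrative):

import torch
from pytorch3d.loss import chamfer_distance
from pytorch3d.structures import Pointclouds

# Heterogeneous clouds; lengths (and normals, if set) come from the object.
cloud1 = Pointclouds(points=[torch.rand(80, 3), torch.rand(100, 3)])
cloud2 = Pointclouds(points=[torch.rand(90, 3), torch.rand(100, 3)])
loss, _ = chamfer_distance(cloud1, cloud2)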
Example #4
    def test_chamfer_batch_reduction(self):
        """
        Compare output of vectorized chamfer loss with naive implementation
        for batch_reduction in ["mean", "sum"] and point_reduction = "none".
        """
        N, P1, P2 = 7, 10, 18
        p1, p2, p1_normals, p2_normals, weights = TestChamfer.init_pointclouds(
            N, P1, P2
        )

        pred_loss, pred_loss_norm = TestChamfer.chamfer_distance_naive(
            p1, p2, p1_normals, p2_normals
        )

        # batch_reduction = "sum".
        loss, loss_norm = chamfer_distance(
            p1,
            p2,
            p1_normals,
            p2_normals,
            weights=weights,
            batch_reduction="sum",
            point_reduction="none",
        )
        pred_loss[0] *= weights.view(N, 1)
        pred_loss[1] *= weights.view(N, 1)
        pred_loss = pred_loss[0].sum() + pred_loss[1].sum()
        self.assertClose(loss, pred_loss)

        pred_loss_norm[0] *= weights.view(N, 1)
        pred_loss_norm[1] *= weights.view(N, 1)
        pred_loss_norm = pred_loss_norm[0].sum() + pred_loss_norm[1].sum()
        self.assertClose(loss_norm, pred_loss_norm)

        # batch_reduction = "mean".
        loss, loss_norm = chamfer_distance(
            p1,
            p2,
            p1_normals,
            p2_normals,
            weights=weights,
            batch_reduction="mean",
            point_reduction="none",
        )

        pred_loss /= weights.sum()
        self.assertClose(loss, pred_loss)

        pred_loss_norm /= weights.sum()
        self.assertClose(loss_norm, pred_loss_norm)

        # Error when point_reduction is not in ["none", "mean", "sum"].
        with self.assertRaises(ValueError):
            chamfer_distance(p1, p2, weights=weights, point_reduction="max")
Example #5
    def test_invalid_norm(self):
        N, P1, P2 = 7, 10, 18
        device = get_random_cuda_device()
        points_normals = TestChamfer.init_pointclouds(N, P1, P2, device)
        p1 = points_normals.p1
        p2 = points_normals.p2

        with self.assertRaisesRegex(ValueError, "Support for 1 or 2 norm."):
            chamfer_distance(p1, p2, norm=0)

        with self.assertRaisesRegex(ValueError, "Support for 1 or 2 norm."):
            chamfer_distance(p1, p2, norm=3)
Example #6
    def test_incorrect_inputs(self):
        N, P1, P2 = 7, 10, 18
        device = get_random_cuda_device()
        points_normals = TestChamfer.init_pointclouds(N, P1, P2, device)
        p1 = points_normals.p1
        p2 = points_normals.p2
        p1_normals = points_normals.n1

        # Normals of wrong shape
        with self.assertRaisesRegex(ValueError,
                                    "Expected normals to be of shape"):
            chamfer_distance(p1, p2, x_normals=p1_normals[None])

        # Points of wrong shape
        with self.assertRaisesRegex(ValueError,
                                    "Expected points to be of shape"):
            chamfer_distance(p1[None], p2)

        # Lengths of wrong shape
        with self.assertRaisesRegex(ValueError,
                                    "Expected lengths to be of shape"):
            chamfer_distance(p1,
                             p2,
                             x_lengths=torch.tensor([1, 2, 3], device=device))

        # Points are not a tensor or Pointclouds
        with self.assertRaisesRegex(ValueError,
                                    "Pointclouds objects or torch.Tensor"):
            chamfer_distance(x=[1, 1, 1], y=[1, 1, 1])
Example #7
def loss():
    loss, loss_normals = chamfer_distance(p1,
                                          p2,
                                          p1_normals,
                                          p2_normals,
                                          weights=weights)
    torch.cuda.synchronize()
Example #8
    def test_chamfer_point_batch_reduction_mean(self):
        """
        Compare output of vectorized chamfer loss with naive implementation
        for the default settings (point_reduction = "mean" and batch_reduction = "mean")
        and no normals.
        This test only uses homogeneous pointclouds.
        """
        N, max_P1, max_P2 = 7, 10, 18
        device = get_random_cuda_device()
        points_normals = TestChamfer.init_pointclouds(N, max_P1, max_P2,
                                                      device)
        p1 = points_normals.p1
        p2 = points_normals.p2
        weights = points_normals.weights
        p11 = p1.detach().clone()
        p22 = p2.detach().clone()
        p11.requires_grad = True
        p22.requires_grad = True
        P1 = p1.shape[1]
        P2 = p2.shape[1]

        pred_loss, pred_loss_norm = TestChamfer.chamfer_distance_naive(p1, p2)

        # point_reduction = "mean".
        loss, loss_norm = chamfer_distance(p11, p22, weights=weights)
        pred_loss = pred_loss[0].sum(1) / P1 + pred_loss[1].sum(1) / P2
        pred_loss *= weights
        pred_loss = pred_loss.sum() / weights.sum()

        self.assertClose(loss, pred_loss)
        self.assertTrue(loss_norm is None)

        # Check gradients
        self._check_gradients(loss, None, pred_loss, None, p1, p11, p2, p22)
Example #9
    def forward(self, src_mesh):
        loss = 0

        # Sample from target meshes
        target_verts = sample_points_from_meshes(self.target_meshes, 3000)

        if self.consider_loss("chamfer"):
            loss_chamfer, _ = chamfer_distance(target_verts,
                                               src_mesh.verts_padded())
            loss += self.loss_weights["w_chamfer"] * loss_chamfer

        if self.consider_loss("edge"):
            loss_edge = mesh_edge_loss(
                src_mesh)  # and (b) the edge length of the predicted mesh
            loss += self.loss_weights["w_edge"] * loss_edge

        if self.consider_loss("normal"):
            loss_normal = mesh_normal_consistency(
                src_mesh)  # mesh normal consistency
            loss += self.loss_weights["w_normal"] * loss_normal

        if self.consider_loss("laplacian"):
            loss_laplacian = mesh_laplacian_smoothing(
                src_mesh, method="uniform")  # mesh laplacian smoothing
            loss += self.loss_weights["w_laplacian"] * loss_laplacian

        return loss
Example #10
    def loss(self, src_mesh, src_verts):
        loss = 0

        if self.consider_loss("chamfer"):
            loss_chamfer, _ = chamfer_distance(
                self.target_verts, src_verts
            )  # We compare the two sets of pointclouds by computing (a) the chamfer loss

            loss += self.loss_weights["w_chamfer"] * loss_chamfer

        if self.consider_loss("edge"):
            loss_edge = mesh_edge_loss(
                src_mesh)  # and (b) the edge length of the predicted mesh
            loss += self.loss_weights["w_edge"] * loss_edge

        if self.consider_loss("normal"):
            loss_normal = mesh_normal_consistency(
                src_mesh)  # mesh normal consistency
            loss += self.loss_weights["w_normal"] * loss_normal

        if self.consider_loss("laplacian"):
            loss_laplacian = mesh_laplacian_smoothing(
                src_mesh, method="uniform")  # mesh laplacian smoothing
            loss += self.loss_weights["w_laplacian"] * loss_laplacian

        if self.consider_loss("arap"):
            for n in range(len(self.target_meshes)):
                loss_arap = arap_loss(self.prev_mesh,
                                      self.prev_verts,
                                      src_verts,
                                      mesh_idx=n)
                loss += self.loss_weights["w_arap"] * loss_arap

        return loss, loss_chamfer
Example #11
    def evaluate_mesh(self, val_dataloader, it, **kwargs):
        logger_py.info("[Mesh Evaluation]")
        t0 = time.time()
        if not os.path.exists(self.val_dir):
            os.makedirs(self.val_dir)

        eval_list = defaultdict(list)

        mesh_gt = val_dataloader.dataset.get_meshes()
        assert (mesh_gt is not None)
        mesh_gt = mesh_gt.to(device=self.device)

        pointcloud_tgt = val_dataloader.dataset.get_pointclouds(
            num_points=self.n_eval_points)

        mesh = self.generator.generate_mesh({},
                                            with_colors=False,
                                            with_normals=False)
        # trimesh's sample_surface_even returns (points, face_indices).
        points_pred, _ = trimesh.sample.sample_surface_even(
            mesh,
            pointcloud_tgt.points_packed().shape[0])
        # chamfer_distance returns (loss, loss_normals); keep the point term.
        chamfer_dist, _ = chamfer_distance(
            pointcloud_tgt.points_padded(),
            torch.from_numpy(points_pred).view(1, -1, 3).to(
                device=pointcloud_tgt.points_padded().device,
                dtype=torch.float32))
        eval_dict_mesh = {'chamfer': chamfer_dist.item()}

        # save to "val" dict
        t1 = time.time()
        logger_py.info('[Mesh Evaluation] time elapsed {}s'.format(t1 - t0))
        if not mesh.is_empty:
            mesh.export(os.path.join(self.val_dir, "%010d.ply" % it))
        return eval_dict_mesh
Example #12
def get_loss(mesh,
             trg_mesh,
             w_chamfer,
             w_edge,
             w_normal,
             w_laplacian,
             n_points=5000):
    # We sample 5k points from the surface of each mesh
    sample_trg = sample_points_from_meshes(trg_mesh, n_points)
    sample_src = sample_points_from_meshes(mesh, n_points)

    # We compare the two sets of pointclouds by computing (a) the chamfer loss

    loss_chamfer, _ = chamfer_distance(sample_trg, sample_src)

    # and (b) the edge length of the predicted mesh
    loss_edge = mesh_edge_loss(mesh)

    # mesh normal consistency
    loss_normal = mesh_normal_consistency(mesh)

    # mesh laplacian smoothing
    loss_laplacian = mesh_laplacian_smoothing(mesh, method="uniform")

    # Weighted sum of the losses
    loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian

    return loss
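
A hypothetical call site for get_loss; new_src_mesh, trg_mesh and the weights below are illustrative only:

# Hypothetical usage inside a fitting loop; names and weights are illustrative.
loss = get_loss(new_src_mesh, trg_mesh,
                w_chamfer=1.0, w_edge=1.0, w_normal=0.01, w_laplacian=0.1)
loss.backward()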
Example #13
    def evaluate_3d(self, val_dataloader, it, **kwargs):
        logger_py.info("[3D Evaluation]")
        t0 = time.time()
        if not os.path.exists(self.val_dir):
            os.makedirs(self.val_dir)

        # create mesh using generator
        pointcloud = self.model.get_point_clouds(
            with_colors=False, with_normals=True,
            require_normals_grad=False)

        pointcloud_tgt = val_dataloader.dataset.get_pointclouds(
            num_points=self.n_eval_points).to(device=pointcloud.device)

        cd_p, cd_n = chamfer_distance(
            pointcloud_tgt,
            pointcloud,
            x_lengths=pointcloud_tgt.num_points_per_cloud(),
            y_lengths=pointcloud.num_points_per_cloud(),
        )
        # save to "val" dict
        t1 = time.time()
        logger_py.info('[3D Evaluation] time elapsed {}s'.format(t1 - t0))
        eval_dict = {'chamfer_point': cd_p.item(), 'chamfer_normal': cd_n.item()}
        self.tb_logger.add_scalars(
            'eval', eval_dict, global_step=it)
        if not pointcloud.is_empty:
            self.tb_logger.add_mesh('eval',
                                    np.array(pointcloud.vertices)[None, ...], global_step=it)
            # mesh.export(os.path.join(self.val_dir, "%010d.ply" % it))
        return eval_dict
Example #14
    def loss(self, data, epoch):
        pred = self.forward(data)

        # Cross-entropy over predicted voxels, weighted by the base plane.
        weight = data['base_plane'].float().cuda()
        CE_Loss = nn.CrossEntropyLoss(reduction='none')
        ce_loss = CE_Loss(pred[0][-1][3], data['y_voxels'].cuda()) * weight
        ce_loss = ce_loss.mean()

        chamfer_loss = torch.tensor(0).float().cuda()
        edge_loss = torch.tensor(0).float().cuda()
        laplacian_loss = torch.tensor(0).float().cuda()
        normal_consistency_loss = torch.tensor(0).float().cuda()

        for c in range(self.config.num_classes - 1):
            target = data['surface_points'][c].cuda()
            for k, (vertices, faces, _, _, _) in enumerate(pred[c][1:]):
                pred_mesh = Meshes(verts=list(vertices), faces=list(faces))
                pred_points = sample_points_from_meshes(pred_mesh, 3000)

                chamfer_loss += chamfer_distance(pred_points, target)[0]
                laplacian_loss += mesh_laplacian_smoothing(pred_mesh, method="uniform")
                normal_consistency_loss += mesh_normal_consistency(pred_mesh)
                edge_loss += mesh_edge_loss(pred_mesh)

        loss = 1 * chamfer_loss + 1 * ce_loss + 0.1 * laplacian_loss + 1 * edge_loss + 0.1 * normal_consistency_loss

        log = {"loss": loss.detach(),
               "chamfer_loss": chamfer_loss.detach(), 
               # "loss_coef": loss_coef,
               "ce_loss": ce_loss.detach(),
               "normal_consistency_loss": normal_consistency_loss.detach(),
               "edge_loss": edge_loss.detach(),
               "laplacian_loss": laplacian_loss.detach()}
        return loss, log
Example #15
    def forward(self, target, vs, input) -> Tensor:
        pointsa = torch.stack([self.sample(i) for i in input])
        pointsb = torch.stack(
            [self.sample(mesh, vs[i]) for i, mesh in enumerate(target)])
        loss, _ = chamfer_distance(pointsb, pointsa, point_reduction='sum')
        loss += 1 * self.loss_volumes(
            input, target, vs) + 0.001 * self.loss_areas_ratio(target, vs)
        return loss
Example #16
    def compute_loss(self, mesh, pcd=None):
        if pcd is None: pcd = self.pcd

        face_loss = pt3loss.point_mesh_face_distance(mesh, pcd)
        edge_loss = pt3loss.point_mesh_edge_distance(mesh, pcd)
        point_loss = pt3loss.chamfer_distance(mesh.verts_padded(), pcd)[0]

        length_loss = pt3loss.mesh_edge_loss(mesh)
        normal_loss = pt3loss.mesh_normal_consistency(mesh)

        mpcd = sample_points_from_meshes(mesh,
                                         2 * pcd.points_padded()[0].shape[0])
        sample_loss, _ = pt3loss.chamfer_distance(mpcd, pcd)

        # Stack keeps the individual loss terms in the autograd graph;
        # wrapping them in torch.tensor(..., requires_grad=True) would detach them.
        losses = torch.stack((face_loss, edge_loss, point_loss, length_loss,
                              normal_loss, sample_loss)).to(device='cuda')

        return losses
Example #17
def chamfer_loss(pc1, pc2):
    '''
    Input:
        pc1: [B,3,N]
        pc2: [B,3,N]
    '''
    pc1 = pc1.permute(0, 2, 1)
    pc2 = pc2.permute(0, 2, 1)
    chamfer_dist, _ = chamfer_distance(pc1, pc2)
    return chamfer_dist
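
An illustrative call, assuming channel-first clouds as the docstring requires:

import torch

pc1 = torch.rand(2, 3, 1024)  # [B, 3, N], per the docstring
pc2 = torch.rand(2, 3, 1024)
dist = chamfer_loss(pc1, pc2)  # scalar: batch-mean chamfer distance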
Example #18
def loss():
    loss, loss_normals = chamfer_distance(
        p1,
        p2,
        x_lengths=l1,
        y_lengths=l2,
        x_normals=p1_normals,
        y_normals=p2_normals,
        weights=weights,
    )
    torch.cuda.synchronize()
Example #19
    def test_chamfer_point_reduction_mean(self):
        """
        Compare output of vectorized chamfer loss with naive implementation
        for point_reduction = "mean" and batch_reduction = None.
        """
        N, max_P1, max_P2 = 7, 10, 18
        device = get_random_cuda_device()
        points_normals = TestChamfer.init_pointclouds(N, max_P1, max_P2,
                                                      device)
        p1 = points_normals.p1
        p2 = points_normals.p2
        p1_normals = points_normals.n1
        p2_normals = points_normals.n2
        weights = points_normals.weights
        p11 = p1.detach().clone()
        p22 = p2.detach().clone()
        p11.requires_grad = True
        p22.requires_grad = True
        P1 = p1.shape[1]
        P2 = p2.shape[1]

        pred_loss, pred_loss_norm = TestChamfer.chamfer_distance_naive(
            p1, p2, x_normals=p1_normals, y_normals=p2_normals)

        # point_reduction = "mean".
        loss, loss_norm = chamfer_distance(
            p11,
            p22,
            x_normals=p1_normals,
            y_normals=p2_normals,
            weights=weights,
            batch_reduction=None,
            point_reduction='mean',
        )
        pred_loss_mean = pred_loss[0].sum(1) / P1 + pred_loss[1].sum(1) / P2
        pred_loss_mean *= weights
        self.assertClose(loss, pred_loss_mean)

        pred_loss_norm_mean = (pred_loss_norm[0].sum(1) / P1 +
                               pred_loss_norm[1].sum(1) / P2)
        pred_loss_norm_mean *= weights
        self.assertClose(loss_norm, pred_loss_norm_mean)

        # Check gradients
        self._check_gradients(
            loss,
            loss_norm,
            pred_loss_mean,
            pred_loss_norm_mean,
            p1,
            p11,
            p2,
            p22,
        )
Example #20
def loss():
    loss, loss_normals = chamfer_distance(
        points_normals.p1,
        points_normals.p2,
        x_lengths=l1,
        y_lengths=l2,
        x_normals=points_normals.n1,
        y_normals=points_normals.n2,
        weights=points_normals.weights,
    )
    torch.cuda.synchronize()
Example #21
def point_loss(model_points, source_pose, target_pose, simi=False):
    source_point = utils.transform_point_cloud(model_points, source_pose)
    target_point = utils.transform_point_cloud(model_points, target_pose)

    if simi:
        distance = chamfer_distance(source_point,
                                    target_point,
                                    point_reduction="sum")[0]
    else:
        distance = F.mse_loss(source_point, target_point, reduction="sum")

    return distance
Example #22
def get_recon_metrics(model, x, n_particles=1):
    recon = model.generate(x, n_particles)
    recon = recon.detach()

    mse = torch.nn.functional.mse_loss(recon, x)
    mae = torch.nn.functional.l1_loss(recon, x)
    chamfer = chamfer_distance(recon.float(), x.float())[0]

    return {
        'mse': mse,
        'mae': mae,
        'chamfer': chamfer,
    }
Example #23
    def test_chamfer_default_no_normals(self):
        """
        Compare chamfer loss with naive implementation using default
        input values and no normals.
        """
        N, P1, P2 = 7, 10, 18
        p1, p2, _, _, weights = TestChamfer.init_pointclouds(N, P1, P2)
        pred_loss, _ = TestChamfer.chamfer_distance_naive(p1, p2)
        loss, loss_norm = chamfer_distance(p1, p2, weights=weights)
        pred_loss = pred_loss[0].sum(1) / P1 + pred_loss[1].sum(1) / P2
        pred_loss *= weights
        pred_loss = pred_loss.sum() / weights.sum()
        self.assertClose(loss, pred_loss)
        self.assertTrue(loss_norm is None)
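
TestChamfer.chamfer_distance_naive itself is not reproduced on this page; a hypothetical points-only sketch, consistent with how the tests above reduce its output, would be:

import torch

def chamfer_distance_naive(p1, p2):
    # Hypothetical stand-in: brute-force pairwise squared distances,
    # returned unreduced so callers can apply the reductions by hand.
    d = ((p1[:, :, None, :] - p2[:, None, :, :]) ** 2).sum(-1)  # (N, P1, P2)
    cham_x = d.min(dim=2).values  # (N, P1): nearest p2 point per p1 point
    cham_y = d.min(dim=1).values  # (N, P2): nearest p1 point per p2 point
    return (cham_x, cham_y), None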
Example #24
    def _mesh_loss(self, meshes_pred, points_gt, normals_gt):
        """
        Args:
          meshes_pred: Meshes containing N meshes
          points_gt: Tensor of shape NxPx3
          normals_gt: Tensor of shape NxPx3

        Returns:
          total_loss (float): The sum of all losses specific to meshes
          losses (dict): All (unweighted) mesh losses in a dictionary
        """
        device = meshes_pred.verts_list()[0].device
        zero = torch.tensor(0.0).to(device)
        losses = {"chamfer": zero, "normal": zero, "edge": zero}
        if self.upsample_pred_mesh:
            points_pred, normals_pred = sample_points_from_meshes(
                meshes_pred,
                num_samples=self.pred_num_samples,
                return_normals=True)
        else:
            points_pred = meshes_pred.verts_list()
            normals_pred = meshes_pred.verts_normals_list()

        total_loss = torch.tensor(0.0).to(device)
        if points_pred is None or points_gt is None:
            # Sampling failed, so return None
            total_loss = None
            which = "predictions" if points_pred is None else "GT"
            logger.info("WARNING: Sampling %s failed" % (which))
            return total_loss, losses

        losses = {}
        cham_loss, normal_loss = chamfer_distance(points_pred,
                                                  points_gt,
                                                  x_normals=normals_pred,
                                                  y_normals=normals_gt)

        total_loss = total_loss + self.chamfer_weight * cham_loss
        total_loss = total_loss + self.normal_weight * normal_loss
        losses["chamfer"] = cham_loss
        losses["normal"] = normal_loss

        edge_loss = mesh_edge_loss(meshes_pred)
        total_loss = total_loss + self.edge_weight * edge_loss
        losses["edge"] = edge_loss

        return total_loss, losses
Example #25
def validate_training_SVR(validation_generator, model):
    '''
    This function calculates the validation loss while training PointsSVR.
    '''
    print("Validating model......")
    with torch.no_grad():
        total_loss = 0
        items = 0
        for input, gtpt, _, _ in validation_generator:  # image, point cloud, model category, model name
            input = input.cuda()
            gtpt = gtpt.cuda()
            predpt = model(input)  # predict a sparse point cloud
            loss, _ = chamfer_distance(predpt, gtpt)
            total_loss += loss.item()
            items += 1

    return total_loss / items  # return average validation loss
Example #26
def compute_chamfer(recon_pts, gt_pts, num_pts=10000):
    np.random.seed(0)

    recon_pts = normalize_pts(recon_pts)
    idx = np.random.choice(len(recon_pts), size=(num_pts), replace=True)
    recon_pts = recon_pts[idx, :]

    gt_pts = normalize_pts(gt_pts)
    idx = np.random.choice(len(gt_pts), size=(num_pts), replace=True)
    gt_pts = gt_pts[idx, :]

    with torch.no_grad():
        recon_pts = torch.from_numpy(recon_pts).float().cuda()[None, ...]
        gt_pts = torch.from_numpy(gt_pts).float().cuda()[None, ...]
        dist, _ = chamfer_distance(recon_pts, gt_pts, batch_reduction=None)
        dist = dist.cpu().squeeze().numpy()
    return dist
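
normalize_pts is not shown here; a plausible sketch under the common convention (center, then scale into the unit sphere) would be:

import numpy as np

def normalize_pts(pts):
    # Hypothetical helper: center the cloud and scale it into the unit
    # sphere so both clouds are compared at a common scale.
    pts = pts - pts.mean(axis=0, keepdims=True)
    scale = np.linalg.norm(pts, axis=1).max()
    return pts / max(scale, 1e-12)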
Example #27
def validate_training_AE(validation_generator, model):
    '''
    This function calculates the validation loss while training the NMF AE.
    '''
    print("Validating model......")
    with torch.no_grad():
        total_loss = 0
        items = 0
        for input, _, _ in validation_generator:
            input = input.cuda()
            _, _, pred2, face = model(input)  # point prediction after each deform block plus face information (see figure 4 in the paper)
            mesh_p2 = Meshes(verts=pred2, faces=face)  # construct differentiable mesh M_p2
            pts2 = sample_points_from_meshes(mesh_p2, num_samples=2562)  # differentiably sample points from the mesh surface
            loss, _ = chamfer_distance(pts2, input)
            total_loss += loss.item()
            items += 1

    return total_loss / items  # return average validation loss
Example #28
    def forward(self, bins, target_depth_maps):
        bin_centers = 0.5 * (bins[:, 1:] + bins[:, :-1])
        n, p = bin_centers.shape
        input_points = bin_centers.view(n, p, 1)  # .shape = n, p, 1
        # n, c, h, w = target_depth_maps.shape

        target_points = target_depth_maps.flatten(1)  # n, hwc
        mask = target_points.ge(1e-3)  # only valid ground truth points
        target_points = [p[m] for p, m in zip(target_points, mask)]
        target_lengths = torch.Tensor([len(t) for t in target_points
                                       ]).long().to(target_depth_maps.device)
        target_points = pad_sequence(
            target_points, batch_first=True).unsqueeze(2)  # .shape = n, T, 1

        loss, _ = chamfer_distance(x=input_points,
                                   y=target_points,
                                   y_lengths=target_lengths)
        return loss
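
Assuming the method above lives on an nn.Module, a call might look like this (bin_loss is a hypothetical instance; shapes are illustrative):

import torch

bins = torch.sort(torch.rand(4, 257), dim=1).values  # 4 images, 257 bin edges
target_depth_maps = torch.rand(4, 1, 60, 80)  # 4 ground-truth depth maps
loss = bin_loss(bins, target_depth_maps)  # bin_loss: hypothetical module instance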
Example #29
def eval_cls(cls):
    ## change the weight_fn to the expected one
    weight_fn = 'log_{}/chkpt.pth'.format(cls)
    if not os.path.exists(weight_fn):
        print('{} not exists.'.format(weight_fn))
        return

    print('Initializing network')

    state_dict = torch.load(weight_fn)
    print('loading weights from {}'.format(weight_fn))
    net.load_state_dict(state_dict, strict=False)
    net.eval()
    print('Network initialization done')

    test_data_fn = './data/benchmark/{}.npy'.format(cls)
    test_data = np.load(test_data_fn, allow_pickle=True)

    cd_lst = []
    for idx, (pc, gt_q) in enumerate(test_data):
        points = torch.from_numpy(pc).float().to(device).reshape(
            1, num_point, pc.shape[1])
        gt_q = torch.from_numpy(gt_q).float().to(device).reshape(1, 4)

        pred_q, pred_l, weights = net(points)

        rel_q = qmult(pred_q, qconjugate(gt_q))

        rel_q_tiled = rel_q.reshape(nm, 1, 4).repeat(1, pc.shape[0],
                                                     1).reshape(-1, 4)
        points_tiled = points.reshape(1, pc.shape[0],
                                      3).repeat(nm, 1, 1).reshape(-1, 3)

        rotated_pc = qrotate_pc(points_tiled, rel_q_tiled)
        rotated_pc = rotated_pc.reshape(nm, pc.shape[0], 3)

        dists = chamfer_distance(points_tiled.reshape(nm, pc.shape[0], 3),
                                 rotated_pc,
                                 batch_reduction=None)[0]
        best_dist = dists[weights.argmax()].item()

        cd_lst.append(best_dist)

    print('{}: {}'.format(cls, np.mean(cd_lst)))
Example #30
def get_deform_verts(target_mesh, points_to_sample=5000, sphere_level=4):
    device = torch.device("cuda:0")

    src_mesh = ico_sphere(sphere_level, device)

    deform_verts = torch.full(src_mesh.verts_packed().shape,
                              0.0,
                              device=device,
                              requires_grad=True)

    learning_rate = 0.01
    num_iter = 500
    w_chamfer = 1.0
    w_edge = 0.05
    w_normal = 0.0005
    w_laplacian = 0.005

    optimizer = torch.optim.Adam([deform_verts],
                                 lr=learning_rate,
                                 betas=(0.5, 0.999))

    for _ in range(num_iter):
        optimizer.zero_grad()

        new_src_mesh = src_mesh.offset_verts(deform_verts)

        sample_trg = sample_points_from_meshes(target_mesh, points_to_sample)
        sample_src = sample_points_from_meshes(new_src_mesh, points_to_sample)

        loss_chamfer, _ = chamfer_distance(sample_trg, sample_src)
        loss_edge = mesh_edge_loss(new_src_mesh)
        loss_normal = mesh_normal_consistency(new_src_mesh)
        loss_laplacian = mesh_laplacian_smoothing(new_src_mesh,
                                                  method="uniform")
        loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian

        loss.backward()
        optimizer.step()
    print(
        f"{datetime.now()} Loss Chamfer:{loss_chamfer * w_chamfer}, Loss Edge:{loss_edge * w_edge}, Loss Normal:{loss_normal * w_normal}, Loss Laplacian:{loss_laplacian * w_laplacian}"
    )

    return deform_verts