def compute_loss(self, mesh, pcd=None):
    """Compute the six reconstruction losses between a mesh and a point cloud.

    Args:
        mesh: a pytorch3d Meshes object (batch assumed to be on CUDA).
        pcd: a pytorch3d Pointclouds object; defaults to ``self.pcd``.

    Returns:
        A 1-D tensor of 6 scalar losses
        (face, edge, point, edge-length, normal-consistency, sampled-chamfer)
        that remains connected to the autograd graph.
    """
    if pcd is None:
        pcd = self.pcd
    # Distances from the point cloud to the mesh surface / edges.
    face_loss = pt3loss.point_mesh_face_distance(mesh, pcd)
    edge_loss = pt3loss.point_mesh_edge_distance(mesh, pcd)
    # Chamfer between mesh vertices and the cloud (index 0 = distance term).
    point_loss = pt3loss.chamfer_distance(mesh.verts_padded(), pcd)[0]
    # Mesh regularizers: short edges and consistent face normals.
    length_loss = pt3loss.mesh_edge_loss(mesh)
    normal_loss = pt3loss.mesh_normal_consistency(mesh)
    # Chamfer between points sampled on the mesh surface and the cloud;
    # oversample at 2x the cloud size for a denser surface estimate.
    mpcd = sample_points_from_meshes(mesh, 2 * pcd.points_padded()[0].shape[0])
    sample_loss, _ = pt3loss.chamfer_distance(mpcd, pcd)
    # BUGFIX: the original wrapped the losses in
    #   torch.tensor((...), requires_grad=True).to(device='cuda')
    # torch.tensor() copies the *values* and severs them from the autograd
    # graph, so backward() never reached the mesh; the .to() afterwards also
    # made the result a non-leaf whose .grad would not populate.
    # torch.stack keeps each loss attached to its graph.
    losses = torch.stack(
        (face_loss, edge_loss, point_loss, length_loss, normal_loss, sample_loss)
    )
    return losses
def loss():
    """Run one forward pass of point_mesh_edge_distance for benchmarking.

    NOTE(review): closes over `meshes` and `pcls` from the enclosing scope —
    presumably a timing closure returned to a benchmark harness; confirm
    against the caller.
    """
    point_mesh_edge_distance(meshes, pcls)
    # CUDA kernels launch asynchronously; synchronize so the measured time
    # includes the actual kernel execution, not just the launch.
    torch.cuda.synchronize()
def test_point_mesh_edge_distance(self):
    """
    Test point_mesh_edge_distance from pytorch3d.loss

    Compares the CUDA op against a naive O(P*E) Python implementation,
    checking both the forward value and the gradients on the mesh verts
    and cloud points.
    """
    device = get_random_cuda_device()
    # N meshes/clouds, V verts, F faces, P points per cloud.
    N, V, F, P = 4, 32, 16, 24
    meshes, pcls = self.init_meshes_clouds(N, V, F, P, device=device)

    # clone and detach for another backward pass through the op
    verts_op = [verts.clone().detach() for verts in meshes.verts_list()]
    for i in range(N):
        verts_op[i].requires_grad = True
    faces_op = [faces.clone().detach() for faces in meshes.faces_list()]
    meshes_op = Meshes(verts=verts_op, faces=faces_op)
    points_op = [points.clone().detach() for points in pcls.points_list()]
    for i in range(N):
        points_op[i].requires_grad = True
    pcls_op = Pointclouds(points_op)

    # Cuda implementation: forward & backward
    loss_op = point_mesh_edge_distance(meshes_op, pcls_op)

    # Naive implementation: forward & backward
    edges_packed = meshes.edges_packed()
    edges_list = packed_to_list(edges_packed, meshes.num_edges_per_mesh().tolist())
    loss_naive = torch.zeros(N, dtype=torch.float32, device=device)
    for i in range(N):
        points = pcls.points_list()[i]
        verts = meshes.verts_list()[i]
        # Packed edge indices are global; shift back to per-mesh indexing.
        v_first_idx = meshes.mesh_to_verts_packed_first_idx()[i]
        edges = verts[edges_list[i] - v_first_idx]
        num_p = points.shape[0]
        num_e = edges.shape[0]
        # Full (points x edges) distance matrix for this mesh/cloud pair.
        dists = torch.zeros((num_p, num_e), dtype=torch.float32, device=device)
        for p in range(num_p):
            for e in range(num_e):
                dist = self._point_to_edge_distance(points[p], edges[e])
                dists[p, e] = dist
        # Symmetric distance: each point to its nearest edge, and each
        # edge to its nearest point.
        min_dist_p, min_idx_p = dists.min(1)
        min_dist_e, min_idx_e = dists.min(0)
        loss_naive[i] = min_dist_p.mean() + min_dist_e.mean()
    loss_naive = loss_naive.mean()

    # NOTE: the comparison holds here despite any discrepancy in the
    # argmin indices returned by min(), because we compare gradients on
    # the verts and not on the edges or faces.
    # Compare forward pass
    self.assertClose(loss_op, loss_naive)

    # Compare backward pass
    rand_val = torch.rand(1).item()
    grad_dist = torch.tensor(rand_val, dtype=torch.float32, device=device)
    loss_naive.backward(grad_dist)
    loss_op.backward(grad_dist)

    # check verts grad
    for i in range(N):
        self.assertClose(
            meshes.verts_list()[i].grad, meshes_op.verts_list()[i].grad
        )
        self.assertClose(pcls.points_list()[i].grad, pcls_op.points_list()[i].grad)