def backward(ctx, grad_dist1, grad_dist2):
    opt = ctx.opt
    p1, p2, dist1, dist2, idx1, idx2 = ctx.saved_tensors
    grad_p1 = torch.zeros_like(p1)
    grad_p2 = torch.zeros_like(p2)
    if "cuda" in opt.device:
        # The CUDA kernel scatters the incoming distance gradients back
        # onto both point clouds via the saved nearest-neighbor indices.
        chamfer.backward(p1, p2, grad_p1, grad_p2, grad_dist1, grad_dist2,
                         idx1, idx2)
    else:
        raise NotImplementedError("CPU version not implemented")
    # The leading None is the gradient slot for the non-tensor `opt`
    # argument of forward().
    return None, grad_p1, grad_p2
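For context, here is a minimal sketch of the forward pass this backward implies, assuming `chamfer` is the compiled CUDA extension and that its forward fills preallocated output buffers; the forward signature, shapes, and the `chamfer.forward` call are inferences from the saved tensors, not the actual binding:

import torch

class ChamferFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, opt, p1, p2):
        # Hypothetical shapes: p1 is (B, N, 3), p2 is (B, M, 3).
        b, n, m = p1.shape[0], p1.shape[1], p2.shape[1]
        dist1 = torch.zeros(b, n, device=p1.device)
        dist2 = torch.zeros(b, m, device=p2.device)
        idx1 = torch.zeros(b, n, dtype=torch.int32, device=p1.device)
        idx2 = torch.zeros(b, m, dtype=torch.int32, device=p2.device)
        chamfer.forward(p1, p2, dist1, dist2, idx1, idx2)  # assumed binding
        ctx.opt = opt  # non-tensor argument, stashed on ctx
        ctx.save_for_backward(p1, p2, dist1, dist2, idx1, idx2)
        return dist1, dist2

backward must return one gradient per forward input, which is why it leads with None for `opt`.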
Example #2
    def backward(ctx, graddist1, graddist2, gradidx1, gradidx2):
        # gradidx1/gradidx2 are ignored: the nearest-neighbor indices are
        # integer outputs and carry no gradient.
        xyz1, xyz2, idx1, idx2 = ctx.saved_tensors
        # The extension kernel expects contiguous memory.
        graddist1 = graddist1.contiguous()
        graddist2 = graddist2.contiguous()

        gradxyz1 = torch.zeros(xyz1.size()).cuda()
        gradxyz2 = torch.zeros(xyz2.size()).cuda()
        chamfer.backward(xyz1, xyz2, gradxyz1, gradxyz2,
                         graddist1, graddist2, idx1, idx2)
        return gradxyz1, gradxyz2
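Since this variant's forward also returns idx1 and idx2, autograd hands backward four incoming gradients, and the two for the indices are simply discarded. A hedged sketch of the matching forward, under the same assumed in/out convention for the chamfer binding; `ctx.mark_non_differentiable` is the real PyTorch API for declaring integer outputs gradient-free:

    @staticmethod
    def forward(ctx, xyz1, xyz2):
        b, n, m = xyz1.shape[0], xyz1.shape[1], xyz2.shape[1]
        dist1 = torch.zeros(b, n, device=xyz1.device)
        dist2 = torch.zeros(b, m, device=xyz2.device)
        idx1 = torch.zeros(b, n, dtype=torch.int32, device=xyz1.device)
        idx2 = torch.zeros(b, m, dtype=torch.int32, device=xyz2.device)
        chamfer.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)  # assumed binding
        ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
        # Tell autograd never to backpropagate through the index outputs;
        # backward still has to accept (and ignore) their gradient slots.
        ctx.mark_non_differentiable(idx1, idx2)
        return dist1, dist2, idx1, idx2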
Example #3
def backward(ctx, grad_dist1, grad_dist2):
    xyz1, xyz2, idx1, idx2 = ctx.saved_tensors
    # This binding allocates and returns the gradient tensors itself
    # rather than filling preallocated buffers.
    grad_xyz1, grad_xyz2 = chamfer.backward(xyz1, xyz2, idx1, idx2,
                                            grad_dist1, grad_dist2)
    return grad_xyz1, grad_xyz2
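All three variants wrap the same math. A pure-PyTorch reference, using only public torch APIs, is handy for sanity-checking any of them; the dense (B, N, M) pairwise matrix makes it a testing aid, not a substitute for the CUDA kernel:

import torch

def chamfer_reference(xyz1, xyz2):
    # xyz1: (B, N, 3), xyz2: (B, M, 3).
    diff = xyz1.unsqueeze(2) - xyz2.unsqueeze(1)   # (B, N, M, 3)
    d = diff.pow(2).sum(-1)                        # squared distances, (B, N, M)
    dist1, idx1 = d.min(dim=2)  # nearest point in xyz2 for each point of xyz1
    dist2, idx2 = d.min(dim=1)  # nearest point in xyz1 for each point of xyz2
    return dist1, dist2, idx1, idx2

# Gradient check on the differentiable outputs only:
xyz1 = torch.rand(2, 16, 3, dtype=torch.double, requires_grad=True)
xyz2 = torch.rand(2, 24, 3, dtype=torch.double, requires_grad=True)
torch.autograd.gradcheck(lambda a, b: chamfer_reference(a, b)[:2], (xyz1, xyz2))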