Example No. 1
    def forward(ctx, xyz1, xyz2):
        # xyz1: (B, n, 3) and xyz2: (B, m, 3) point clouds.
        batchsize, n, _ = xyz1.size()
        _, m, _ = xyz2.size()

        # Output buffers the kernel fills in place: the distance from each
        # point to its nearest neighbour in the other cloud, and the index
        # of that neighbour. Allocate them directly on the GPU, since the
        # extension only has a CUDA path.
        dist1 = torch.zeros(batchsize, n, device="cuda")
        dist2 = torch.zeros(batchsize, m, device="cuda")
        idx1 = torch.zeros(batchsize, n, dtype=torch.int32, device="cuda")
        idx2 = torch.zeros(batchsize, m, dtype=torch.int32, device="cuda")

        chamfer.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
        # The indices are needed to route gradients in backward.
        ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
        return dist1, dist2
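
The forward above saves xyz1, xyz2 and both index buffers, which implies a matching backward method. Below is a minimal sketch of such a backward; the chamfer.backward call and its argument order are an assumption about the extension, not something the example confirms.

    @staticmethod
    def backward(ctx, grad_dist1, grad_dist2):
        xyz1, xyz2, idx1, idx2 = ctx.saved_tensors
        grad_xyz1 = torch.zeros_like(xyz1)
        grad_xyz2 = torch.zeros_like(xyz2)
        # ASSUMED kernel signature: scatters the incoming per-point distance
        # gradients back onto the clouds through the saved nearest-neighbour
        # indices, filling grad_xyz1 and grad_xyz2 in place.
        chamfer.backward(xyz1, xyz2, grad_xyz1, grad_xyz2,
                         grad_dist1, grad_dist2, idx1, idx2)
        return grad_xyz1, grad_xyz2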
Example No. 2
    def forward(ctx, opt, p1, p2):
        # p1: (B, n, 3) and p2: (B, m, 3) point clouds; opt.device is a
        # device string such as "cuda:0".
        batch_size = p1.shape[0]
        num_p1_points = p1.shape[1]
        num_p2_points = p2.shape[1]
        dist1 = torch.zeros(batch_size, num_p1_points, device=opt.device)
        dist2 = torch.zeros(batch_size, num_p2_points, device=opt.device)
        idx1 = torch.zeros(batch_size,
                           num_p1_points,
                           dtype=torch.int32,
                           device=opt.device)
        idx2 = torch.zeros(batch_size,
                           num_p2_points,
                           dtype=torch.int32,
                           device=opt.device)
        # The CUDA kernel requires contiguous inputs.
        p1 = p1.contiguous()
        p2 = p2.contiguous()
        if "cuda" in opt.device:
            chamfer.forward(p1, p2, dist1, dist2, idx1, idx2)
        else:
            raise NotImplementedError("CPU version not implemented")
        ctx.opt = opt
        ctx.save_for_backward(p1, p2, dist1, dist2, idx1, idx2)
        return dist1, dist2
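
Since the CPU path raises NotImplementedError, it may help to spell out what the kernel computes. Here is a small pure-PyTorch reference of the usual Chamfer convention (illustrative only, not part of the extension; whether this particular kernel squares the distances is an assumption): for every point, the squared distance to its nearest neighbour in the other cloud, plus that neighbour's index.

import torch

def chamfer_reference(p1, p2):
    # Pairwise Euclidean distances, shape (B, n, m), then squared.
    d = torch.cdist(p1, p2) ** 2
    dist1, idx1 = d.min(dim=2)  # nearest point in p2 for each point of p1
    dist2, idx2 = d.min(dim=1)  # nearest point in p1 for each point of p2
    return dist1, dist2, idx1.int(), idx2.int()

This runs on any device and is a handy sanity check against the kernel's output, at the cost of materialising the full (B, n, m) distance matrix.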
Example No. 3
import sys
import os

# Make the compiled extension importable from the setuptools build
# directory (a subdirectory of build/ whose name starts with "lib").
for file in os.listdir("build"):
    if file.startswith("lib"):
        sys.path.insert(0, os.path.join("build", file))

# torch must be imported before chamfer so that the extension's
# torch symbols are already loaded.
import torch
import chamfer

batch_size = 8
n, m = 30, 20

# Two random point clouds on the GPU.
xyz1 = torch.rand((batch_size, n, 3)).cuda()
xyz2 = torch.rand((batch_size, m, 3)).cuda()

# Output buffers the kernel fills in place.
dist1 = torch.zeros(batch_size, n).cuda()
dist2 = torch.zeros(batch_size, m).cuda()
idx1 = torch.zeros((batch_size, n), dtype=torch.int).cuda()
idx2 = torch.zeros((batch_size, m), dtype=torch.int).cuda()

chamfer.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
print(dist1)
print(dist2)
print(idx1)
print(idx2)
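
As an aside, the sys.path scan over build/ can be avoided by JIT-compiling the extension with torch.utils.cpp_extension.load. The source file names below are assumptions about the extension's layout; substitute the actual .cpp/.cu files.

from torch.utils.cpp_extension import load

# Builds (and caches) the extension, then imports it in one step.
chamfer = load(name="chamfer", sources=["chamfer.cpp", "chamfer.cu"])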
Example No. 4
    def forward(ctx, xyz1, xyz2):
        # This binding allocates and returns the output tensors itself
        # instead of filling caller-provided buffers.
        dist1, dist2, idx1, idx2 = chamfer.forward(xyz1, xyz2)
        ctx.save_for_backward(xyz1, xyz2, idx1, idx2)

        return dist1, dist2
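
For completeness, a hedged sketch of how this variant is typically used: the method lives in a torch.autograd.Function subclass and is invoked through .apply (the class name ChamferFunction is illustrative, not from the example).

import torch
import chamfer

class ChamferFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, xyz1, xyz2):
        dist1, dist2, idx1, idx2 = chamfer.forward(xyz1, xyz2)
        ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
        return dist1, dist2

xyz1 = torch.rand(8, 30, 3, device="cuda")
xyz2 = torch.rand(8, 20, 3, device="cuda")
dist1, dist2 = ChamferFunction.apply(xyz1, xyz2)

# A symmetric Chamfer loss is then the mean over both directions.
loss = dist1.mean() + dist2.mean()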