Code Example #1
def save_intermediate_results(fnames):
    num_pcs = 1
    k = 8
    for fname in fnames:
        # Problem here: our algorithm runs into trouble when there is only
        # one grid cell along a dimension; the drill is a long, thin object,
        # which triggers exactly that case
        if "drill_shaft" in fname:
            continue
        pc1 = torch.FloatTensor(
            read_ply(fname)[None, :, :3]).cuda()  # no need for normals
        pc2 = torch.FloatTensor(
            read_ply(fname)[None, :, :3]).cuda()  # no need for normals
        torch.save(pc1, "data/pc/" + fname.split('/')[-1][:-4] + '.pt')
        print(fname)
        print(pc1.shape)
        num_points = pc1.shape[1]
        pc1 = normalize_pc(pc1)
        print(pc1.min(dim=1)[0], pc1.max(dim=1)[0])
        pc2 = normalize_pc(pc2)
        lengths1 = torch.ones(
            (num_pcs, ), dtype=torch.long).cuda() * num_points
        lengths2 = torch.ones(
            (num_pcs, ), dtype=torch.long).cuda() * num_points
        dists, idxs, nn, grid = frnn.frnn_grid_points(
            pc1,
            pc2,
            lengths1,
            lengths2,
            K=k,
            r=0.1,
            filename=fname.split('/')[-1])
        print(fname + " done!")
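The helper normalize_pc is not defined in this snippet. Judging from the inline normalization in Code Example #6 (subtract the per-cloud minimum, then divide by the global maximum), it most likely rescales each cloud into the unit cube; the sketch below is an assumption based on that pattern, not the project's actual helper.

import torch

def normalize_pc(pc):
    # Hypothetical stand-in for the normalize_pc used above: shift each
    # (1, N, 3) cloud so its minimum corner sits at the origin, then scale by
    # the overall maximum so all coordinates land in [0, 1] (mirrors the
    # inline normalization in Code Example #6).
    pc = pc - pc.min(dim=1, keepdim=True)[0]
    pc = pc / pc.max()
    return pc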
Code Example #2
File: trainer.py Project: aoliao12138/DSS
def viewFromError(nCam, gtImage, predImage, predPoints, projPoints, splatter, offset=None):
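    # Locate the image region where gtImage and predImage differ the most,
    # average the predicted points whose projections fall in that region to get
    # a 3D focus point, and build nCam new cameras that look at it from
    # sphere-sampled positions scaled by offset.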
    allPositions = torch.from_numpy(read_ply("example_data/pointclouds/sphere_300.ply", nCam)).to(device=splatter.camera.device)[:, :3].unsqueeze(0)
    device = splatter.camera.device
    focalLength = splatter.camera.focalLength
    width = splatter.camera.width
    height = splatter.camera.height
    sv = splatter.camera.sv

    offset = offset or splatter.camera.focalLength*0.5
    fromP = allPositions * offset

    diff = torch.sum((gtImage - predImage).abs(), dim=-1)
    diff = torch.nn.functional.avg_pool2d(diff.unsqueeze(0), 9, stride=4, padding=4, ceil_mode=False, count_include_pad=False).squeeze(0)
    w = diff.argmax() % diff.shape[0]
    h = diff.argmax() // diff.shape[0]
    w *= 4
    h *= 4
    # average points projected inside this region
    _, knn_idx, _ = operations.group_knn(5, torch.tensor([w, h, 1], dtype=projPoints.dtype, device=projPoints.device).view(1, 1, 3).expand(projPoints.shape[0], -1, -1),
                                         projPoints, unique=False, NCHW=False)
    # B, 1, K
    PN = predPoints.shape[0]
    knn_points = torch.gather(predPoints.unsqueeze(1).expand(-1, PN, -1, -1), 2, knn_idx.unsqueeze(-1).expand(-1, -1, -1, predPoints.shape[-1]))
    center = torch.mean(knn_points, dim=-2).to(device=device)
    ups = torch.tensor([0, 0, 1], dtype=center.dtype, device=device).view(1, 1, 3).expand_as(fromP)
    ups = ups + torch.randn_like(ups) * 0.0001
    rotation, position = batchLookAt(fromP, center, ups)
    cameras = []
    for i in range(nCam):
        cam = PinholeCamera(device=device, focalLength=focalLength, width=width, height=height, sv=sv)
        cam.rotation = rotation[:, i, :, :]
        cam.position = position[:, i, :]
        cameras.append(cam)

    return diff.max(), cameras
Code Example #3
File: faiss_whole.py Project: lxxue/FRNN
    def faiss_exact(N, fname, K):
        print(fname, N, K)
        # points1 = torch.load("data/pc/"+fname).cpu().numpy()
        # points2 = torch.load("data/pc/"+fname).cpu().numpy()
        points1 = np.ascontiguousarray(read_ply("data/mesh/" + fname))
        points2 = np.ascontiguousarray(read_ply("data/mesh/" + fname))
        res = faiss.StandardGpuResources()
        flat_config = faiss.GpuIndexFlatConfig()
        flat_config.device = 0
        index = faiss.GpuIndexFlatL2(res, 3, flat_config)
        index.add(points2)
        torch.cuda.synchronize()

        def output():
            for i in range(N):
                D, I = index.search(points1, K)
            torch.cuda.synchronize()

        return output
Code Example #4
File: faiss_whole.py Project: lxxue/FRNN
    def faiss_approximate(N, fname, K):
        print(fname, N, K)
        res = faiss.StandardGpuResources()
        points1 = np.ascontiguousarray(read_ply("data/mesh/" + fname))
        points2 = np.ascontiguousarray(read_ply("data/mesh/" + fname))
        # index = faiss.index_factory(3, "IVF4096, PQ64")
        # index = faiss.index_factory(3, "IVF4096, Flat")
        index = faiss.index_factory(3, "IVF4096, Flat")
        # GpuClonerOptions must be instantiated, and the options object has to
        # be passed to index_cpu_to_gpu for useFloat16 to take effect
        co = faiss.GpuClonerOptions()
        co.useFloat16 = True
        index = faiss.index_cpu_to_gpu(res, 0, index, co)
        torch.cuda.synchronize()

        def output():
            for i in range(N):
                index.train(points2)
                D, I = index.search(points1, K)
            torch.cuda.synchronize()

        return output
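Both faiss_exact and faiss_approximate return a closure instead of running the search immediately, which suggests they are meant to be handed to a timing harness. Below is a minimal sketch of such a caller, assuming the closure is reachable from the calling scope (the functions are indented as if nested); the timing code and the file name "lucy.ply" (seen under data/mesh/ in Code Example #6) are assumptions, not part of faiss_whole.py.

import time

# Hypothetical driver: build the search closure once, then time N repeated runs.
run = faiss_exact(N=10, fname="lucy.ply", K=8)
start = time.time()
run()  # performs N searches on the GPU and synchronizes before returning
print("10 exact searches took {:.3f} s".format(time.time() - start))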
Code Example #5
File: frnn_drill.py Project: lxxue/FRNN
import torch
import frnn
from pytorch_points.utils.pc_utils import read_ply

if __name__ == "__main__":
    gpu = torch.device("cuda:0")
    pc = torch.cuda.FloatTensor(
        read_ply("drill/drill_shaft_vrip.ply")[None, ...])
    # print(pc.shape)
    # print(pc.min(dim=1)[0], pc.max(dim=1)[0])
    # pc -= pc.min(dim=1)[0]
    # pc /= pc.max()
    # print(pc.min(dim=1)[0], pc.max(dim=1)[0])
    lengths = torch.ones((1,), dtype=torch.long, device=gpu) * pc.shape[1]
    # print(lengths)
    dists, idxs, nn, grid = frnn.frnn_grid_points(
        pc, pc, lengths, lengths, 8, 0.1, None, False, True, 2.0)
    print(dists)
    print(idxs)
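For readability, the positional arguments of the frnn_grid_points call above can be spelled out with keywords. K and r appear as keywords in Code Example #1; the remaining names (grid, return_nn, return_sorted, radius_cell_ratio) are assumptions about the FRNN API and should be checked against the library before use.

# Same call as above, written with (assumed) keyword argument names for clarity.
dists, idxs, nn, grid = frnn.frnn_grid_points(
    pc, pc, lengths, lengths,
    K=8,                    # neighbors returned per query point
    r=0.1,                  # fixed search radius
    grid=None,              # no cached grid structure to reuse
    return_nn=False,        # do not gather neighbor coordinates
    return_sorted=True,     # return neighbors sorted by distance
    radius_cell_ratio=2.0)  # grid cell size relative to the radius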
  
  
Code Example #6
                sorted_points2,
                lengths1,
                lengths2,
                pc2_grid_off,
                sorted_points1_idxs,
                sorted_points2_idxs,
                grid_params_cuda,
                K,
                r,
                r * r,
            )
            torch.cuda.synchronize()

        return output


if __name__ == "__main__":
    fnames = sorted(
        glob.glob('data/mesh/*.ply') + glob.glob('data/mesh/*/*.ply'))
    print(fnames)
    # fnames = ['data/mesh/lucy.ply']
    # fnames = ['data/mesh/drill/drill_shaft_zip.ply'] + fnames
    # save_intermediate_results(fnames)
    for fname in fnames:
        pc = torch.FloatTensor(
            read_ply(fname)[None, :, :3]).cuda()  # no need for normals
        pc -= pc.min(dim=1)[0]
        pc /= pc.max()
        print(pc.min(dim=1)[0], pc.max(dim=1)[0])
        torch.save(pc, "data/pc/" + fname.split('/')[-1][:-4] + '.pt')
Code Example #7
File: camera.py Project: zuru/DSS
    def __init__(self,
                 nCam,
                 offset,
                 focalLength,
                 device=None,
                 points=None,
                 normals=None,
                 camWidth=256,
                 camHeight=256,
                 filename="../example_data/pointclouds/sphere_300.ply",
                 closer=True):
        """
        create camera position from a sphere around shape with descreasing distance
        input:
            nCam:           total number of cameras
            offset:         a number distance to shape surface
            focalLength:    a number
            (optional) points (B,N,3or4)
        allPositions (B,C,3)
        allRotations (B,C,3,3)
        """
        if device is None:
            if points is not None:
                self.device = points.device
            else:
                self.device = torch.cuda.current_device()
        else:
            self.device = device
        self.closer = closer
        if filename is not None:
            self.allPositions = torch.from_numpy(read_ply(
                filename, nCam)).to(device=self.device)[:, :3]
            self.allPositions = self.allPositions.unsqueeze(0)
        else:
            sampleIdx, self.allPositions = operations.furthest_point_sample(
                points.cuda(), nCam, NCHW=False)
            self.allPositions = self.allPositions.to(self.device)
            if normals is not None:
                _, idx, _ = operations.faiss_knn(100,
                                                 self.allPositions.cpu(),
                                                 points.cpu(),
                                                 NCHW=False)
                knn_normals = torch.gather(
                    normals.unsqueeze(1).expand(-1, self.allPositions.shape[1],
                                                -1, -1), 2,
                    idx.unsqueeze(-1).expand(-1, -1, -1, normals.shape[-1]))
                normals = torch.mean(knn_normals, dim=2).to(self.device)

        if points is not None:
            if points.dim() == 2:
                points = points.unsqueeze(0)
            maxP = torch.max(points, dim=1, keepdim=True)[0]
            minP = torch.min(points, dim=1, keepdim=True)[0]
            bb = maxP - minP
            offset = offset + bb
            if normals is not None:
                center = self.allPositions
                # self.allPositions = (torch.mean(normals, dim=1, keepdim=True))
                self.allPositions = normals + (torch.mean(
                    normals, dim=1, keepdim=True))
                self.allPositions += torch.randn_like(self.allPositions) * 0.01
            else:
                center = torch.mean(points, dim=1, keepdim=True)
        else:
            center = torch.zeros([1, 1, 3],
                                 dtype=self.allPositions.dtype,
                                 device=self.allPositions.device)

        self.allPositions = self.allPositions * offset
        self.allPositions = center + self.allPositions
        # Bx1x3
        self.to = center.expand_as(self.allPositions)
        # BxNx3
        # self.ups = torch.tensor([0, 1, 0], dtype=self.to.dtype, device=self.to.device).view(1, 1, 3).expand_as(self.allPositions)
        # for sketchfab
        self.ups = torch.tensor([0, 0, 1],
                                dtype=self.to.dtype,
                                device=self.to.device).view(1, 1, 3).expand_as(
                                    self.allPositions)
        self.ups = self.ups + torch.randn_like(self.ups) * 0.0001
        self.rotation, self.position = batchLookAt(self.allPositions, self.to,
                                                   self.ups)
        self.idx = 0
        self.length = self.rotation.shape[1]
        self.focalLength = focalLength
        self.camWidth = camWidth
        self.camHeight = camHeight
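batchLookAt is not shown in this excerpt. The sketch below is only a guess at what it computes, following the standard look-at construction; the row/column layout and handedness conventions may differ from DSS's actual implementation.

import torch
import torch.nn.functional as F

def batch_look_at_sketch(eye, target, up):
    # Hypothetical batched look-at: eye, target, up are (B, C, 3) tensors.
    # Returns a (B, C, 3, 3) rotation whose rows are the camera axes and the
    # (B, C, 3) camera positions; only an assumption about what batchLookAt does.
    forward = F.normalize(target - eye, dim=-1)                    # viewing direction
    right = F.normalize(torch.cross(up, forward, dim=-1), dim=-1)  # camera x-axis
    true_up = torch.cross(forward, right, dim=-1)                  # camera y-axis
    rotation = torch.stack([right, true_up, forward], dim=-2)
    return rotation, eye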