Example #1
    def __init__(self,
                 usd_filepath: str,
                 cache_dir: str = '../data/USDMeshes'):
        usd_filepath = Path(usd_filepath)
        assert usd_filepath.suffix in ['.usd', '.usda']
        assert usd_filepath.exists(), f'USD file at {usd_filepath} was not found.'

        self.cache = helpers.Cache(get_mesh_attributes,
                                   cache_dir,
                                   cache_key=helpers._get_hash(usd_filepath))
        self.names = self.cache.cached_ids

        stage = Usd.Stage.Open(str(usd_filepath))
        mesh_prims = [x for x in stage.Traverse() if UsdGeom.Mesh(x)]
        uncached_mesh_prims = filter(lambda x: x.GetName() not in self.names,
                                     mesh_prims)
        for mesh_prim in uncached_mesh_prims:
            name = mesh_prim.GetName()
            mesh = UsdGeom.Mesh(mesh_prim)
            face_counts = torch.tensor(mesh.GetFaceVertexCountsAttr().Get())
            # Faces must all have the same vertex count to stack into a
            # single tensor, so skip meshes with mixed polygon sizes.
            if not torch.all(face_counts == face_counts[0]):
                log.warning(f'Skipping mesh {name}, not all faces have the '
                            'same number of vertices.')
            else:
                self.cache(name, usd_mesh=mesh)
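
All of these examples revolve around helpers.Cache, whose implementation is not shown here. Below is a minimal sketch of what it is assumed to do, inferred from its use in these examples and from the tests further down: memoize the wrapped function's output to <cache_dir>/<cache_key>/<name>.p and reload it on later calls. The real implementation evidently also moves CUDA tensors to CPU before serializing (see the tests), which this sketch omits.

import os
import pickle


class Cache:
    # Minimal sketch, not the library code: memoizes func(*args, **kwargs)
    # under <cache_dir>/<cache_key>/<name>.p, keyed by a per-sample name.
    def __init__(self, func, cache_dir, cache_key):
        self.func = func
        self.cache_dir = os.path.join(str(cache_dir), cache_key)
        os.makedirs(self.cache_dir, exist_ok=True)
        # Entries already written by previous runs.
        self.cached_ids = [os.path.splitext(p)[0]
                          for p in os.listdir(self.cache_dir)]

    def __call__(self, name, *args, **kwargs):
        path = os.path.join(self.cache_dir, name + '.p')
        if name in self.cached_ids:
            with open(path, 'rb') as f:
                return pickle.load(f)        # cache hit: load from disk
        result = self.func(*args, **kwargs)  # cache miss: compute,
        with open(path, 'wb') as f:
            pickle.dump(result, f)           # persist,
        self.cached_ids.append(name)
        return result                        # and return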
Example #2
    def __init__(self, root: str, cache_dir: str, categories: list = ['chair'], train: bool = True,
                 split: float = .7, resolution: int = 100, smoothing_iterations: int = 3, mode='Tri',
                 no_progress: bool = False):
        assert mode in ['Tri', 'Quad']

        self.root = Path(root)
        self.cache_dir = Path(cache_dir) / 'surface_meshes'
        dataset_params = {
            'root': root,
            'categories': categories,
            'train': train,
            'split': split,
            'no_progress': no_progress,
        }
        self.params = {
            'resolution': resolution,
            'smoothing_iterations': smoothing_iterations,
            'mode': mode,
        }

        mesh_dataset = ShapeNet_Meshes(**dataset_params)
        voxel_dataset = ShapeNet_Voxels(**dataset_params, cache_dir=cache_dir, resolutions=[resolution])
        combined_dataset = ShapeNet_Combination([mesh_dataset, voxel_dataset])

        self.names = combined_dataset.names
        self.synset_idxs = combined_dataset.synset_idxs
        self.synsets = combined_dataset.synsets
        self.labels = combined_dataset.labels

        if mode == 'Tri':
            mesh_conversion = tfs.VoxelGridToTriangleMesh(threshold=0.5,
                                                          mode='marching_cubes',
                                                          normalize=False)
        else:
            mesh_conversion = tfs.VoxelGridToQuadMesh(threshold=0.5,
                                                      normalize=False)

        def convert(og_mesh, voxel):
            # Voxel grid -> mesh -> Laplacian smoothing, then realign the
            # result to the original mesh's coordinate frame.
            transforms = tfs.Compose([mesh_conversion,
                                      tfs.MeshLaplacianSmoothing(smoothing_iterations)])
            new_mesh = transforms(voxel)
            new_mesh.vertices = pcfunc.realign(new_mesh.vertices, og_mesh.vertices)
            return {'vertices': new_mesh.vertices, 'faces': new_mesh.faces}

        self.cache_convert = helpers.Cache(convert, self.cache_dir,
                                           cache_key=helpers._get_hash(self.params))

        desc = 'converting to surface meshes'
        for idx in tqdm(range(len(combined_dataset)), desc=desc, disable=no_progress):
            name = combined_dataset.names[idx]
            if name not in self.cache_convert.cached_ids:
                sample = combined_dataset[idx]
                voxel = sample['data'][str(resolution)]
                og_mesh = TriangleMesh.from_tensors(sample['data']['vertices'],
                                                    sample['data']['faces'])
                self.cache_convert(name, og_mesh=og_mesh, voxel=voxel)
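
Assuming this constructor belongs to a ShapeNet_Surface_Meshes class (the name the later examples use) and a local ShapeNet copy, instantiation might look as follows; the paths are placeholders. The first run pays the conversion cost, later runs are served from the cache.

# Hypothetical usage; paths are placeholders for a local ShapeNet copy.
dataset = ShapeNet_Surface_Meshes(root='/data/ShapeNet',
                                  cache_dir='/data/cache',
                                  categories=['chair'],
                                  train=True,
                                  resolution=32,
                                  mode='Tri')
# Cached entries can be fetched directly through the Cache object.
entry = dataset.cache_convert(dataset.names[0])
vertices, faces = entry['vertices'], entry['faces']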
Example #3
    def __init__(self, *args, preprocessing_transform=None, preprocessing_params: dict = None,
                 transform=None, no_progress: bool = False, **kwargs):
        """
        Args:
            *args, **kwargs: passed through to initialize(*args, **kwargs) (see class and initialize documentation).
            preprocessing_params (dict): parameters for the preprocessing:
                - 'cache_dir': path to the cached preprocessed data.
                - 'num_workers': number of processes used in parallel for preprocessing (default: number of cores).
            preprocessing_transform (Callable): Called on the outputs of _get_data over the indices
                                                from 0 to len(self) during the construction of the dataset,
                                                the preprocessed outputs are then cached to 'cache_dir'.
            transform (Callable): Called on the preprocessed data at __getitem__.
            no_progress (bool): disable tqdm progress bar for preprocessing.
        """
        self.initialize(*args, **kwargs)
        if preprocessing_transform is not None:
            desc = 'Applying preprocessing'
            if preprocessing_params is None:
                preprocessing_params = {}

            cache_dir = preprocessing_params.get('cache_dir')
            assert cache_dir is not None, 'Cache directory is not given'

            self.cache_convert = helpers.Cache(
                preprocessing_transform,
                cache_dir=cache_dir,
                cache_key=helpers._get_hash(repr(preprocessing_transform))
            )

            use_cuda = preprocessing_params.get('use_cuda', False)

            num_workers = preprocessing_params.get('num_workers')
            uncached = [idx for idx in range(len(self))
                        if self._get_attributes(idx)['name'] not in self.cache_convert.cached_ids]
            if len(uncached) > 0:
                if num_workers == 0:
                    with torch.no_grad():
                        # Only indices missing from the cache need processing.
                        for idx in tqdm(uncached, desc=desc, disable=no_progress):
                            name = self._get_attributes(idx)['name']
                            data = self._get_data(idx)
                            self.cache_convert(name, *data)
                else:
                    # Pool(None) falls back to os.cpu_count() workers.
                    with Pool(num_workers) as p:
                        iterator = p.imap_unordered(
                            _preprocess_task,
                            [(idx, self._get_data, self._get_attributes, self.cache_convert)
                             for idx in uncached])
                        for _ in tqdm(range(len(uncached)), desc=desc, disable=no_progress):
                            next(iterator)
        else:
            self.cache_convert = None

        self.transform = transform
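
This constructor leans on subclass hooks: initialize, __len__, _get_attributes and _get_data. Here is a toy subclass sketch of the assumed contract; the base-class name and the file-based loader are hypothetical.

from pathlib import Path

import torch


class TensorFileDataset(KaolinDataset):  # base-class name is an assumption
    def initialize(self, filepaths):
        self.filepaths = [Path(p) for p in filepaths]

    def __len__(self):
        return len(self.filepaths)

    def _get_attributes(self, index):
        # 'name' keys the cache, so it must be unique per sample.
        return {'name': self.filepaths[index].stem}

    def _get_data(self, index):
        # The returned tuple is unpacked into preprocessing_transform(*data).
        return (torch.load(self.filepaths[index]),)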
Example #4
    def __init__(self, root: str, cache_dir: str, categories: list = ['chair'], train: bool = True,
                 split: float = .7, resolution: int = 100, num_points: int = 5000, occ: bool = False,
                 smoothing_iterations: int = 3, sample_box=True, no_progress: bool = False):
        self.root = Path(root)
        self.cache_dir = Path(cache_dir) / 'sdf_points'

        self.params = {
            'resolution': resolution,
            'num_points': num_points,
            'occ': occ,
            'smoothing_iterations': smoothing_iterations,
            'sample_box': sample_box,
        }

        surface_mesh_dataset = ShapeNet_Surface_Meshes(root=root,
                                                       cache_dir=cache_dir,
                                                       categories=categories,
                                                       train=train,
                                                       split=split,
                                                       resolution=resolution,
                                                       smoothing_iterations=smoothing_iterations,
                                                       no_progress=no_progress)

        self.names = surface_mesh_dataset.names
        self.synset_idxs = surface_mesh_dataset.synset_idxs
        self.synsets = surface_mesh_dataset.synsets
        self.labels = surface_mesh_dataset.labels

        def convert(mesh):
            # Build an SDF query function from the mesh, record its true
            # bounding box, then evaluate the SDF on points sampled uniformly
            # from a box slightly larger than [-0.5, 0.5]^3.
            sdf = mesh_cvt.trianglemesh_to_sdf(mesh, num_points)
            bbox_true = torch.stack((mesh.vertices.min(dim=0)[0],
                                     mesh.vertices.max(dim=0)[0]), dim=1).view(-1)
            points = 1.05 * (torch.rand(num_points, 3).to(mesh.vertices.device) - .5)
            distances = sdf(points)
            return {'points': points, 'distances': distances, 'bbox': bbox_true}

        self.cache_convert = helpers.Cache(convert, self.cache_dir,
                                           cache_key=helpers._get_hash(self.params))

        desc = 'converting to sdf points'
        for idx in tqdm(range(len(surface_mesh_dataset)), desc=desc, disable=no_progress):
            name = surface_mesh_dataset.names[idx]
            if name not in self.cache_convert.cached_ids:
                sample = surface_mesh_dataset[idx]
                mesh = TriangleMesh.from_tensors(sample['data']['vertices'],
                                                 sample['data']['faces'])

                # Use cuda if available to speed up conversion
                if torch.cuda.is_available():
                    mesh.cuda()
                self.cache_convert(name, mesh=mesh)
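
Note that 'occ' and 'sample_box' are hashed into the cache key but never read inside convert. If occupancy labels are wanted, one way to derive them from the cached signed distances, assuming the usual negative-inside sign convention, is:

# Hypothetical post-processing; `dataset` is an instance of this class.
entry = dataset.cache_convert(dataset.names[0])
occupancy = (entry['distances'] <= 0).float()  # 1 inside the surface, 0 outside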
Example #5
    def __init__(self, root: str, cache_dir: str, categories: list = ['chair'], train: bool = True,
                 split: float = .7, num_points: int = 5000, smoothing_iterations=3,
                 surface=True, resolution=100, normals=True, no_progress: bool = False):
        self.root = Path(root)
        self.cache_dir = Path(cache_dir) / 'points'

        dataset_params = {
            'root': root,
            'categories': categories,
            'train': train,
            'split': split,
            'no_progress': no_progress,
        }
        self.params = {
            'num_points': num_points,
            'smoothing_iterations': smoothing_iterations,
            'surface': surface,
            'resolution': resolution,
            'normals': normals,
        }

        if surface:
            dataset = ShapeNet_Surface_Meshes(**dataset_params,
                                              cache_dir=cache_dir,
                                              resolution=resolution,
                                              smoothing_iterations=smoothing_iterations)
        else:
            dataset = ShapeNet_Meshes(**dataset_params)

        self.names = dataset.names
        self.synset_idxs = dataset.synset_idxs
        self.synsets = dataset.synsets
        self.labels = dataset.labels

        def convert(mesh):
            # Sample points on the mesh surface and carry over the normal of
            # the face each point was drawn from.
            points, face_choices = mesh_cvt.trianglemesh_to_pointcloud(mesh, num_points)
            face_normals = mesh.compute_face_normals()
            point_normals = face_normals[face_choices]
            return {'points': points, 'normals': point_normals}

        self.cache_convert = helpers.Cache(convert, self.cache_dir,
                                           cache_key=helpers._get_hash(self.params))

        desc = 'converting to points'
        for idx in tqdm(range(len(dataset)), desc=desc, disable=no_progress):
            name = dataset.names[idx]
            if name not in self.cache_convert.cached_ids:
                sample = dataset[idx]
                mesh = TriangleMesh.from_tensors(sample['data']['vertices'],
                                                 sample['data']['faces'])
                self.cache_convert(name, mesh=mesh)
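
Assuming this constructor belongs to a point-cloud dataset class named something like ShapeNet_Points, cached clouds can be pulled out directly:

# Hypothetical usage; the class name and paths are placeholders.
dataset = ShapeNet_Points(root='/data/ShapeNet', cache_dir='/data/cache',
                          num_points=2048, surface=False)
entry = dataset.cache_convert(dataset.names[0])
points, normals = entry['points'], entry['normals']  # each of shape (num_points, 3)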
Example #6
def test_cache_tensor(device):
    tensor = torch.ones(5, device=device)

    cache = helpers.Cache(func=lambda x: x,
                          cache_dir=CACHE_DIR,
                          cache_key='test')
    cache('tensor', x=tensor)

    # Make sure cache is created
    assert os.path.exists(os.path.join(CACHE_DIR, 'test', 'tensor.p'))

    # Confirm loaded tensor is correct and on CPU device
    loaded = cache('tensor')
    assert torch.all(loaded.eq(tensor.cpu()))
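
The tests in this group take a device argument, presumably a pytest fixture defined elsewhere in the suite. A sketch of what such a fixture might look like, parameterizing over CPU and, when available, CUDA:

import pytest
import torch


# Hypothetical fixture supplying the `device` argument used by these tests.
@pytest.fixture(params=['cpu'] + (['cuda'] if torch.cuda.is_available() else []))
def device(request):
    return request.param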
Example #7
def test_cache_pointcloud(device):
    points = torch.ones(10, 3, device=device)
    pointcloud = kal.rep.PointCloud(points)

    cache = helpers.Cache(func=lambda x: x,
                          cache_dir=CACHE_DIR,
                          cache_key='test')
    cache('pointcloud', x=pointcloud)

    # Make sure cache is created
    assert os.path.exists(os.path.join(CACHE_DIR, 'test', 'pointcloud.p'))

    # Confirm loaded pointcloud is correct and on CPU device
    loaded = cache('pointcloud')
    assert torch.all(loaded.points.eq(points.cpu()))
Example #8
def test_cache_voxelgrid(device):
    voxels = torch.ones(3, 3, 3, device=device)
    voxelgrid = kal.rep.VoxelGrid(voxels)

    cache = helpers.Cache(func=lambda x: x,
                          cache_dir=CACHE_DIR,
                          cache_key='test')
    cache('voxelgrid', x=voxelgrid)

    # Make sure cache is created
    assert os.path.exists(os.path.join(CACHE_DIR, 'test', 'voxelgrid.p'))

    # Confirm loaded voxelgrid is correct and on CPU device
    loaded = cache('voxelgrid')
    assert torch.all(loaded.voxels.eq(voxels.cpu()))
Example #9
def test_cache_mesh(device):
    vertices = torch.ones(10, 3, device=device)
    faces = torch.ones(20, 3, device=device, dtype=torch.long)
    mesh = kal.rep.TriangleMesh.from_tensors(vertices, faces)

    cache = helpers.Cache(func=lambda x: x,
                          cache_dir=CACHE_DIR,
                          cache_key='test')
    cache('mesh', x=mesh)

    # Make sure cache is created
    assert os.path.exists(os.path.join(CACHE_DIR, 'test', 'mesh.p'))

    # Confirm loaded mesh is correct and on CPU device
    loaded = cache('mesh')
    assert torch.all(loaded.vertices.eq(vertices.cpu()))
    assert torch.all(loaded.faces.eq(faces.cpu()))
Example #10
def test_cache_dict(device):
    dictionary = {
        'a': torch.ones(5, device=device),
        'b': np.zeros(5),
    }

    cache = helpers.Cache(func=lambda x: x,
                          cache_dir=CACHE_DIR,
                          cache_key='test')
    cache('dictionary', x=dictionary)

    # Make sure cache is created
    assert os.path.exists(os.path.join(CACHE_DIR, 'test', 'dictionary.p'))

    # Confirm loaded dict is correct and on CPU device
    loaded = cache('dictionary')
    assert torch.all(loaded['a'].eq(dictionary['a'].cpu()))
    assert np.all(np.isclose(loaded['b'], dictionary['b']))
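
All of these tests write into a shared CACHE_DIR, which is defined elsewhere in the test module. A teardown along these lines would keep repeated runs independent:

import shutil

import pytest


# Hypothetical autouse fixture dropping the cache after each test.
@pytest.fixture(autouse=True)
def clean_cache():
    yield  # run the test first
    shutil.rmtree(CACHE_DIR, ignore_errors=True)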