Example #1
    def __init__(self, root: str, cache_dir: str, categories: list = ['chair'], train: bool = True,
                 split: float = .7, resolutions=[128, 32], no_progress: bool = False):
        self.root = Path(root)
        self.cache_dir = Path(cache_dir) / 'voxels'
        self.cache_transforms = {}
        self.params = {
            'resolutions': resolutions,
        }
        mesh_dataset = ShapeNet_Meshes(root=root,
                                       categories=categories,
                                       train=train,
                                       split=split,
                                       no_progress=no_progress)
        self.names = mesh_dataset.names
        self.synset_idxs = mesh_dataset.synset_idxs
        self.synsets = mesh_dataset.synsets
        self.labels = mesh_dataset.labels

        for res in self.params['resolutions']:
            self.cache_transforms[res] = tfs.CacheCompose([
                tfs.TriangleMeshToVoxelGrid(res, normalize=False, vertex_offset=0.5),
                tfs.FillVoxelGrid(thresh=0.5),
                tfs.ExtractProjectOdmsFromVoxelGrid()
            ], self.cache_dir)

            desc = 'converting to voxels'
            for idx in tqdm(range(len(mesh_dataset)), desc=desc, disable=no_progress):
                name = mesh_dataset.names[idx]
                if name not in self.cache_transforms[res].cached_ids:
                    sample = mesh_dataset[idx]
                    mesh = TriangleMesh.from_tensors(sample['data']['vertices'],
                                                     sample['data']['faces'])
                    self.cache_transforms[res](name, mesh)
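A minimal usage sketch for the constructor above. The import path, data root, and cache directory are assumptions; the per-resolution sample key follows the sample['data'][str(resolution)] access pattern used in Example #2.

from kaolin.datasets.shapenet import ShapeNet_Voxels  # import path is an assumption

dataset = ShapeNet_Voxels(root='data/ShapeNet', cache_dir='cache',
                          categories=['chair'], train=True, split=.7,
                          resolutions=[32])
sample = dataset[0]
voxels = sample['data']['32']  # voxel grid cached at resolution 32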
Example #2
    def __init__(self, root: str, cache_dir: str, categories: list = ['chair'], train: bool = True,
                 split: float = .7, resolution: int = 100, smoothing_iterations: int = 3, mode='Tri',
                 no_progress: bool = False):
        assert mode in ['Tri', 'Quad']

        self.root = Path(root)
        self.cache_dir = Path(cache_dir) / 'surface_meshes'
        dataset_params = {
            'root': root,
            'categories': categories,
            'train': train,
            'split': split,
            'no_progress': no_progress,
        }
        self.params = {
            'resolution': resolution,
            'smoothing_iterations': smoothing_iterations,
            'mode': mode,
        }

        mesh_dataset = ShapeNet_Meshes(**dataset_params)
        voxel_dataset = ShapeNet_Voxels(**dataset_params, cache_dir=cache_dir, resolutions=[resolution])
        combined_dataset = ShapeNet_Combination([mesh_dataset, voxel_dataset])

        self.names = combined_dataset.names
        self.synset_idxs = combined_dataset.synset_idxs
        self.synsets = combined_dataset.synsets
        self.labels = combined_dataset.labels

        if mode == 'Tri':
            mesh_conversion = tfs.VoxelGridToTriangleMesh(threshold=0.5,
                                                          mode='marching_cubes',
                                                          normalize=False)
        else:
            mesh_conversion = tfs.VoxelGridToQuadMesh(threshold=0.5,
                                                      normalize=False)

        def convert(og_mesh, voxel):
            transforms = tfs.Compose([mesh_conversion,
                                      tfs.MeshLaplacianSmoothing(smoothing_iterations)])

            new_mesh = transforms(voxel)
            new_mesh.vertices = pcfunc.realign(new_mesh.vertices, og_mesh.vertices)
            return {'vertices': new_mesh.vertices, 'faces': new_mesh.faces}

        self.cache_convert = helpers.Cache(convert, self.cache_dir,
                                           cache_key=helpers._get_hash(self.params))

        desc = 'converting to surface meshes'
        for idx in tqdm(range(len(combined_dataset)), desc=desc, disable=no_progress):
            name = combined_dataset.names[idx]
            if name not in self.cache_convert.cached_ids:
                sample = combined_dataset[idx]
                voxel = sample['data'][str(resolution)]
                og_mesh = TriangleMesh.from_tensors(sample['data']['vertices'],
                                                    sample['data']['faces'])
                self.cache_convert(name, og_mesh=og_mesh, voxel=voxel)
Example #3
    def __call__(self, voxel: Type[VoxelGrid]):
        """
        Args:
            voxel (torch.Tensor): Voxel grid.

        Returns:
            (TriangleMesh): Converted triangle mesh.
        """
        verts, faces = cvt.voxelgrid_to_trianglemesh(voxel, self.thresh, self.mode, self.normalize)
        return TriangleMesh.from_tensors(vertices=verts, faces=faces)
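A sketch of applying this transform directly to a toy occupancy grid. The tfs alias mirrors the other examples; that it points at kaolin.transforms is an assumption, while the constructor arguments are the ones used in Example #2.

import torch
import kaolin.transforms as tfs  # module path is an assumption

voxels = (torch.rand(32, 32, 32) > 0.7).float()  # toy binary occupancy grid
to_mesh = tfs.VoxelGridToTriangleMesh(threshold=0.5,
                                      mode='marching_cubes',
                                      normalize=False)
mesh = to_mesh(voxels)
print(mesh.vertices.shape, mesh.faces.shape)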
Example #4
    def __getitem__(self, index):
        """Returns the item at index idx. """
        category = torch.tensor(self.cat_idxs[index],
                                dtype=torch.long,
                                device=self.device)
        data = TriangleMesh.from_mesh(self.filepaths[index])
        data.to(self.device)
        if self.transform:
            data = self.transform(data)

        return data, category
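Because this __getitem__ returns a TriangleMesh object rather than a fixed-size tensor, the default DataLoader collate cannot stack samples. A list-collecting collate_fn is one workaround; this is a sketch, not part of the dataset class.

import torch
from torch.utils.data import DataLoader

def collate_meshes(batch):
    # keep the variable-size meshes in a Python list, stack the category labels
    meshes, categories = zip(*batch)
    return list(meshes), torch.stack(categories)

def make_loader(dataset, batch_size=8):
    # dataset is an instance of the class whose __getitem__ is shown above
    return DataLoader(dataset, batch_size=batch_size, shuffle=True,
                      collate_fn=collate_meshes)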
Example #5
    def __call__(self, sdf: Callable):
        """
        Args:
            sdf (Callable): An object with an .eval_occ method that indicates
                which of a set of query points lie inside the surface.

        Returns:
            (TriangleMesh): Computed triangle mesh.
        """
        verts, faces = cvt.sdf_to_trianglemesh(sdf, self.bbox_center, self.bbox_dim,
                                               self.resolution, self.upsampling_steps)
        return TriangleMesh.from_tensors(vertices=verts, faces=faces)
Example #6
    def __init__(self, root: str, cache_dir: str, categories: list = ['chair'], train: bool = True,
                 split: float = .7, resolution: int = 100, num_points: int = 5000, occ: bool = False,
                 smoothing_iterations: int = 3, sample_box=True, no_progress: bool = False):
        self.root = Path(root)
        self.cache_dir = Path(cache_dir) / 'sdf_points'

        self.params = {
            'resolution': resolution,
            'num_points': num_points,
            'occ': occ,
            'smoothing_iterations': smoothing_iterations,
            'sample_box': sample_box,
        }

        surface_mesh_dataset = ShapeNet_Surface_Meshes(root=root,
                                                       cache_dir=cache_dir,
                                                       categories=categories,
                                                       train=train,
                                                       split=split,
                                                       resolution=resolution,
                                                       smoothing_iterations=smoothing_iterations,
                                                       no_progress=no_progress)

        self.names = surface_mesh_dataset.names
        self.synset_idxs = surface_mesh_dataset.synset_idxs
        self.synsets = surface_mesh_dataset.synsets
        self.labels = surface_mesh_dataset.labels

        def convert(mesh):
            sdf = mesh_cvt.trianglemesh_to_sdf(mesh, num_points)
            bbox_true = torch.stack((mesh.vertices.min(dim=0)[0],
                                     mesh.vertices.max(dim=0)[0]), dim=1).view(-1)
            points = 1.05 * (torch.rand(self.params['num_points'], 3).to(mesh.vertices.device) - .5)
            distances = sdf(points)
            return {'points': points, 'distances': distances, 'bbox': bbox_true}

        self.cache_convert = helpers.Cache(convert, self.cache_dir,
                                           cache_key=helpers._get_hash(self.params))

        desc = 'converting to sdf points'
        for idx in tqdm(range(len(surface_mesh_dataset)), desc=desc, disable=no_progress):
            name = surface_mesh_dataset.names[idx]
            if name not in self.cache_convert.cached_ids:
                idx = surface_mesh_dataset.names.index(name)
                sample = surface_mesh_dataset[idx]
                mesh = TriangleMesh.from_tensors(sample['data']['vertices'],
                                                 sample['data']['faces'])

                # Use cuda if available to speed up conversion
                if torch.cuda.is_available():
                    mesh.cuda()
                self.cache_convert(name, mesh=mesh)
Example #7
    def __init__(self, root: str, cache_dir: str, categories: list = ['chair'], train: bool = True,
                 split: float = .7, num_points: int = 5000, smoothing_iterations=3,
                 surface=True, resolution=100, normals=True, no_progress: bool = False):
        self.root = Path(root)
        self.cache_dir = Path(cache_dir) / 'points'

        dataset_params = {
            'root': root,
            'categories': categories,
            'train': train,
            'split': split,
            'no_progress': no_progress,
        }
        self.params = {
            'num_points': num_points,
            'smoothing_iterations': smoothing_iterations,
            'surface': surface,
            'resolution': resolution,
            'normals': normals,
        }

        if surface:
            dataset = ShapeNet_Surface_Meshes(**dataset_params,
                                              cache_dir=cache_dir,
                                              resolution=resolution,
                                              smoothing_iterations=smoothing_iterations)
        else:
            dataset = ShapeNet_Meshes(**dataset_params)

        self.names = dataset.names
        self.synset_idxs = dataset.synset_idxs
        self.synsets = dataset.synsets
        self.labels = dataset.labels

        def convert(mesh):
            points, face_choices = mesh_cvt.trianglemesh_to_pointcloud(mesh, num_points)
            face_normals = mesh.compute_face_normals()
            point_normals = face_normals[face_choices]
            return {'points': points, 'normals': point_normals}

        self.cache_convert = helpers.Cache(convert, self.cache_dir,
                                           cache_key=helpers._get_hash(self.params))

        desc = 'converting to points'
        for idx in tqdm(range(len(dataset)), desc=desc, disable=no_progress):
            name = dataset.names[idx]
            if name not in self.cache_convert.cached_ids:
                idx = dataset.names.index(name)
                sample = dataset[idx]
                mesh = TriangleMesh.from_tensors(sample['data']['vertices'],
                                                 sample['data']['faces'])
                self.cache_convert(name, mesh=mesh)
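A pure-torch preprocessing sketch for the cached point clouds produced above: center them and scale them to the unit sphere. The random tensor stands in for a cached 'points' entry.

import torch

points = torch.rand(5000, 3)                        # stand-in for a cached point cloud
points = points - points.mean(dim=0, keepdim=True)  # center at the origin
points = points / points.norm(dim=1).max()          # scale to fit inside the unit sphere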
Example #8
def load_all():
    """Sanity-check every ModelNet40 .off file by loading it and sampling points."""
    base = Path('data/ModelNet40')
    all_models = list(base.glob('**/*.off'))
    bad = []
    for model in all_models:
        try:
            data = TriangleMesh.from_off(model)
            data.sample(16)
        except Exception:
            # record the broken file and keep going, so the final report
            # lists every bad model instead of stopping at the first one
            print(model)
            bad.append(model)
    print(bad)
Example #9
    def _read(self, fpath):
        data = np.load(fpath)
        if 'vertices' in data and 'faces' in data:
            verts = torch.from_numpy(data['vertices'])
            faces = torch.from_numpy(data['faces'])
            if data['faces'].shape[-1] == 4:
                data = QuadMesh.from_tensors(verts, faces)
            else:
                data = TriangleMesh.from_tensors(verts, faces)
        else:
            data = torch.from_numpy(data['arr_0'])

        return data
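A sketch of the on-disk .npz layout this reader handles: files saved with explicit 'vertices'/'faces' keys take the mesh branch, while arrays saved positionally are stored by numpy under 'arr_0' and take the fallback branch. File names and shapes are illustrative only.

import numpy as np

verts = np.zeros((8, 3), dtype=np.float32)
faces = np.zeros((12, 3), dtype=np.int64)
np.savez('mesh_sample.npz', vertices=verts, faces=faces)  # read back as a mesh
np.savez('array_sample.npz', np.zeros((32, 32, 32)))      # read back via 'arr_0'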
Example #10
    def __getitem__(self, index):
        """Returns the item at index idx. """
        data = dict()
        attributes = dict()
        name = self.names[index]
        synset_idx = self.synset_idxs[index]

        data = self.cache_convert(name)
        mesh = TriangleMesh.from_tensors(data['vertices'], data['faces'])
        data['adj'] = mesh.compute_adjacency_matrix_sparse().coalesce()
        attributes['name'] = name
        attributes['synset'] = self.synsets[synset_idx]
        attributes['label'] = self.labels[synset_idx]
        return {'data': data, 'attributes': attributes}
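A sketch of consuming a sample returned above: the coalesced sparse adjacency can drive a simple neighborhood aggregation with torch.sparse.mm, assuming the adjacency values are stored as floats.

import torch

def neighborhood_sum(sample):
    # sample is one item returned by the __getitem__ above
    adj = sample['data']['adj']         # sparse (V, V) adjacency matrix
    verts = sample['data']['vertices']  # (V, 3) vertex positions
    return torch.sparse.mm(adj, verts)  # sum of each vertex's neighboring positions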
Example #11
    def __getitem__(self, index):
        """Returns the item at index idx. """
        data = dict()
        attributes = dict()
        synset_idx = self.synset_idxs[index]
        obj_location = self.paths[index] / 'model.obj'
        mesh = TriangleMesh.from_obj(str(obj_location))

        data['vertices'] = mesh.vertices
        data['faces'] = mesh.faces
        attributes['name'] = self.names[index]
        attributes['path'] = obj_location
        attributes['synset'] = self.synsets[synset_idx]
        attributes['label'] = self.labels[synset_idx]
        return {'data': data, 'attributes': attributes}
Example #12
    def _read(self, fpath):
        data = np.load(fpath)
        if 'vertices' in data and 'faces' in data:
            verts = torch.from_numpy(data['vertices'])
            faces = torch.from_numpy(data['faces'])
            if data['faces'].shape[-1] == 4:
                data = QuadMesh.from_tensors(verts, faces)
            else:
                data = TriangleMesh.from_tensors(verts, faces)
        elif 'format' in data:
            matrix_format = data['format'].item()
            sparse = scipy.sparse.csc_matrix(
                (data['data'], data['indices'], data['indptr']), data['shape'])
            data = torch.from_numpy(sparse.todense())
            res = data.size(0)
            data = data.reshape(res, res, res)
        else:
            data = torch.from_numpy(data['arr_0'])

        return data
Example #13
    def __getitem__(self, index):
        data = dict()
        attributes = dict()

        img_name = self.img_names[index]
        obj_name = self.obj_names[index]

        view_num = random.randrange(0, self.views)
        # load image and mesh
        img = Image.open(os.path.join(img_name, 'rendering/{:02d}.png'.format(view_num)))
        mesh = TriangleMesh.from_obj(os.path.join(obj_name, 'models/model_normalized.obj'))
        
        # apply transformations to img
        if self.transform is not None:
            img = self.transform(img)
        else:
            img = torch.FloatTensor(np.array(img))
            img = img.permute(2, 0, 1)
            img = img / 255.
        # load and process cam
        param_location = os.path.join(img_name, 'rendering/rendering_metadata.txt')
        azimuth, elevation, _, distance, _ = np.loadtxt(param_location)[view_num]
        cam_params = kal.mathutils.geometry.transformations.compute_camera_params(azimuth, elevation, distance)

        data['images'] = img
        #data['vertices'] = mesh.vertices
        #data['faces'] = mesh.faces
        data['params'] = dict()
        data['params']['cam_mat'] = cam_params[0]
        data['params']['cam_pos'] = cam_params[1]
        data['params']['azi'] = azimuth
        data['params']['elevation'] = elevation
        data['params']['distance'] = distance
        attributes['img_name'] = img_name
        attributes['obj_name'] = obj_name
        attributes['synset'] = self.synsets[self.synset_idx[index]]
        attributes['label'] = self.labels[self.synset_idx[index]]
        return {'data': data, 'attributes': attributes}
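A sketch of inspecting one rendered view returned above; the nested keys follow the dictionaries built in this __getitem__.

def inspect_sample(sample):
    # sample is one item returned by the __getitem__ above
    img = sample['data']['images']                 # (C, H, W) image tensor with the default conversion
    cam_mat = sample['data']['params']['cam_mat']  # camera matrix
    cam_pos = sample['data']['params']['cam_pos']  # camera position
    print(img.shape, sample['attributes']['label'])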
Example #14
    def _get_data(self, index):
        synset_idx = self.synset_idxs[index]
        obj_location = self.paths[index] / 'model.obj'
        mesh = TriangleMesh.from_obj(str(obj_location))
        return (mesh, )
Example #15
    def _get_data(self, index):
        data = TriangleMesh.from_off(self.filepaths[index])
        return data