Code Example #1
File: FAUST_remeshed.py  Project: aaroncnu/GeomFmaps
    def __init__(self, config):
        Dataset.__init__(self, 'surreal')

        ####################
        # Dataset parameters
        ####################

        # Type of task conducted on this dataset
        # self.network_model = 'shape_matching'  # this is the only type of model here but it comes from KPConv code

        ##########################
        # Parameters for the files
        ##########################

        # Path of the folder containing files
        self.dataset_name = 'FAUST_r'
        self.path = '../../../media/donati/Data1/Datasets/FAUST_r/'
        self.data_folder = 'off_2/'
        self.spectral_folder = 'spectral/'
        self.txt_file = 'FAUST_r_training.txt'
        self.txt_file_test = 'FAUST_r_test.txt'

        #########################################################################
        # Decide the number of shapes to keep in the training set (exp 2 setting)
        #########################################################################
        self.split = config.split
        self.num_train = config.num_train  # -1 for all

        # Number of eigenvalues kept for this model's functional maps (fmaps)
        self.neig = config.neig
        self.neig_full = config.neig_full

        # Number of threads for the input pipeline
        self.num_threads = config.input_threads
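
A minimal instantiation sketch for the example above. The enclosing class name is hypothetical, and the config fields are inferred from the attributes this __init__ reads (split, num_train, neig, neig_full, input_threads):

    from types import SimpleNamespace

    # Hypothetical config object carrying the fields read above
    config = SimpleNamespace(split='train', num_train=-1,
                             neig=30, neig_full=120, input_threads=8)
    dataset = FAUSTRemeshedDataset(config)  # assumed class name for this __init__
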
Code Example #2
File: DALES.py  Project: powerteam-urop/KPConv
    def __init__(self, input_threads=8, load_test=False):
        Dataset.__init__(self, 'DALES')

        ###########################
        # Object classes parameters
        ###########################

        # Dict from labels to names
        # ground(1), vegetation(2), cars(3), trucks(4), power lines(5), fences(6), poles(7) and buildings(8).

        self.label_to_names = {
                               1: 'ground',
                               2: 'vegetation',
                               3: 'cars',
                               4: 'trucks',
                               5: 'power_lines',
                               6: 'fences',
                               7: 'poles',
                               8: 'buildings',
                               }


        # Initiate a bunch of variables concerning class labels
        self.init_labels()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.sort([0])

        ####################
        # Dataset parameters
        ####################

        # Type of task conducted on this dataset
        self.network_model = 'cloud_segmentation'

        # Number of input threads
        self.num_threads = input_threads

        # Load test set or train set?
        self.load_test = load_test

        ##########################
        # Parameters for the files
        ##########################

        # Path of the folder containing ply files
        self.path = 'Data/DALES'

        # Paths of the training and test files
        self.train_path = join(self.path, 'train')
        self.test_path = join(self.path, 'test')


        # List of training and test files
        self.train_files = np.sort([join(self.train_path, f) for f in listdir(self.train_path) if f[-4:] == '.ply'])
        self.test_files = np.sort([join(self.test_path, f) for f in listdir(self.test_path) if f[-4:] == '.ply'])

        # Validation split: per-scene split ids and the id held out for validation
        self.all_splits = [0, 1, 2, 3]
        self.validation_split = 1
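
init_labels is defined on the shared Dataset base class and is not shown in these snippets. A sketch of what it plausibly derives from label_to_names, consistent with how the label attributes are used across these examples (an assumption, not the project's verbatim code):

    import numpy as np

    def init_labels(self):
        # Derive class bookkeeping from the label_to_names dict
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort([k for k in self.label_to_names.keys()])
        self.label_names = [self.label_to_names[k] for k in self.label_values]
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        self.name_to_label = {v: k for k, v in self.label_to_names.items()}
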
Code Example #3
File: NPM3D.py  Project: zxczrx123/KPConv
    def __init__(self, input_threads=8, load_test=False):
        Dataset.__init__(self, 'NPM3D')

        ###########################
        # Object classes parameters
        ###########################

        # Dict from labels to names
        self.label_to_names = {0: 'unclassified',
                               1: 'ground',
                               2: 'buildings',
                               3: 'poles',
                               4: 'bollards',
                               5: 'trash_cans',
                               6: 'barriers',
                               7: 'pedestrians',
                               8: 'cars',
                               9: 'natural'}


        # Initiate a bunch of variables concerning class labels
        self.init_labels()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.sort([0])

        ####################
        # Dataset parameters
        ####################

        # Type of task conducted on this dataset
        self.network_model = 'cloud_segmentation'

        # Number of input threads
        self.num_threads = input_threads

        # Load test set or train set?
        self.load_test = load_test

        ##########################
        # Parameters for the files
        ##########################

        # Path of the folder containing ply files
        self.path = 'Data/NPM3D'

        # Paths of the training and test files
        self.train_path = join(self.path, 'training_points')
        self.test_path = join(self.path, 'test_points')


        # List of training and test files
        self.train_files = np.sort([join(self.train_path, f) for f in listdir(self.train_path) if f[-4:] == '.ply'])
        self.test_files = np.sort([join(self.test_path, f) for f in listdir(self.test_path) if f[-4:] == '.ply'])

        # Validation split: per-scene split ids and the id held out for validation
        self.all_splits = [0, 1, 2, 3]
        self.validation_split = 1
Code Example #4
    def __init__(self, input_threads=8):
        Dataset.__init__(self, 'S3DIS')

        ###########################
        # Object classes parameters
        ###########################

        # Dict from labels to names
        self.label_to_names = {0: 'ceiling',
                               1: 'floor',
                               2: 'wall',
                               3: 'beam',
                               4: 'column',
                               5: 'window',
                               6: 'door',
                               7: 'chair',
                               8: 'table',
                               9: 'bookcase',
                               10: 'sofa',
                               11: 'board',
                               12: 'clutter'}

        # Initiate a bunch of variables concerning class labels
        self.init_labels()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.array([])

        ####################
        # Dataset parameters
        ####################

        # Type of task conducted on this dataset
        self.network_model = 'cloud_segmentation'

        # Number of input threads
        self.num_threads = input_threads

        ##########################
        # Parameters for the files
        ##########################

        # Path of the folder containing ply files
        self.path = 'Data/Stanford3d/Stanford3dDataset_v1.2'

        # Path of the training files
        self.train_path = 'original_ply'

        # Validation split: one split id per area and the id held out for validation
        self.cloud_names = ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_6']
        self.all_splits = [0, 1, 2, 3, 4, 5]
        self.validation_split = 5

        ###################
        # Prepare ply files
        ###################

        self.prepare_S3DIS_ply()
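
A sketch of how all_splits and validation_split are typically consumed (illustrative only, not code from the project): each cloud carries a split id, and clouds whose id equals validation_split are held out:

    # With the values above, Area_6 (id 5) is the validation area
    train_clouds = [name for name, split in zip(self.cloud_names, self.all_splits)
                    if split != self.validation_split]
    val_clouds = [name for name, split in zip(self.cloud_names, self.all_splits)
                  if split == self.validation_split]
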
Code Example #5
File: Valeo.py  Project: YuckFu/KPConv
    def __init__(self, input_threads=8, load_test=False):
        Dataset.__init__(self, 'Valeo')

        ###########################
        # Object classes parameters
        ###########################

        # Dict from labels to names
        self.label_to_names = {
            0: 'unclassified',
            1: 'ground',
            2: 'buildings',
            3: 'poles',
            4: 'bollards',
            5: 'trash_cans',
            6: 'barriers',
            7: 'pedestrians',
            8: 'cars',
            9: 'natural'
        }

        # Initiate a bunch of variables concerning class labels
        self.init_labels()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.sort([0])

        ####################
        # Dataset parameters
        ####################

        # Type of task conducted on this dataset
        self.network_model = 'cloud_segmentation'

        # Number of input threads
        self.num_threads = input_threads

        # Load test set or train set? (hard-coded to True here, overriding the load_test argument)
        self.load_test = True

        ##########################
        # Parameters for the files
        ##########################

        # Path of the folder containing ply files
        if 'SSH_CLIENT' in os.environ.keys():
            self.path = '/home/hugues/Data/Valeo'
        else:
            self.path = '/media/hugues/Data/These/Datasets/Valeo'

        # Paths of the training and test files
        self.train_path = join(self.path, 'training_points')
        self.test_path = join(self.path, 'test_points')

        # Validation split: per-scene split ids and the id held out for validation
        self.all_splits = [0, 1, 2, 3]
        self.validation_split = 1
Code Example #6
    def __init__(self,
                 batch_num,
                 input_pts,
                 dataset_path,
                 pickle_path,
                 input_threads=8):
        """
        Initialization method.
        """
        Dataset.__init__(self, 'kitti')

        self.synset_to_category = {
            '02691156': 'Airplane',
            '02958343': 'Car',
            '03001627': 'Chair',
            '03636649': 'Lamp',
            '04379243': 'Table',
            '02933112': 'Cabinet',
            '04256520': 'Sofa',
            '04530566': 'Boat',
        }

        self.init_synsets()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.array([])

        # Type of task conducted on this dataset
        self.network_model = 'completion'

        # Partial point clouds and ids used for each car
        self.partial_points = {}
        self.ids = {}

        ##########################
        # Parameters for the files
        ##########################

        # Path of the dataset src folder
        self.dataset_path = dataset_path
        self.pickle_path = pickle_path

        self.batch_num = batch_num

        # Number of threads
        self.num_threads = input_threads

        self.input_pts = input_pts

        self.pcd_dir = join(self.dataset_path, 'cars')
        self.bbox_dir = join(self.dataset_path, 'bboxes')
        self.tracklets_dir = join(self.dataset_path, 'tracklets')

        self.num_cars = 2401  # TODO: fix hardcoded value
Code Example #7
    def __init__(self, files, voxel_size=0.03):
        Dataset.__init__(self, 'Mini')
        self.num_test = 0
        self.anc_points = {"train": [], "test": []}
        self.ids_list = {"train": [], "test": []}
        for filename in files:
            pcd = open3d.read_point_cloud(filename)
            pcd = open3d.voxel_down_sample(pcd, voxel_size=voxel_size)
            points = np.array(pcd.points)
            self.anc_points['test'] += [points]
            self.ids_list['test'] += [filename]
            self.num_test += 1
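
open3d.read_point_cloud and open3d.voxel_down_sample are legacy Open3D (pre-0.9) free functions. If running against a newer Open3D, the equivalent calls would be:

    import open3d as o3d

    pcd = o3d.io.read_point_cloud(filename)
    pcd = pcd.voxel_down_sample(voxel_size=voxel_size)
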
Code Example #8
File: Drill.py  Project: liuxinren/pyramidpoints
    def __init__(self, input_threads=8, load_test=False):
        Dataset.__init__(self, 'Drill')

        ###########################
        # Object classes parameters
        ###########################

        # Dict from labels to names
        self.label_to_names = {0: 'unclassified', 1: 'drill', 2: 'other'}

        # Initiate a bunch of variables concerning class labels
        self.init_labels()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.sort([0])

        ####################
        # Dataset parameters
        ####################

        # Type of task conducted on this dataset
        self.network_model = 'cloud_segmentation'

        # Number of input threads
        self.num_threads = input_threads

        # Load test set or train set?
        self.load_test = load_test

        ##########################
        # Parameters for the files
        ##########################

        # Path of the folder containing ply files
        self.path = 'Data/Drill'

        # Paths of the training and test files
        self.train_path = join(self.path, 'training_points')
        self.test_path = join(self.path, 'test_points')

        # List of training and test files
        self.train_files = np.sort([
            join(self.train_path, f) for f in listdir(self.train_path)
            if f[-4:] == '.ply'
        ])
        self.test_files = np.sort([
            join(self.test_path, f) for f in listdir(self.test_path)
            if f[-4:] == '.ply'
        ])

        # Validation split: one split id per training file; files with id 5 are
        # held out (assumes at least 6 training files)
        self.all_splits = list(range(0, len(self.train_files)))
        self.validation_split = 5
Code Example #9
File: KITTI.py  Project: zhangguanghui1/D3Feat
    def __init__(self, input_threads=8, first_subsampling_dl=0.30, load_test=False):
        Dataset.__init__(self, 'KITTI')
        self.network_model = 'descriptor'
        self.num_threads = input_threads
        self.load_test = load_test
        self.root = 'data/kitti/'
        self.icp_path = 'data/kitti/icp'
        self.voxel_size = first_subsampling_dl
        self.matching_search_voxel_size = first_subsampling_dl * 1.5

        # Initiate containers
        self.anc_points = {'train': [], 'val': [], 'test': []}
        self.files = {'train': [], 'val': [], 'test': []}

        if self.load_test:
            self.prepare_kitti_ply('test')
        else:
            self.prepare_kitti_ply(split='train')
            self.prepare_kitti_ply(split='val')
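
matching_search_voxel_size (1.5x the subsampling grid) is the radius under which two points from aligned fragments are typically declared a correspondence. A hedged sketch with a hypothetical helper, not D3Feat's actual code:

    import numpy as np
    from scipy.spatial import cKDTree

    def find_correspondences(anc_pts, pos_pts, search_radius):
        # Both point sets are assumed to be in a common frame (e.g. after ICP)
        tree = cKDTree(pos_pts)
        dists, idx = tree.query(anc_pts, k=1)
        mask = dists < search_radius
        return np.stack([np.where(mask)[0], idx[mask]], axis=1)
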
Code Example #10
File: ETH.py  Project: zhangguanghui1/D3Feat
    def __init__(self, input_threads=8, load_test=False):
        Dataset.__init__(self, 'ETH')
        # Type of task conducted on this dataset
        self.network_model = 'descriptor'

        # Number of input threads
        self.num_threads = input_threads

        # Load test set or train set?
        self.load_test = load_test

        ##########################
        # Parameters for the files
        ##########################

        # Initiate containers
        self.anc_points = {'test': []}
        self.ids_list = {'test': []}

        if self.load_test:
            self.prepare_geometry_registration_eth()
        else:
            # ETH is used for evaluation only; no training split is available
            exit(-1)
Code Example #11
    def __init__(self, input_threads=8, voxel_size=0.03, load_test=False):
        Dataset.__init__(self, 'ThreeDMatch')

        ####################
        # Dataset parameters
        ####################

        # Type of task conducted on this dataset
        self.network_model = 'descriptor'

        # Number of input threads
        self.num_threads = input_threads

        # Load test set or train set?
        self.load_test = load_test

        # Voxel size used for downsampling
        self.downsample = voxel_size

        ##########################
        # Parameters for the files
        ##########################

        # Path of the folder containing ply files
        self.root = 'data/3DMatch/'

        # Initiate containers
        self.anc_points = {'train': [], 'val': [], 'test': []}
        self.keypts = {'train': [], 'val': [], 'test': []}
        self.anc_to_pos = {'train': {}, 'val': {}, 'test': {}}
        self.ids_list = {'train': [], 'val': [], 'test': []}

        if self.load_test:
            self.prepare_geometry_registration()
        else:
            self.prepare_3dmatch_ply(split='train')
            self.prepare_3dmatch_ply(split='val')
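
anc_to_pos maps each anchor fragment to the overlapping fragments it can be paired with during training. A hedged sketch of pair sampling (the helper below is hypothetical):

    import random

    def sample_positive_pair(self, split='train'):
        # Pick an anchor fragment, then one of its overlapping partners
        anc_id = random.choice(list(self.anc_to_pos[split].keys()))
        pos_id = random.choice(self.anc_to_pos[split][anc_id])
        return anc_id, pos_id
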
Code Example #12
    def __init__(self, input_threads=8):
        Dataset.__init__(self, 'Semantic3D')

        ###########################
        # Object classes parameters
        ###########################

        # Dict from labels to names
        self.label_to_names = {0: 'unlabeled',
                               1: 'man-made terrain',
                               2: 'natural terrain',
                               3: 'high vegetation',
                               4: 'low vegetation',
                               5: 'buildings',
                               6: 'hard scape',
                               7: 'scanning artefacts',
                               8: 'cars'}

        # Initiate a bunch of variables concerning class labels
        self.init_labels()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.sort([0])

        ####################
        # Dataset parameters
        ####################

        # Type of task conducted on this dataset
        self.network_model = 'cloud_segmentation'

        # Number of input threads
        self.num_threads = input_threads

        ##########################
        # Parameters for the files
        ##########################

        # Path of the folder containing ply files
        self.path = 'Data/Semantic3D'

        # Original data path
        self.original_folder = 'original_data'

        # Path of the training files
        self.train_path = join(self.path, 'ply_subsampled/train')
        self.test_path = join(self.path, 'ply_subsampled/reduced-8')
        #self.test_path = join(self.path, 'ply_subsampled/semantic-8')

        # List of training and test files
        self.train_files = np.sort([join(self.train_path, f) for f in listdir(self.train_path) if f[-4:] == '.ply'])
        self.test_files = np.sort([join(self.test_path, f) for f in listdir(self.test_path) if f[-4:] == '.ply'])

        # Validation split assignment (choose validation split = 6 for final networks)
        # self.all_splits=[0, 1, 4, 2, 3, 4, 3, 0, 1, 2, 3, 4, 2, 0, 1]
        self.all_splits = [0, 1, 4, 5, 3, 4, 3, 0, 1, 2, 3, 4, 2, 0, 5]
        self.validation_split = 5

        # Ascii files dict for testing
        self.ascii_files = {'MarketplaceFeldkirch_Station4_rgb_intensity-reduced.ply': 'marketsquarefeldkirch4-reduced.labels',
                            'sg27_station10_rgb_intensity-reduced.ply': 'sg27_10-reduced.labels',
                            'sg28_Station2_rgb_intensity-reduced.ply': 'sg28_2-reduced.labels',
                            'StGallenCathedral_station6_rgb_intensity-reduced.ply': 'stgallencathedral6-reduced.labels',
                            'birdfountain_station1_xyz_intensity_rgb.ply': 'birdfountain1.labels',
                            'castleblatten_station1_intensity_rgb.ply': 'castleblatten1.labels',
                            'castleblatten_station5_xyz_intensity_rgb.ply': 'castleblatten5.labels',
                            'marketplacefeldkirch_station1_intensity_rgb.ply': 'marketsquarefeldkirch1.labels',
                            'marketplacefeldkirch_station4_intensity_rgb.ply': 'marketsquarefeldkirch4.labels',
                            'marketplacefeldkirch_station7_intensity_rgb.ply': 'marketsquarefeldkirch7.labels',
                            'sg27_station10_intensity_rgb.ply': 'sg27_10.labels',
                            'sg27_station3_intensity_rgb.ply': 'sg27_3.labels',
                            'sg27_station6_intensity_rgb.ply': 'sg27_6.labels',
                            'sg27_station8_intensity_rgb.ply': 'sg27_8.labels',
                            'sg28_station2_intensity_rgb.ply': 'sg28_2.labels',
                            'sg28_station5_xyz_intensity_rgb.ply': 'sg28_5.labels',
                            'stgallencathedral_station1_intensity_rgb.ply': 'stgallencathedral1.labels',
                            'stgallencathedral_station3_intensity_rgb.ply': 'stgallencathedral3.labels',
                            'stgallencathedral_station6_intensity_rgb.ply': 'stgallencathedral6.labels'}

        ###################
        # Prepare ply files
        ###################

        self.prepare_data()
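
The ascii_files dict maps each test cloud to the .labels file name the Semantic3D benchmark expects. A hypothetical submission helper (not part of the project) could consume it like this:

    import numpy as np
    from os.path import join

    def save_predictions(self, cloud_name, preds, out_dir):
        # preds: 1-D array of predicted label values for one test cloud
        np.savetxt(join(out_dir, self.ascii_files[cloud_name]), preds, fmt='%d')
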
Code Example #13
File: ShapeNetPart.py  Project: YuckFu/KPConv
    def __init__(self, class_name, input_threads=8):
        """
        Initialization method. Give the name of the object class to segment (for example 'Airplane') or 'multi' to segment
        all objects with a single model.
        """
        Dataset.__init__(self, 'ShapeNetPart_' + class_name)

        ###########################
        # Object classes parameters
        ###########################

        # Dict from object labels to names
        self.label_to_names = {
            0: 'Airplane',
            1: 'Bag',
            2: 'Cap',
            3: 'Car',
            4: 'Chair',
            5: 'Earphone',
            6: 'Guitar',
            7: 'Knife',
            8: 'Lamp',
            9: 'Laptop',
            10: 'Motorbike',
            11: 'Mug',
            12: 'Pistol',
            13: 'Rocket',
            14: 'Skateboard',
            15: 'Table'
        }

        self.init_labels()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.array([])

        # Number of parts for each object
        self.num_parts = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]

        # Type of dataset (one of the class names or 'multi')
        self.ShapeNetPartType = class_name

        if self.ShapeNetPartType == 'multi':

            # Task type and number of models
            self.network_model = 'multi_segmentation'
            self.num_train = 14007
            self.num_test = 2874

        elif self.ShapeNetPartType in self.label_names:

            # Task type; model counts are computed when init_subsample_clouds is called
            self.network_model = 'segmentation'
            self.num_train = None
            self.num_test = None

        else:
            raise ValueError(
                'Unsupported ShapeNetPart object class : \'{:s}\''.format(
                    self.ShapeNetPartType))

        ##########################
        # Parameters for the files
        ##########################

        # Path of the folder containing ply files
        if 'SSH_CLIENT' in os.environ.keys():
            self.path = '/home/hugues/Data/ShapeNetPart/shapenetcore_partanno_segmentation_benchmark_v0'
        else:
            self.path = '../1-KPConv/datasets/data/shapenetcore_partanno_segmentation_benchmark_v0'

        # Number of threads
        self.num_threads = input_threads

        ###################
        # Prepare ply files
        ###################

        self.prepare_ShapeNetPart_ply()

        return
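
Usage sketch for the class-name switch above (the enclosing class name is an assumption): passing a category name builds a per-class part-segmentation dataset, while 'multi' covers all 16 categories with one model:

    single = ShapeNetPartDataset('Airplane')  # network_model = 'segmentation'
    multi = ShapeNetPartDataset('multi')      # network_model = 'multi_segmentation'
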
Code Example #14
    def __init__(self, batch_num, input_pts, dataset_path, input_threads=8):
        """
        Initialization method.
        """
        Dataset.__init__(self, 'pc_shapenetCompletionBenchmark2048')

        self.synset_to_category = {
            '02691156': 'Airplane',
            '02958343': 'Car',
            '03001627': 'Chair',
            '03636649': 'Lamp',
            '04379243': 'Table',
            '02933112': 'Cabinet',
            '04256520': 'Sofa',
            '04530566': 'Boat',
        }

        self.init_synsets()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.array([])

        # Type of task conducted on this dataset
        self.network_model = 'completion'

        # Partial, complete point clouds & categories used for each split
        self.partial_points = {}
        self.complete_points = {}
        self.ids = {}

        ##########################
        # Parameters for the files
        ##########################

        # Path of the dataset src folder
        self.dataset_path = dataset_path

        self.batch_num = batch_num

        # Number of threads
        self.num_threads = input_threads

        self.input_pts = input_pts

        # Load classmaps
        classmap = load_csv(
            join(self.dataset_path, 'synsetoffset2category.txt'))
        self.classmap = {}
        for i in range(classmap.shape[0]):
            self.classmap[str(classmap[i][1]).zfill(8)] = classmap[i][0]

        # Split file paths lists
        self.train_split_file = join(self.dataset_path, 'train.list')
        self.valid_split_file = join(self.dataset_path, 'val.list')
        self.test_split_file = join(self.dataset_path, 'test.list')

        # Split data paths
        self.train_data_paths = sorted([
            join(self.dataset_path, 'train', 'partial',
                 k.rstrip() + '.h5')
            for k in open(self.train_split_file).readlines()
        ])
        self.val_data_paths = sorted([
            join(self.dataset_path, 'val', 'partial',
                 k.rstrip() + '.h5')
            for k in open(self.valid_split_file).readlines()
        ])
        self.test_data_paths = sorted([
            join(self.dataset_path, 'test', 'partial',
                 k.rstrip() + '.h5')
            for k in open(self.test_split_file).readlines()
        ])

        # Make dataset sizes divisible by batch_num and record the size of each split
        self.num_train = int(
            len(self.train_data_paths) / batch_num) * batch_num  # 28974
        self.train_data_paths = self.train_data_paths[0:self.num_train]
        self.num_valid = int(
            len(self.val_data_paths) / batch_num) * batch_num  # 800
        self.val_data_paths = self.val_data_paths[0:self.num_valid]
        self.num_test = int(
            len(self.test_data_paths) / batch_num) * batch_num  # 1184
        self.test_data_paths = self.test_data_paths[0:self.num_test]
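
The trimming above keeps the largest multiple of batch_num so that every batch is full. For example:

    batch_num = 16
    paths = list(range(100))                       # stand-in for 100 file paths
    num = int(len(paths) / batch_num) * batch_num  # 96, largest multiple of 16
    paths = paths[0:num]                           # drops the trailing 4 paths
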
Code Example #15
    def __init__(self, input_threads=8):
        Dataset.__init__(self, 'ModelNet40')

        ###########################
        # Object classes parameters
        ###########################

        # Dict from labels to names
        self.label_to_names = {0: 'airplane',
                               1: 'bathtub',
                               2: 'bed',
                               3: 'bench',
                               4: 'bookshelf',
                               5: 'bottle',
                               6: 'bowl',
                               7: 'car',
                               8: 'chair',
                               9: 'cone',
                               10: 'cup',
                               11: 'curtain',
                               12: 'desk',
                               13: 'door',
                               14: 'dresser',
                               15: 'flower_pot',
                               16: 'glass_box',
                               17: 'guitar',
                               18: 'keyboard',
                               19: 'lamp',
                               20: 'laptop',
                               21: 'mantel',
                               22: 'monitor',
                               23: 'night_stand',
                               24: 'person',
                               25: 'piano',
                               26: 'plant',
                               27: 'radio',
                               28: 'range_hood',
                               29: 'sink',
                               30: 'sofa',
                               31: 'stairs',
                               32: 'stool',
                               33: 'table',
                               34: 'tent',
                               35: 'toilet',
                               36: 'tv_stand',
                               37: 'vase',
                               38: 'wardrobe',
                               39: 'xbox'}
        # Initiate a bunch of variables concerning class labels
        self.init_labels()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.array([])

        ####################
        # Dataset parameters
        ####################

        # Type of task conducted on this dataset
        self.network_model = 'classification'

        # Number of input threads
        self.num_threads = input_threads

        ##########################
        # Parameters for the files
        ##########################

        # Path of the folder containing ply files
        if 'SSH_CLIENT' in os.environ.keys():
            self.path = '/home/hugues/Data/ModelNet'
            self.data_folder = 'modelnet40_normal_resampled'
        else:
            self.path = '/media/hugues/Data/These/Datasets/ModelNet'
            self.data_folder = 'ModelNet40_pointnet'

        # Number of models
        self.num_train = 9843
        self.num_test = 2468

        # Number of threads for the input pipeline (redundant: num_threads is already set above)
        self.num_threads = input_threads
Code Example #16
File: ShapeNetV1.py  Project: no-materials/kpcn
    def __init__(self, input_threads=8):
        """
        Initialization method.
        """
        Dataset.__init__(self, 'ShapeNetV1')

        ###########################
        # Object classes parameters
        ###########################

        # self.synset_to_category = {
        #     '02691156': 'Airplane',
        #     '02773838': 'Bag',
        #     '02954340': 'Cap',
        #     '02958343': 'Car',
        #     '03001627': 'Chair',
        #     '03261776': 'Earphone',
        #     '03467517': 'Guitar',
        #     '03624134': 'Knife',
        #     '03636649': 'Lamp',
        #     '03642806': 'Laptop',
        #     '03790512': 'Motorbike',
        #     '03797390': 'Mug',
        #     '03948459': 'Pistol',
        #     '04099429': 'Rocket',
        #     '04225987': 'Skateboard',
        #     '04379243': 'Table',
        #     '02933112': 'Cabinet',
        #     '04256520': 'Sofa',
        #     '04530566': 'Boat',
        #     '02818832': 'Bed',
        #     '02828884': 'Bench',
        #     '02871439': 'Bookshelf',
        #     '02924116': 'Bus',
        #     '03211117': 'Display',
        #     '04004475': 'Printer',
        #     '04401088': 'Telephone'
        # }

        self.synset_to_category = {
            '02691156': 'Airplane',
            '02958343': 'Car',
            '03001627': 'Chair',
            '03636649': 'Lamp',
            '04379243': 'Table',
            '02933112': 'Cabinet',
            '04256520': 'Sofa',
            '04530566': 'Boat',
        }

        self.init_synsets()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.array([])

        # Type of task conducted on this dataset
        self.network_model = 'completion'

        # Number of models
        self.num_train = 57946  # account for each scan
        self.num_valid = 1600  # account for each scan
        self.num_test = 2400  # account for each scan

        # Partial, complete point clouds & categories used for each split
        self.partial_points = {}
        self.complete_points = {}
        self.categories = {}  # Unused?...

        ##########################
        # Parameters for the files
        ##########################

        # Path of the dataset src folder
        self.dataset_path = '/Volumes/warm_blue/datasets/ShapeNetV1'

        # Path to preprocessed data folder
        self.data_path = join(dirname(dirname(realpath(__file__))), 'data',
                              'shapenetV1')
        if not exists(self.data_path):
            makedirs(self.data_path)

        # Split file paths
        self.train_split_file = join(self.data_path, 'train.list')
        self.valid_split_file = join(self.data_path, 'valid.list')
        self.test_split_file = join(self.data_path, 'test.list')
        self.test_novel_split_file = join(self.data_path, 'test_novel.list')
        self.one_model_split_file = join(self.data_path,
                                         'one_model.list')  # FOR DEBUG

        # Number of threads
        self.num_threads = input_threads

        # Number of scans from virtual depth rendering during partial pc generation in the preprocess step
        self.num_scans = 2
        assert self.num_scans == num_scans  # num_scans: module-level constant, not shown in this snippet

        # TODO: I should probably center & rescale to 1m the plys before saving them to disk (I should test via blender)

        return