    def __init__(self, train=True, subsampling=False, first_subsampling_dl=0.02):
        PointCloudDataset.__init__(self, 'Custom')

        self.label_to_names = {0: 'Cyclist',
                               1: 'Pedestrian',
                               2: 'Car'
                               }

        self.first_subsampling_dl = first_subsampling_dl
        self.subsampling = subsampling

        # Initialize a bunch of variables concerning class labels
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort([k for k, v in self.label_to_names.items()])
        self.label_names = [self.label_to_names[k] for k in self.label_values]
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        self.name_to_label = {v: k for k, v in self.label_to_names.items()}

        self.path = config.DATA['root']
        self.train = train

        # Number of models and models used per epoch
        if self.train:
            self.num_models = 9843
            if config.SETTING['epoch_steps'] and config.SETTING['epoch_steps'] * config.SETTING['batch_num'] < self.num_models:
                self.epoch_n = config.SETTING['epoch_steps'] * config.SETTING['batch_num']
            else:
                self.epoch_n = self.num_models
        else:
            self.num_models = 2468
            self.epoch_n = min(self.num_models, config.SETTING['validation_size'] * config.SETTING['batch_num'])

        self.input_points, self.input_normals, self.input_labels = self.load_subsampled_clouds(orient_correction=True)

        return
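The constructor above reads a module-level `config` object (`config.DATA['root']`, `config.SETTING[...]`) that is not passed as an argument, so the surrounding module must define or import one. A minimal stand-in that satisfies those accesses might look like the sketch below; the key names mirror the code above, while the values are placeholders, not taken from the original project.

# Hypothetical stand-in for the module-level `config` this constructor expects.
# Key names mirror the accesses above; the values are illustrative only.
class _Config:
    DATA = {'root': './Data/Custom'}
    SETTING = {'epoch_steps': 500,     # steps per epoch (falsy -> use all models)
               'batch_num': 10,        # models per batch
               'validation_size': 50}  # validation batches per epoch

config = _Config()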
Example #2
    def __init__(self, config, train=True, orient_correction=True):
        """
        This dataset is small enough to be stored in-memory, so load all point clouds here
        """
        PointCloudDataset.__init__(self, 'ModelNet40')

        ############
        # Parameters
        ############

        # Dict from labels to names
        self.label_to_names = {0: 'airplane',
                               1: 'bathtub',
                               2: 'bed',
                               3: 'bench',
                               4: 'bookshelf',
                               5: 'bottle',
                               6: 'bowl',
                               7: 'car',
                               8: 'chair',
                               9: 'cone',
                               10: 'cup',
                               11: 'curtain',
                               12: 'desk',
                               13: 'door',
                               14: 'dresser',
                               15: 'flower_pot',
                               16: 'glass_box',
                               17: 'guitar',
                               18: 'keyboard',
                               19: 'lamp',
                               20: 'laptop',
                               21: 'mantel',
                               22: 'monitor',
                               23: 'night_stand',
                               24: 'person',
                               25: 'piano',
                               26: 'plant',
                               27: 'radio',
                               28: 'range_hood',
                               29: 'sink',
                               30: 'sofa',
                               31: 'stairs',
                               32: 'stool',
                               33: 'table',
                               34: 'tent',
                               35: 'toilet',
                               36: 'tv_stand',
                               37: 'vase',
                               38: 'wardrobe',
                               39: 'xbox'}

        # Initialize a bunch of variables concerning class labels
        self.init_labels()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.array([])

        # Dataset folder
        self.path = '../../Data/ModelNet40'

        # Type of task conducted on this dataset
        self.dataset_task = 'classification'

        # Update number of classes and dataset task in the configuration
        config.num_classes = self.num_classes
        config.dataset_task = self.dataset_task

        # Parameters from config
        self.config = config

        # Training or test set
        self.train = train

        # Number of models and models used per epoch
        if self.train:
            self.num_models = 9843
            if config.epoch_steps and config.epoch_steps * config.batch_num < self.num_models:
                self.epoch_n = config.epoch_steps * config.batch_num
            else:
                self.epoch_n = self.num_models
        else:
            self.num_models = 2468
            self.epoch_n = min(self.num_models, config.validation_size * config.batch_num)

        #############
        # Load models
        #############

        if 0 < self.config.first_subsampling_dl <= 0.01:
            raise ValueError('subsampling_parameter too low (should be over 1 cm)')

        self.input_points, self.input_normals, self.input_labels = self.load_subsampled_clouds(orient_correction)

        return
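A hedged usage sketch for this constructor: it only needs a config carrying `epoch_steps`, `batch_num`, `validation_size`, and `first_subsampling_dl`. The `Config` class and the enclosing dataset class name `ModelNet40Dataset` are assumptions for illustration, not the project's actual names.

# Minimal sketch, assuming an attribute-style config and that the enclosing
# class is named ModelNet40Dataset; all values are placeholders.
class Config:
    epoch_steps = 300            # steps per epoch (falsy -> use every model)
    batch_num = 16               # models per batch
    validation_size = 30         # validation batches per epoch
    first_subsampling_dl = 0.02  # grid size in meters (must be over 1 cm here)

train_set = ModelNet40Dataset(Config(), train=True)
test_set = ModelNet40Dataset(Config(), train=False)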
Example #3
    def __init__(self,
                 config,
                 train=True,
                 orient_correction=True,
                 spherical_transform=None,
                 transforms=None,
                 self_supervision=False):
        """
        This dataset is small enough to be stored in-memory, so load all point clouds here
        """
        PointCloudDataset.__init__(self, 'ScanObjectNN')
        ######################
        # LOADING ScanObjectNN
        ######################
        self.transforms = transforms

        self.self_supervision = self_supervision

        self.train = train

        #root = './Data/ScanObjectNN/main_split_nobg/'
        root = './Data/ScanObjectNN/split2/'
        if self.self_supervision:
            h5 = h5py.File(
                root + 'training_objectdataset_augmentedrot_scale75.h5', 'r')
            points_train = np.array(h5['data']).astype(np.float32)
            h5.close()
            self.points = points_train
            self.labels = None
        elif train:
            h5 = h5py.File(
                root + 'training_objectdataset_augmentedrot_scale75.h5', 'r')
            self.points = np.array(h5['data']).astype(np.float32)
            self.labels = np.array(h5['label']).astype(int)
            h5.close()
        else:
            h5 = h5py.File(root + 'test_objectdataset_augmentedrot_scale75.h5',
                           'r')
            self.points = np.array(h5['data']).astype(np.float32)
            self.labels = np.array(h5['label']).astype(int)
            h5.close()

        print('Successfully loaded ScanObjectNN with', len(self.points),
              'instances')

        ############
        # Parameters
        ############
        self.transform = spherical_transform
        # Dict from labels to names
        self.label_to_names = {
            0: 'bag',
            1: 'bin',
            2: 'box',
            3: 'cabinet',
            4: 'chair',
            5: 'desk',
            6: 'display',
            7: 'door',
            8: 'shelf',
            9: 'table',
            10: 'bed',
            11: 'pillow',
            12: 'sink',
            13: 'sofa',
            14: 'toilet',
        }

        # Initialize a bunch of variables concerning class labels
        self.init_labels()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.array([])

        # Dataset folder
        self.path = root

        # Type of task conducted on this dataset
        self.dataset_task = 'classification'

        # Update number of classes and dataset task in the configuration
        self.num_classes = 15
        config.num_classes = self.num_classes
        config.dataset_task = self.dataset_task

        # Parameters from config
        self.config = config

        # Number of models and models used per epoch
        if self.train:

            #self.num_models = 11416
            self.num_models = 2321
            if config.epoch_steps and config.epoch_steps * config.batch_num < self.num_models:
                self.epoch_n = config.epoch_steps * config.batch_num
            else:
                self.epoch_n = self.num_models
        else:
            self.num_models = 2882
            #self.num_models = 581
            #self.epoch_n = min(self.num_models, config.validation_size * config.batch_num)
            self.epoch_n = self.num_models
        #############
        # Load models
        #############

        if 0 < self.config.first_subsampling_dl <= 0.01:
            raise ValueError(
                'subsampling_parameter too low (should be over 1 cm)')

        self.input_points, self.input_normals, self.input_labels = self.load_subsampled_clouds(
            orient_correction)

        return
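The three HDF5 branches above repeat the same open/read/close pattern; a context manager closes the file even if a read raises. A sketch of that variant, keeping the key names ('data', 'label') used above; the helper name is hypothetical.

import h5py
import numpy as np

# Context-manager variant of the HDF5 loading done in each branch above.
def load_h5_split(path, with_labels=True):
    with h5py.File(path, 'r') as h5:
        points = np.array(h5['data']).astype(np.float32)
        labels = np.array(h5['label']).astype(int) if with_labels else None
    return points, labels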
Example #4
    def __init__(self,
                 config,
                 train=True,
                 orient_correction=True,
                 transform=None):
        """
        This dataset is small enough to be stored in-memory, so load all point clouds here
        """
        PointCloudDataset.__init__(self, 'ModelNet40')

        ############
        # Parameters
        ############
        self.transform = transform
        # Dict from labels to names
        self.label_to_names = {
            0: 'bathtub',
            1: 'bed',
            2: 'chair',
            3: 'desk',
            4: 'dresser',
            5: 'monitor',
            6: 'night_stand',
            7: 'sofa',
            8: 'table',
            9: 'toilet',
        }

        # Initialize a bunch of variables concerning class labels
        self.init_labels()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.array([])

        # Dataset folder
        self.path = './Data/ModelNet40'

        # Type of task conducted on this dataset
        self.dataset_task = 'classification'

        # Update number of classes and dataset task in the configuration
        config.num_classes = self.num_classes
        config.dataset_task = self.dataset_task

        # Parameters from config
        self.config = config

        # Training or test set
        self.train = train

        # Number of models and models used per epoch
        if self.train:
            self.num_models = 3991
            if config.epoch_steps and config.epoch_steps * config.batch_num < self.num_models:
                self.epoch_n = config.epoch_steps * config.batch_num
            else:
                self.epoch_n = self.num_models
        else:
            self.num_models = 908
            #self.epoch_n = min(self.num_models, config.validation_size * config.batch_num)
            self.epoch_n = self.num_models
        #############
        # Load models
        #############

        if 0 < self.config.first_subsampling_dl <= 0.01:
            raise ValueError(
                'subsampling_parameter too low (should be over 1 cm)')

        self.input_points, self.input_normals, self.input_labels = self.load_subsampled_clouds(
            orient_correction)
        #self.input_points = normalize(self.input_points)
        return
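The models-per-epoch block is repeated verbatim in every constructor in this collection; it could be factored into a single helper. A refactoring sketch (function name hypothetical) that mirrors the ModelNet40 variant; note the ScanObjectNN and ScanNet variants above simply use all models at test time instead of the min(...) clamp.

# Refactoring sketch of the epoch-length logic shared by the constructors above.
def compute_epoch_n(num_models, config, train):
    if train:
        if config.epoch_steps and config.epoch_steps * config.batch_num < num_models:
            return config.epoch_steps * config.batch_num
        return num_models
    return min(num_models, config.validation_size * config.batch_num)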
Example #5
    def __init__(self,
                 config,
                 train=True,
                 orient_correction=True,
                 spherical_transform=None,
                 transforms=None,
                 self_supervision=False):
        """
        This dataset is small enough to be stored in-memory, so load all point clouds here
        """
        PointCloudDataset.__init__(self, 'ScanNet')
        ######################
        # LOADING ScanNet
        ######################
        self.transforms = transforms

        self.self_supervision = self_supervision

        self.train = train

        root = './Data/ScanNet/'
        train_files = "./Data/ScanNet/cls/train_files.txt"
        val_files = "./Data/ScanNet/cls/test_files.txt"
        # Self-supervised and supervised training read the same split
        if self.self_supervision or train:
            self.points, self.labels = load_cls_train_val(train_files,
                                                          train=True)
        else:
            self.points, self.labels = load_cls_train_val(val_files,
                                                          train=True)
        self.points = self.points[:, :, :3]
        self.points = normalize(self.points)
        print('Successfully loaded ScanNet with', len(self.labels), 'instances')

        ############
        # Parameters
        ############
        self.transform = spherical_transform
        # Dict from labels to names
        self.label_to_names = {
            0: 'trash',
            1: 'basket',
            2: 'bathtub',
            3: 'bed',
            4: 'shelf',
            5: 'cabinet',
            6: 'chair',
            7: 'keyboard',
            8: 'tv',
            9: 'lamp',
            10: 'laptop',
            11: 'microwave',
            12: 'pillow',
            13: 'printer',
            14: 'sofa',
            15: 'stove',
            16: 'table'
        }

        # Initialize a bunch of variables concerning class labels
        self.init_labels()

        # List of classes ignored during training (can be empty)
        self.ignored_labels = np.array([])

        # Dataset folder
        self.path = root

        # Type of task conducted on this dataset
        self.dataset_task = 'classification'

        # Update number of classes and dataset task in the configuration
        self.num_classes = 17
        config.num_classes = self.num_classes
        config.dataset_task = self.dataset_task

        # Parameters from config
        self.config = config

        # Number of models and models used per epoch
        if self.train:
            # self.num_models = 2321
            self.num_models = 9423
            if config.epoch_steps and config.epoch_steps * config.batch_num < self.num_models:
                self.epoch_n = config.epoch_steps * config.batch_num
            else:
                self.epoch_n = self.num_models
        else:
            self.num_models = 2634
            # self.epoch_n = min(self.num_models, config.validation_size * config.batch_num)
            self.epoch_n = self.num_models
        #############
        # Load models
        #############

        if 0 < self.config.first_subsampling_dl <= 0.01:
            raise ValueError(
                'subsampling_parameter too low (should be over 1 cm)')

        self.input_points, self.input_normals, self.input_labels = self.load_subsampled_clouds(
            orient_correction)

        return
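`normalize` is called on the loaded clouds above but not defined in any of these snippets. A common implementation centers each cloud and scales it into the unit sphere; the version below is an assumption about its behavior, not the project's definition.

import numpy as np

# Assumed implementation of `normalize`: center each cloud of a (B, N, 3)
# batch at the origin and scale it into the unit sphere. The shape convention
# follows the `self.points[:, :, :3]` slicing above.
def normalize(points):
    centered = points - points.mean(axis=1, keepdims=True)
    scale = np.linalg.norm(centered, axis=2, keepdims=True).max(axis=1, keepdims=True)
    return centered / scale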