Beispiel #1
0
    def __init__(self):
        """Configure the KITTI point-cloud dataset from the global ``cfg``.

        Reads data paths and hyper-parameters from ``cfg``, indexes every
        ``.bin`` frame under ``<root>/point_cloud``, and splits the frame
        indices into shuffled train/val/test lists per ``cfg.split_ratio``.
        """
        self.name = 'KITTI'
        self.root = cfg.train_data_path
        self.num_pts = cfg.num_points
        self.pc_path = os.path.join(self.root, 'point_cloud')
        self.label_path = os.path.join(self.root, 'labels')
        # Frame ids are the file stems of the .bin scans in pc_path.
        # (endswith is stricter than the old `'.bin' in s` substring test.)
        self.frames = [s.split('.')[0] for s in os.listdir(self.pc_path)
                       if s.endswith('.bin')]
        self.num_classes = cfg.num_classes
        # A single-class task is re-encoded as 0 — presumably downstream
        # heads branch on num_classes == 0 for binary/regression; verify.
        if self.num_classes == 1:
            self.num_classes = 0
        self.num_features = cfg.num_features
        self.num_target_attributes = cfg.num_target_attributes
        self.split_ratio = cfg.split_ratio
        self.num_samples = len(self.frames)
        # Split fractions must sum to 1 (train / val / test).
        assert np.abs(np.sum(self.split_ratio) - 1.0) < 1e-5
        train_split = int(self.num_samples * self.split_ratio[0])
        val_split = int(self.num_samples * np.sum(self.split_ratio[:2]))

        # Store indices (not frame names) so loaders index into self.frames.
        self.frames_indices = np.arange(self.num_samples)
        self.train_list = self.frames_indices[:train_split]
        self.val_list = self.frames_indices[train_split:val_split]
        self.test_list = self.frames_indices[val_split:]

        # Shuffle train/val; test order stays deterministic.
        self.train_list = DP.shuffle_list(self.train_list)
        self.val_list = DP.shuffle_list(self.val_list)
Beispiel #2
0
    def __init__(self, mode, test_id=None):
        """Set up the SemanticKITTI dataset for the given split.

        Builds the label lookup tables, selects the file list matching
        *mode* ('training' / 'validation' / 'test'), shuffles it, and for
        the test split pre-seeds per-point sampling possibilities.
        """
        self.name = 'SemanticKITTI'
        self.dataset_path = '/data/WQ/DataSet/semantic-kitti/dataset/sequences_0.06'
        class_names = ('unlabeled', 'car', 'bicycle', 'motorcycle', 'truck',
                       'other-vehicle', 'person', 'bicyclist', 'motorcyclist',
                       'road', 'parking', 'sidewalk', 'other-ground',
                       'building', 'fence', 'vegetation', 'trunk', 'terrain',
                       'pole', 'traffic-sign')
        # Label ids 0..19 map onto the class names in order.
        self.label_to_names = dict(enumerate(class_names))
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort(list(self.label_to_names.keys()))
        # Raw label value -> contiguous index into label_values.
        self.label_to_idx = {lbl: idx for idx, lbl in enumerate(self.label_values)}
        self.ignored_labels = np.sort([0])

        self.seq_list = np.sort(os.listdir(self.dataset_path))

        if mode == 'test':
            self.test_scan_number = str(test_id)

        self.mode = mode
        split_lists = DP.get_file_list(self.dataset_path, str(test_id))
        train_list, val_list, test_list = split_lists
        if mode == 'training':
            self.data_list = train_list
        elif mode == 'validation':
            self.data_list = val_list
        elif mode == 'test':
            self.data_list = test_list

        self.data_list = DP.shuffle_list(self.data_list)

        self.possibility = []
        self.min_possibility = []
        if mode == 'test':
            # Seed each test scan with tiny random per-point "possibility"
            # scores used by the spatially-regular sampling scheme.
            for test_file_name in self.data_list:
                points = np.load(test_file_name)
                scan_poss = np.random.rand(points.shape[0]) * 1e-3
                self.possibility.append(scan_poss)
                self.min_possibility.append(float(np.min(scan_poss)))

        cfg.ignored_label_inds = [self.label_to_idx[ign]
                                  for ign in self.ignored_labels]
        cfg.class_weights = DP.get_class_weights('SemanticKITTI')
Beispiel #3
0
    def __init__(self, test_id):
        """Initialise the SemanticKITTI dataset with *test_id* as the
        held-out test sequence.

        Builds label lookup tables, lists the available sequences, and
        prepares shuffled train/validation file lists plus the test list.
        """
        self.name = 'SemanticKITTI'
        self.dataset_path = '/data/semantic_kitti/dataset/sequences_0.06'
        names = ('unlabeled', 'car', 'bicycle', 'motorcycle', 'truck',
                 'other-vehicle', 'person', 'bicyclist', 'motorcyclist',
                 'road', 'parking', 'sidewalk', 'other-ground', 'building',
                 'fence', 'vegetation', 'trunk', 'terrain', 'pole',
                 'traffic-sign')
        # Label ids 0..19 map onto the class names in order.
        self.label_to_names = {i: name for i, name in enumerate(names)}
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort(list(self.label_to_names.keys()))
        # Raw label value -> contiguous index into label_values.
        self.label_to_idx = {lbl: i for i, lbl in enumerate(self.label_values)}
        self.ignored_labels = np.sort([0])

        # Sequence '08' is the conventional SemanticKITTI validation split.
        self.val_split = '08'

        self.seq_list = np.sort(os.listdir(self.dataset_path))
        self.test_scan_number = str(test_id)
        file_lists = DP.get_file_list(self.dataset_path, self.test_scan_number)
        self.train_list, self.val_list, self.test_list = file_lists
        self.train_list = DP.shuffle_list(self.train_list)
        self.val_list = DP.shuffle_list(self.val_list)

        # Filled later by the sampling logic.
        self.possibility = []
        self.min_possibility = []
Beispiel #4
0
    def __init__(self, mode):
        """Initialise the rail RandLA-Net dataset for *mode*
        ('training' / 'validation' / 'test').

        Lists the velodyne scans, selects frames via the saved
        train/test index files, and shuffles the resulting file list.
        """
        self.name = 'raildata_RandLA'
        self.dataset_path = '/home/hwq/dataset/rail_randla_0.06'
        self.label_to_names = {0: 'unlabeled', 1: 'rail', 2: 'pole'}
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort(list(self.label_to_names.keys()))  # [0,1,2]
        # Raw label value -> contiguous index: {0:0, 1:1, 2:2}.
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        self.ignored_labels = np.sort([0])
        self.mode = mode

        fns = sorted(os.listdir(join(self.dataset_path, 'velodyne')))
        train_index = np.load('./utils/rail_index/trainindex.npy')
        test_index = np.load('./utils/rail_index/testindex.npy')

        # NOTE(review): filenames are listed from the 'velodyne' subfolder
        # but joined onto dataset_path without the 'velodyne' component —
        # presumably downstream loaders re-insert it; verify against caller.
        alldatapath = [os.path.join(self.dataset_path, fn) for fn in fns]

        # 'validation' and 'test' share the held-out index set.
        if mode == 'training':
            chosen = train_index
        elif mode in ('validation', 'test'):
            chosen = test_index
        else:
            chosen = []
        self.data_list = np.asarray([alldatapath[i] for i in chosen])
        self.data_list = DP.shuffle_list(self.data_list)

        cfg.ignored_label_inds = [
            self.label_to_idx[ign_label] for ign_label in self.ignored_labels
        ]
        cfg.class_weights = DP.get_class_weights('Rail')