Example #1
    def __getitem__(self, index):
        fn = self.datapath[index]
        cls = self.classes[self.datapath[index][0]]
        point_set = np.loadtxt(fn[1]).astype(np.float32)
        # print("Origin Point size:", len(point_set))
        seg = np.loadtxt(fn[2]).astype(np.int32)

        point_set, seg = grid_subsampling(point_set,
                                          labels=seg,
                                          sampleDl=self.first_subsampling_dl)

        # Center and rescale point for 1m radius
        pmin = np.min(point_set, axis=0)
        pmax = np.max(point_set, axis=0)
        point_set -= (pmin + pmax) / 2
        scale = np.max(np.linalg.norm(point_set, axis=1))
        point_set *= 1.0 / scale

        if self.data_augmentation and self.split == 'train':
            theta = np.random.uniform(0, np.pi * 2)
            rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)],
                                        [np.sin(theta), np.cos(theta)]])
            # Rotating only the x and z coordinates is a rotation about the
            # vertical (y) axis, so the up direction of the models is preserved.
            point_set[:, [0, 2]] = point_set[:, [0, 2]].dot(rotation_matrix)  # random rotation
            point_set += np.random.normal(0, 0.001, size=point_set.shape)  # random jitter

        # Estimate per-point normals (open3d.estimate_normals is the legacy, pre-0.10 Open3D API)
        pcd = make_point_cloud(point_set)
        open3d.estimate_normals(pcd)
        normals = np.array(pcd.normals)

        # Input features: 1 = constant one, 4 = constant + xyz, 7 = constant + xyz + normals
        if self.config.in_features_dim == 1:
            features = np.ones([point_set.shape[0], 1])
        elif self.config.in_features_dim == 4:
            features = np.ones([point_set.shape[0], 1])
            features = np.concatenate([features, point_set], axis=1)
        elif self.config.in_features_dim == 7:
            features = np.ones([point_set.shape[0], 1])
            features = np.concatenate([features, point_set, normals], axis=1)

        if self.classification:
            # manually convert numpy array to Tensor.
            # cls = torch.from_numpy(cls) - 1  # change to 0-based labels
            # cls = torch.from_numpy(np.array([cls]))
            # dict_inputs = segmentation_inputs(point_set, features, cls, self.config)
            # return dict_inputs
            return point_set, features, cls
        else:
            # manually convert numpy array to Tensor.
            # seg = torch.from_numpy(seg) - 1  # change to 0-based labels
            # dict_inputs = segmentation_inputs(point_set, features, seg, self.config)
            # return dict_inputs
            seg = seg - 1
            return point_set, features, seg
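The snippet above calls the free function open3d.estimate_normals, which only exists in older Open3D releases; since Open3D 0.10 normal estimation is a method on the point-cloud object. A minimal sketch with the current API (the radius and neighbour count are assumptions, not values taken from the snippet):

import numpy as np
import open3d as o3d

def estimate_normals_modern(point_set, radius=0.1, max_nn=30):
    # Build an Open3D point cloud from a (N, 3) NumPy array
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(point_set.astype(np.float64))
    # Hybrid search: neighbours within `radius`, capped at `max_nn` points
    pcd.estimate_normals(
        search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=radius, max_nn=max_nn))
    return np.asarray(pcd.normals)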
Example #2
    def load_subsampled_clouds(self, orient_correction):
        # Read the list of cloud files for the chosen split
        if self.train is True:
            train_list_file = open(os.path.join(self.path, "train.txt"), 'r')
        else:
            train_list_file = open(os.path.join(self.path, "test.txt"), 'r')

        train_list_path = train_list_file.readlines()
        train_list_file.close()

        # Initialize containers
        input_points = []
        input_normals = []
        label_names = []

        # Collect point clouds
        for i, txt_file_path in enumerate(tqdm(train_list_path)):
            # Read points
            txt_file_path = txt_file_path.replace("\n", "")

            # The parent folder name is the class name
            class_name = txt_file_path.split("/")[-2]
            label_names.append(class_name)
            
            data = np.loadtxt(txt_file_path, delimiter=',', dtype=np.float32)
            
            # Subsample them
            if self.subsampling is True:
                points, normals = grid_subsampling(data[:, :3], features=data[:, 3:], sampleDl=self.first_subsampling_dl)
            else:
                points = data[:, :3]
                normals = data[:, 3:]

            # Add to list
            input_points += [points]
            input_normals += [normals]

        # Get labels
        input_labels = np.array([self.name_to_label[name] for name in label_names])

        if orient_correction:
            input_points = [pp[:, [0, 2, 1]] for pp in input_points]
            input_normals = [nn[:, [0, 2, 1]] for nn in input_normals]

        return input_points, input_normals, input_labels
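grid_subsampling here appears to be the compiled voxel-subsampling op used in KPConv-style code. As a rough illustration only, a plain-NumPy sketch of the same idea, averaging points and optional features per voxel (the function name, signature, and exact averaging behaviour are assumptions, not the library's implementation):

import numpy as np

def voxel_grid_subsample(points, features=None, voxel_size=0.02):
    # Assign every point to a cubic voxel of edge length `voxel_size`
    voxel_idx = np.floor(points / voxel_size).astype(np.int64)
    _, inverse, counts = np.unique(voxel_idx, axis=0,
                                   return_inverse=True, return_counts=True)
    inverse = inverse.ravel()
    n_voxels = counts.shape[0]

    # Accumulate per-voxel sums, then divide by counts to obtain centroids
    sub_points = np.zeros((n_voxels, 3), dtype=points.dtype)
    np.add.at(sub_points, inverse, points)
    sub_points /= counts[:, None]

    if features is None:
        return sub_points
    sub_features = np.zeros((n_voxels, features.shape[1]), dtype=features.dtype)
    np.add.at(sub_features, inverse, features)
    sub_features /= counts[:, None]
    return sub_points, sub_features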
Example #3
    def load_subsampled_clouds(self, orient_correction):

        # Restart timer
        t0 = time.time()

        # Load wanted points if possible
        if self.train:
            split = 'training'
        else:
            split = 'test'

        print('\nLoading {:s} points subsampled at {:.3f}'.format(split, self.config.first_subsampling_dl))
        filename = join(self.path, '{:s}_{:.3f}_record.pkl'.format(split, self.config.first_subsampling_dl))

        if exists(filename):
            with open(filename, 'rb') as file:
                input_points, input_normals, input_labels = pickle.load(file)

        # Else compute them from original points
        else:

            # Collect training file names
            if self.train:
                # dtype=str (the np.str alias was removed in recent NumPy versions)
                names = np.loadtxt(join(self.path, 'modelnet40_train.txt'), dtype=str)
            else:
                names = np.loadtxt(join(self.path, 'modelnet40_test.txt'), dtype=str)

            # Initialize containers
            input_points = []
            input_normals = []

            # Advanced display
            N = len(names)
            progress_n = 30
            fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%'

            # Collect point clouds
            for i, cloud_name in enumerate(names):

                # Read points
                class_folder = '_'.join(cloud_name.split('_')[:-1])
                txt_file = join(self.path, class_folder, cloud_name) + '.txt'
                data = np.loadtxt(txt_file, delimiter=',', dtype=np.float32)

                # Subsample them
                if self.config.first_subsampling_dl > 0:
                    points, normals = grid_subsampling(data[:, :3],
                                                       features=data[:, 3:],
                                                       sampleDl=self.config.first_subsampling_dl)
                else:
                    points = data[:, :3]
                    normals = data[:, 3:]

                print('', end='\r')
                print(fmt_str.format('#' * ((i * progress_n) // N), 100 * i / N), end='', flush=True)

                # Add to list
                input_points += [points]
                input_normals += [normals]

            print('', end='\r')
            print(fmt_str.format('#' * progress_n, 100), end='', flush=True)
            print()

            # Get labels
            label_names = ['_'.join(name.split('_')[:-1]) for name in names]
            input_labels = np.array([self.name_to_label[name] for name in label_names])

            # Save for later use
            with open(filename, 'wb') as file:
                pickle.dump((input_points,
                             input_normals,
                             input_labels), file)

        lengths = [p.shape[0] for p in input_points]
        sizes = [l * 4 * 6 for l in lengths]  # 6 float32 values per point (xyz + normal), 4 bytes each
        print('{:.1f} MB loaded in {:.1f}s'.format(np.sum(sizes) * 1e-6, time.time() - t0))

        if orient_correction:
            input_points = [pp[:, [0, 2, 1]] for pp in input_points]
            input_normals = [nn[:, [0, 2, 1]] for nn in input_normals]

        return input_points, input_normals, input_labels
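This example (and Example #5 below) uses a compute-once, cache-to-pickle pattern: reload the record file if it exists, otherwise subsample all clouds and dump them for the next run. A minimal self-contained sketch of that pattern (the file name and the compute step are placeholders, not names from the snippet):

import os
import pickle

def load_or_compute(cache_path, compute_fn):
    # Reload a cached result if present, otherwise compute it once and cache it
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as f:
            return pickle.load(f)
    result = compute_fn()
    with open(cache_path, 'wb') as f:
        pickle.dump(result, f)
    return result

# Example call: points, normals, labels = load_or_compute('train_0.020_record.pkl', compute_clouds)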
Example #4
    def __init__(self,
                 root,
                 split='train',
                 first_subsampling_dl=0.03,
                 config=None,
                 data_augmentation=True):
        self.config = config
        self.first_subsampling_dl = first_subsampling_dl
        self.root = root
        self.split = split
        self.data_augmentation = data_augmentation
        self.points, self.normals, self.labels = [], [], []

        # Dict from labels to names
        self.label_to_names = {0: 'airplane',
                               1: 'bathtub',
                               2: 'bed',
                               3: 'bench',
                               4: 'bookshelf',
                               5: 'bottle',
                               6: 'bowl',
                               7: 'car',
                               8: 'chair',
                               9: 'cone',
                               10: 'cup',
                               11: 'curtain',
                               12: 'desk',
                               13: 'door',
                               14: 'dresser',
                               15: 'flower_pot',
                               16: 'glass_box',
                               17: 'guitar',
                               18: 'keyboard',
                               19: 'lamp',
                               20: 'laptop',
                               21: 'mantel',
                               22: 'monitor',
                               23: 'night_stand',
                               24: 'person',
                               25: 'piano',
                               26: 'plant',
                               27: 'radio',
                               28: 'range_hood',
                               29: 'sink',
                               30: 'sofa',
                               31: 'stairs',
                               32: 'stool',
                               33: 'table',
                               34: 'tent',
                               35: 'toilet',
                               36: 'tv_stand',
                               37: 'vase',
                               38: 'wardrobe',
                               39: 'xbox'}
        self.name_to_label = {v: k for k, v in self.label_to_names.items()}

        t0 = time.time()
        # Load wanted points if possible
        print(f'\nLoading {split} points')
        filename = os.path.join(self.root, f'{split}_{first_subsampling_dl:.3f}_record.pkl')
        if os.path.exists(filename):
            with open(filename, 'rb') as file:
                self.points, self.normals, self.labels = pickle.load(file)
        else:
            # Collect training file names
            names = np.loadtxt(os.path.join(self.root, f'modelnet40_{split}.txt'), dtype=str)

            # Collect point clouds
            for i, cloud_name in enumerate(names):

                # Read points
                class_folder = '_'.join(cloud_name.split('_')[:-1])
                txt_file = os.path.join(self.root, class_folder, cloud_name) + '.txt'
                data = np.loadtxt(txt_file, delimiter=',', dtype=np.float32)

                # Subsample them
                if first_subsampling_dl > 0:
                    points, normals = grid_subsampling(data[:, :3],
                                                       features=data[:, 3:],
                                                       sampleDl=first_subsampling_dl)
                else:
                    points = data[:, :3]
                    normals = data[:, 3:]

                # Add to list
                self.points += [points]
                self.normals += [normals]

            # Get labels
            label_names = ['_'.join(name.split('_')[:-1]) for name in names]
            self.labels = np.array([self.name_to_label[name] for name in label_names])

            # Save for later use
            with open(filename, 'wb') as file:
                pickle.dump((self.points, self.normals, self.labels), file)

        lengths = [p.shape[0] for p in self.points]
        sizes = [l * 4 * 6 for l in lengths]
        print('{:.1f} MB loaded in {:.1f}s'.format(np.sum(sizes) * 1e-6, time.time() - t0))
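The __init__ above only loads and caches the data; to use the class with a PyTorch DataLoader it also needs __len__ and __getitem__. A minimal sketch of such companion methods (these are assumptions, not part of the original snippet):

    # Hypothetical companion methods; not part of the original snippet.
    def __len__(self):
        return len(self.points)

    def __getitem__(self, index):
        # Return one subsampled cloud with its normals and class label
        return self.points[index], self.normals[index], self.labels[index]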
Example #5
    def load_subsampled_clouds(self, orient_correction):

        # restart timer
        t0 = time.time()

        # load wanted points if possible
        if self.train:
            split = 'training'
        else:
            split = 'test'

        print('\nLoading {:s} points subsampled at {:.3f}'.format(split, self.config.first_subsampling_dl))
        filename = join(self.path, '{:s}_{:.3f}_record.pkl'.format(split, self.config.first_subsampling_dl))  # serialized (pickled) cache file

        if exists(filename):  # check whether the cached file exists
            with open(filename, 'rb') as file:
                input_points, input_normals, input_labels = pickle.load(file)

        # else compute them from original points
        else:

            # collect training file names
            if self.train:
                names = np.loadtxt(join(self.path, 'modelnet40_train.txt'), dtype=str)
            else:
                names = np.loadtxt(join(self.path, 'modelnet40_test.txt'), dtype=str)  # one cloud file name per line

            # initialize containers
            input_points = []  # points after subsampling
            input_normals = []

            # advanced display
            n = len(names)
            progress_n = 30  # width of the progress bar
            fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%'

            # collect point clouds
            for i, cloud_name in enumerate(names):

                # read points
                class_folder = '_'.join(cloud_name.split('_')[:-1])
                txt_file = join(self.path, class_folder, cloud_name) + '.txt'
                data = np.loadtxt(txt_file, delimiter=',', dtype=np.float32)

                # subsample them
                if self.config.first_subsampling_dl > 0:
                    # data[:, :3] holds x, y, z; the remaining columns are the normals passed as features
                    points, normals = grid_subsampling(data[:, :3],
                                                       features=data[:, 3:],
                                                       sampleDl=self.config.first_subsampling_dl)
                else:
                    points = data[:, :3]
                    normals = data[:, 3:]

                print('', end='\r')
                print(fmt_str.format('#' * ((i * progress_n) // n), 100 * i / n), end='', flush=True)

                # add to list
                input_points += [points]  # collect the subsampled clouds in a list
                input_normals += [normals]

            print('', end='\r')
            print(fmt_str.format('#' * progress_n, 100), end='', flush=True)
            print()

            # get labels
            label_names = ['_'.join(name.split('_')[:-1]) for name in names]
            input_labels = np.array([self.name_to_label[name] for name in label_names])  # labels of all files

            # save for later use
            with open(filename, 'wb') as file:
                pickle.dump((input_points,
                             input_normals,
                             input_labels), file)  # cache the subsampled points, normals and labels of every file

        lengths = [p.shape[0] for p in input_points]  # number of points in each cloud
        sizes = [l * 4 * 6 for l in lengths]  # 6 float32 values per point (xyz + normal), 4 bytes each
        print('{:.1f} MB loaded in {:.1f}s'.format(np.sum(sizes) * 1e-6, time.time() - t0))  # estimate the loaded size

        if orient_correction:  # optionally swap the y and z axes
            input_points = [pp[:, [0, 2, 1]] for pp in input_points]
            input_normals = [nn[:, [0, 2, 1]] for nn in input_normals]

        return input_points, input_normals, input_labels