    def __init__(self, mode, test_id):
        self.name = 'SemanticKITTI'
        self.dataset_path = os.path.join(
            root_dir, 'data/semantickitti/dataset/sequences_0.06')
        self.label_to_names = {
            0: 'unlabeled',
            1: 'car',
            2: 'bicycle',
            3: 'motorcycle',
            4: 'truck',
            5: 'other-vehicle',
            6: 'person',
            7: 'bicyclist',
            8: 'motorcyclist',
            9: 'road',
            10: 'parking',
            11: 'sidewalk',
            12: 'other-ground',
            13: 'building',
            14: 'fence',
            15: 'vegetation',
            16: 'trunk',
            17: 'terrain',
            18: 'pole',
            19: 'traffic-sign'
        }
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort(
            [k for k, v in self.label_to_names.items()])
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        self.ignored_labels = np.sort([0])
        self.val_split = '08'
        self.seq_list = np.sort(os.listdir(self.dataset_path))
        self.test_scan_number = str(test_id)
        self.mode = mode
        self.train_list, self.val_list, self.test_list = DataProcessing.get_file_list(
            self.dataset_path, self.test_scan_number)
        if mode == 'training':
            self.data_list = self.train_list
        elif mode == 'validation':
            self.data_list = self.val_list
        elif mode == 'test':
            self.data_list = self.test_list

        # self.data_list = self.data_list[0:1]
        self.data_list = DataProcessing.shuffle_list(self.data_list)

        self.possibility = []
        self.min_possibility = []
        if mode == 'test':
            for test_file_name in self.data_list:
                points = np.load(test_file_name)
                self.possibility += [np.random.rand(points.shape[0]) * 1e-3]
                self.min_possibility += [float(np.min(self.possibility[-1]))]

        ConfigSemanticKITTI.ignored_label_inds = [
            self.label_to_idx[ign_label] for ign_label in self.ignored_labels
        ]
        ConfigSemanticKITTI.class_weights = DataProcessing.get_class_weights(
            'SemanticKITTI')
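        # Hedged usage sketch (assuming the enclosing class is named SemanticKITTI,
        # as self.name and the later SemanticKITTI.tf_map call suggest, and that
        # root_dir / DataProcessing / ConfigSemanticKITTI come from this project):
        #   dataset = SemanticKITTI(mode='training', test_id=14)   # test_id value is illustrative
        #   print(dataset.num_classes)       # 20 classes, including 'unlabeled'
        #   print(len(dataset.data_list))    # shuffled list of training frames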
    def tf_map(self, batch_xyz, batch_features, batch_labels, batch_pc_idx,
               batch_cloud_idx):
        batch_feature = []
        for i in range(batch_xyz.shape[1]):
            xyz = batch_xyz[:, i, :]
            features = batch_features[:, i, :]
            batch_feature.append(self.tf_augment_input([xyz, features]))
        batch_feature = torch.stack(batch_feature, dim=1).numpy()
        input_points = []
        input_neighbors = []
        input_pools = []
        input_up_samples = []

        for i in range(ConfigSemantic3D.num_layers):
            # print('queried_pc_xyz shape:',batch_xyz.shape) # (1, N, 3)
            neighbour_idx = DataProcessing.knn_search(batch_xyz, batch_xyz,
                                                      ConfigSemantic3D.k_n)
            # print('neighbour_idx shape:', neighbour_idx.shape) # (1, N, 16)
            sub_points = batch_xyz[:, :batch_xyz.shape[1] //
                                   ConfigSemantic3D.sub_sampling_ratio[i], :]
            # print('sub_points shape:', sub_points.shape) # (1, N // sub_sampling_ratio[i], 3)
            pool_i = neighbour_idx[:, :batch_xyz.shape[1] //
                                   ConfigSemantic3D.sub_sampling_ratio[i], :]
            up_i = DataProcessing.knn_search(sub_points, batch_xyz, 1)
            input_points.append(batch_xyz)
            input_neighbors.append(neighbour_idx)
            input_pools.append(pool_i)
            input_up_samples.append(up_i)
            batch_xyz = sub_points

        input_list = input_points + input_neighbors + input_pools + input_up_samples
        input_list += [
            batch_feature, batch_labels, batch_pc_idx, batch_cloud_idx
        ]
        return input_list
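        # Layout of the flattened input_list (matching how detect_pc unpacks it
        # further below): num_layers point arrays, then num_layers neighbour-index
        # arrays, then num_layers pooling indices, then num_layers up-sampling
        # indices, followed by [features, labels, point indices, cloud indices].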
    @staticmethod
    def tf_map(batch_pc, batch_label, batch_pc_idx, batch_cloud_idx):
        features = batch_pc
        input_points = []
        input_neighbors = []
        input_pools = []
        input_up_samples = []

        for i in range(ConfigSemanticKITTI.num_layers):
            neighbour_idx = DataProcessing.knn_search(batch_pc, batch_pc,
                                                      ConfigSemanticKITTI.k_n)
            sub_points = batch_pc[:, :batch_pc.shape[1] //
                                  ConfigSemanticKITTI.sub_sampling_ratio[i], :]
            pool_i = neighbour_idx[:, :batch_pc.shape[1] //
                                   ConfigSemanticKITTI.sub_sampling_ratio[i], :]
            up_i = DataProcessing.knn_search(sub_points, batch_pc, 1)
            input_points.append(batch_pc)
            input_neighbors.append(neighbour_idx)
            input_pools.append(pool_i)
            input_up_samples.append(up_i)
            batch_pc = sub_points

        input_list = input_points + input_neighbors + input_pools + input_up_samples
        input_list += [features, batch_label, batch_pc_idx, batch_cloud_idx]

        return input_list
Example #4
    def __init__(self, dataset_name, config):
        super(RandLANET, self).__init__()
        self.config = config
        self.class_weights = DataProcessing.get_class_weights(dataset_name)

        self.fc0 = net_utils.conv1d(config.channels,
                                    8,
                                    1,
                                    1,
                                    0,
                                    'fc0',
                                    bias=True,
                                    bn=True)
        self.dilated_res_blocks = nn.ModuleList()
        d_in = 8
        for i in range(self.config.num_layers):
            d_out = self.config.d_out[i]
            self.dilated_res_blocks.append(Dilated_res_block(d_in, d_out))
            d_in = 2 * d_out
        d_out = d_in
        self.decoder_0 = net_utils.conv2d(d_in,
                                          d_out, (1, 1), (1, 1), (0, 0),
                                          'decoder_0',
                                          bias=True,
                                          bn=True)
        self.decoder_blocks = nn.ModuleList()
        for j in range(self.config.num_layers):
            if j < 3:
                d_in = self.config.d_out[-j -
                                         1] * 2 + 2 * self.config.d_out[-j - 2]
                d_out = 2 * self.config.d_out[-j - 2]
            else:
                d_in = self.config.d_out[-j - 1] * 2 + 2 * self.config.d_out[0]
                d_out = 2 * self.config.d_out[0]
            self.decoder_blocks.append(
                net_utils.conv2d_transpose(d_in,
                                           d_out, (1, 1), (1, 1), (0, 0),
                                           'decoder{}'.format(j),
                                           bias=True,
                                           bn=True))

        self.fc1 = net_utils.conv2d(d_out,
                                    64, (1, 1), (1, 1), (0, 0),
                                    'fc1',
                                    bias=True,
                                    bn=True)
        self.fc2 = net_utils.conv2d(64,
                                    32, (1, 1), (1, 1), (0, 0),
                                    'fc2',
                                    bias=True,
                                    bn=True)
        self.dropout = nn.Dropout(0.5)
        self.fc3 = net_utils.conv2d(32,
                                    self.config.num_classes, (1, 1), (1, 1),
                                    (0, 0),
                                    'fc3',
                                    bias=True,
                                    bn=False,
                                    activation_fn=None)
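        # Channel flow, read off the layers above: fc0 lifts the raw per-point
        # features to 8 channels; each Dilated_res_block outputs 2 * d_out[i],
        # which becomes the next block's input width; decoder_0 keeps the
        # bottleneck width; the transposed-conv decoder blocks mirror the encoder
        # widths back down to 2 * d_out[0]; and fc1 -> fc2 -> dropout -> fc3 maps
        # that to 64 -> 32 -> num_classes per point.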
Example #5
def convert_pointcloud2ply(annotations_path, save_path, sub_grid_size=0.04):
    """convert original files(.txt) to ply file(each line is XYZRGBL).

    Args:
        annotations_path (str): path to annotations
        save_path (str): path to save original point clouds (each line is XYZRGBL)
        sub_grid_size (float, optional): [description]. Defaults to 0.04.
    """
    make_dir(sub_grid_size)
    data_list = []
    for file in glob.glob(os.path.join(annotations_path, '*.txt')):
        class_name = os.path.basename(file).split('_')[0]

        if class_name not in ground_truth_class:
            class_name = 'clutter'

        pointcloud = pd.read_csv(file, header=None,
                                 delim_whitespace=True).values
        labels = np.ones(
            (pointcloud.shape[0], 1)) * ground_truth_label[class_name]
        data = np.concatenate([pointcloud, labels],
                              axis=1)  # x,y,z,r,g,b,label
        data_list.append(data)
        print(pointcloud)
        print(labels)

    pointcloud_and_label = np.concatenate(data_list, axis=0)
    xyz_min = np.min(pointcloud_and_label, axis=0)[0:3]
    pointcloud_and_label[:, 0:3] = pointcloud_and_label[:, 0:3] - xyz_min

    xyz = pointcloud_and_label[:, 0:3].astype(np.float32)
    colors = pointcloud_and_label[:, 3:6].astype(np.uint8)
    labels = pointcloud_and_label[:, 6].astype(np.uint8)
    ply.write_ply(save_path, (xyz, colors, labels),
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    sub_xyz, sub_colors, sub_labels = DataProcessing.grid_sub_sampling(
        xyz, colors, labels, sub_grid_size)
    sub_colors = sub_colors / 255.0
    sub_ply_file = os.path.join(sub_pointcloud_folder,
                                save_path.split('/')[-1][:-4] + '.ply')
    ply.write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    search_tree = KDTree(sub_xyz)
    kd_tree_file = os.path.join(
        sub_pointcloud_folder,
        str(save_path.split('/')[-1][:-4]) + '_KDTree.pkl')
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    project_index = np.squeeze(search_tree.query(xyz, return_distance=False))
    project_index = project_index.astype(np.int32)
    project_save = os.path.join(
        sub_pointcloud_folder,
        str(save_path.split('/')[-1][:-4]) + '_project.pkl')
    with open(project_save, 'wb') as f:
        pickle.dump([project_index, labels], f)
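    # Artifacts written above: the full-resolution PLY at save_path, a grid-
    # sub-sampled PLY plus its KDTree pickle in sub_pointcloud_folder, and a
    # _project.pkl storing, for every original point, the index of its nearest
    # sub-sampled point (typically used to re-project predictions back to full
    # resolution).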
    @staticmethod
    def crop_pc(points, labels, search_tree, pick_idx):
        # crop a fixed-size point cloud around the picked point for training
        center_point = points[pick_idx, :].reshape(1, -1)
        select_idx = search_tree.query(center_point,
                                       k=ConfigSemanticKITTI.num_points)[1][0]
        select_idx = DataProcessing.shuffle_idx(select_idx)
        select_points = points[select_idx]
        select_labels = labels[select_idx]
        return select_points, select_labels, select_idx
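        # Hedged usage sketch, mirroring the commented-out call in detect_pc below:
        #   search_tree = KDTree(points)
        #   pick_idx = np.random.choice(len(points), 1)
        #   pc, lbl, idx = SemanticKITTI.crop_pc(points, labels, search_tree, pick_idx)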
def viewer(data_path):
    pointcloud_list = DataProcessing.get_pointcloud_list_semantickitti(
        data_path)
    label_list = DataProcessing.get_label_list_semantickitti(data_path)

    colors = Plot.random_colors(21, seed=2)

    for index in range(len(pointcloud_list)):
        pointcloud_path = pointcloud_list[index]
        label_path = label_list[index]

        pointcloud = DataProcessing.load_pc_kitti(pointcloud_path)
        label = DataProcessing.load_label_kitti(label_path, remap_lut)

        pointcloud_withlabel = np.zeros((pointcloud.shape[0], 6), dtype=int)
        pointcloud_withlabel[:, 0:3] = pointcloud
        pointcloud_withlabel[:, 5] = 1
        Plot.draw_pointcloud(pointcloud, "pointcloud:{}".format(index))
        Plot.draw_pointcloud_semantic_instance(
            pointcloud, label, "pointcloud_label:{}".format(index))
def convert_pointcloud2ply(annotations_path, save_path, sub_grid_size=0.04):
    """convert original files(.txt) to ply file(each line is XYZRGBL).

    Args:
        annotations_path (str): path to annotations
        save_path (str): path to save original point clouds (each line is XYZRGBL)
        sub_grid_size (float, optional): [description]. Defaults to 0.04.
    """
    make_dir(sub_grid_size)

    class_name = os.path.basename(annotations_path).split('/')[0][:-9]
    pointcloud = np.loadtxt(annotations_path, delimiter=',').astype(np.float32)
    labels = np.ones((pointcloud.shape[0], 1)) * ground_truth_label[class_name]
    pointcloud_and_label = np.concatenate([pointcloud, labels], axis=1)
    xyz_min = np.min(pointcloud_and_label, axis=0)[0:3]
    pointcloud_and_label[:, 0:3] = pointcloud_and_label[:, 0:3] - xyz_min

    xyz = pointcloud_and_label[:, 0:3].astype(np.float32)
    colors = pointcloud_and_label[:, 3:6].astype(np.uint8)
    labels = pointcloud_and_label[:, 6].astype(np.uint8)

    print(save_path)
    ply.write_ply(save_path, (xyz, colors, labels),
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    sub_xyz, sub_colors, sub_labels = DataProcessing.grid_sub_sampling(
        xyz, colors, labels, sub_grid_size)
    sub_colors = sub_colors / 255.0
    sub_ply_file = os.path.join(sub_pointcloud_folder,
                                save_path.split('/')[-1][:-4] + '.ply')
    print(sub_ply_file)
    ply.write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    search_tree = KDTree(sub_xyz)
    kd_tree_file = os.path.join(
        sub_pointcloud_folder,
        str(save_path.split('/')[-1][:-4]) + '_KDTree.pkl')
    print(kd_tree_file)
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    project_index = np.squeeze(search_tree.query(xyz, return_distance=False))
    project_index = project_index.astype(np.int32)
    project_save = os.path.join(
        sub_pointcloud_folder,
        str(save_path.split('/')[-1][:-4]) + '_project.pkl')
    print(project_save)
    with open(project_save, 'wb') as f:
        pickle.dump([project_index, labels], f)
    def spatially_regular_gen(self, item):
        # Generator loop
        cloud_idx = int(np.argmin(self.min_possibility[self.mode]))
        # choose the point with the minimum of possibility in the cloud as query point
        point_ind = np.argmin(self.possibility[self.mode][cloud_idx])
        # Get all points within the cloud from tree structure
        points = np.array(self.input_trees[self.mode][cloud_idx].data,
                          copy=False)
        # Center point of input region
        center_point = points[point_ind, :].reshape(1, -1)
        # Add noise to the center point
        noise = np.random.normal(scale=ConfigSemantic3D.noise_init / 10,
                                 size=center_point.shape)
        pick_point = center_point + noise.astype(center_point.dtype)
        query_idx = self.input_trees[self.mode][cloud_idx].query(
            pick_point, k=ConfigSemantic3D.num_points)[1][0]

        # Shuffle index
        query_idx = DataProcessing.shuffle_idx(query_idx)
        # Get corresponding points and colors based on the index
        queried_pc_xyz = points[query_idx]
        queried_pc_xyz[:, 0:2] = queried_pc_xyz[:, 0:2] - pick_point[:, 0:2]
        queried_pc_colors = self.input_colors[self.mode][cloud_idx][query_idx]
        if self.mode == 'test':
            queried_pc_labels = np.zeros(queried_pc_xyz.shape[0])
            queried_pt_weight = 1
        else:
            queried_pc_labels = self.input_labels[
                self.mode][cloud_idx][query_idx]
            queried_pc_labels = np.array(
                [self.label_to_idx[l] for l in queried_pc_labels])
            queried_pt_weight = np.array([
                self.class_weight[self.mode][0][n] for n in queried_pc_labels
            ])

        # Update the possibility of the selected points
        dists = np.sum(np.square(
            (points[query_idx] - pick_point).astype(np.float32)),
                       axis=1)
        delta = np.square(1 - dists / np.max(dists)) * queried_pt_weight
        self.possibility[self.mode][cloud_idx][query_idx] += delta
        self.min_possibility[self.mode][cloud_idx] = float(
            np.min(self.possibility[self.mode][cloud_idx]))

        return queried_pc_xyz, queried_pc_colors.astype(
            np.float32), queried_pc_labels, query_idx.astype(
                np.int32), np.array([cloud_idx], dtype=np.int32)
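        # Possibility-based sampling: every queried point's possibility grows by
        # delta, which is largest for points nearest pick_point, so argmin over
        # min_possibility steers later generator calls towards regions of the
        # cloud that have not been covered yet; queried_pt_weight additionally
        # scales the increment by the per-class weight.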
    pc_path_out = os.path.join(seq_path_out, 'velodyne')
    KDTree_path_out = os.path.join(seq_path_out, 'KDTree')
    os.makedirs(seq_path_out, exist_ok=True)
    os.makedirs(pc_path_out, exist_ok=True)
    os.makedirs(KDTree_path_out, exist_ok=True)

    if int(seq_id) < 11:
        label_path = os.path.join(seq_path, 'labels')
        label_path_out = os.path.join(seq_path_out, 'labels')
        os.makedirs(label_path_out, exist_ok=True)
        scan_list = np.sort(os.listdir(pc_path))
        for scan_id in scan_list:
            print(scan_id)
            points = DataProcessing.load_pc_kitti(
                os.path.join(pc_path, scan_id))
            labels = DataProcessing.load_label_kitti(
                os.path.join(label_path,
                             str(scan_id[:-4]) + '.label'), remap_lut)
            sub_points, sub_labels = DataProcessing.grid_sub_sampling(
                points, labels=labels, grid_size=sub_grid_size)
            search_tree = KDTree(sub_points)
            KDTree_save = os.path.join(KDTree_path_out,
                                       str(scan_id[:-4]) + '.pkl')
            np.save(os.path.join(pc_path_out, scan_id)[:-4], sub_points)
            np.save(os.path.join(label_path_out, scan_id)[:-4], sub_labels)
            with open(KDTree_save, 'wb') as f:
                pickle.dump(search_tree, f)
            if seq_id == '08':
                proj_path = os.path.join(seq_path_out, 'proj')
                os.makedirs(proj_path, exist_ok=True)
def convert_txt2ply(save_path=None, sub_grid_size=0.06):
    """convert original files to ply file(each line is XYZRGBL).

    Args:
        save_path ([type], optional): [description]. Defaults to None.
        sub_grid_size (float, optional): [description]. Defaults to 0.06.
    """
    make_dir(sub_grid_size)

    for pointcloud_path in glob.glob(
            os.path.join(semantic3d_data_path, '*.txt')):
        print(pointcloud_path)
        filename = pointcloud_path.split('/')[-1][:-4]

        if os.path.exists(
                os.path.join(sub_pointcloud_folder, filename + '_KDTree.pkl')):
            continue

        pointcloud = DataProcessing.load_pc_semantic3d(pointcloud_path)
        label_path = pointcloud_path[:-4] + '.labels'
        print(label_path)
        if os.path.exists(label_path):
            labels = DataProcessing.load_label_semantic3d(label_path)
            full_ply_path = os.path.join(original_pointcloud_folder,
                                         filename + '.ply')

            sub_points, sub_colors, sub_labels = DataProcessing.grid_sub_sampling(
                pointcloud[:, :3].astype(np.float32),
                pointcloud[:, 4:7].astype(np.uint8), labels, 0.01)
            sub_labels = np.squeeze(sub_labels)
            ply.write_ply(full_ply_path, (sub_points, sub_colors, sub_labels),
                          ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

            sub_xyz, sub_colors, sub_labels = DataProcessing.grid_sub_sampling(
                sub_points, sub_colors, sub_labels, sub_grid_size)
            sub_colors = sub_colors / 255.0
            sub_labels = np.squeeze(sub_labels)
            sub_ply_file = os.path.join(sub_pointcloud_folder,
                                        filename + '.ply')
            ply.write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
                          ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

            search_tree = KDTree(sub_xyz, leaf_size=50)
            kd_tree_file = os.path.join(sub_pointcloud_folder,
                                        filename + '_KDTree.pkl')
            with open(kd_tree_file, 'wb') as f:
                pickle.dump(search_tree, f)

            proj_idx = np.squeeze(
                search_tree.query(sub_points, return_distance=False))
            proj_idx = proj_idx.astype(np.int32)
            proj_save = os.path.join(sub_pointcloud_folder,
                                     filename + '_proj.pkl')
            with open(proj_save, 'wb') as f:
                pickle.dump([proj_idx, labels], f)

        else:
            fully_ply_path = os.path.join(original_pointcloud_folder,
                                          filename + '.ply')
            ply.write_ply(fully_ply_path, (pointcloud[:, :3].astype(
                np.float32), pointcloud[:, 4:7].astype(np.uint8)),
                          ['x', 'y', 'z', 'red', 'green', 'blue'])

            sub_xyz, sub_colors = DataProcessing.grid_sub_sampling(
                pointcloud[:, :3].astype(np.float32),
                pointcloud[:, 4:7].astype(np.uint8),
                grid_size=sub_grid_size)
            sub_colors = sub_colors / 255.0
            sub_ply_file = os.path.join(sub_pointcloud_folder,
                                        filename + '.ply')
            ply.write_ply(sub_ply_file, [sub_xyz, sub_colors],
                          ['x', 'y', 'z', 'red', 'green', 'blue'])
            labels = np.zeros(pointcloud.shape[0], dtype=np.uint8)

            search_tree = KDTree(sub_xyz, leaf_size=50)
            kd_tree_file = os.path.join(sub_pointcloud_folder,
                                        filename + '_KDTree.pkl')
            with open(kd_tree_file, 'wb') as f:
                pickle.dump(search_tree, f)

            proj_idx = np.squeeze(
                search_tree.query(pointcloud[:, :3].astype(np.float32),
                                  return_distance=False))
            proj_idx = proj_idx.astype(np.int32)
            proj_save = os.path.join(sub_pointcloud_folder,
                                     filename + '_proj.pkl')
            with open(proj_save, 'wb') as f:
                pickle.dump([proj_idx, labels], f)
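        # Two branches above: clouds shipped with a .labels file (train/validation)
        # are written together with their labels, clouds without one (test set) get
        # an all-zero label array; in both cases a sub-sampled PLY, a KDTree pickle
        # and a _proj.pkl projection index are produced.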
    def detect_pc(self):
        colors = Plot.random_colors(21, seed=2)
        # for batch_idx, batch_data in enumerate(self.test_dataloader):
        #     for key in batch_data:
        #         if type(batch_data[key]) is list:
        #             for i in range(len(batch_data[key])):
        #                 batch_data[key][i] = batch_data[key][i].cuda()
        #         else:
        #             batch_data[key] = batch_data[key].cuda()

        #     xyz = batch_data['xyz']  # (batch,N,3)
        #     neigh_idx = batch_data['neigh_idx']  # (batch,N,16)
        #     sub_idx = batch_data['sub_idx']  # (batch,N/4,16)
        #     interp_idx = batch_data['interp_idx']  # (batch,N,1)
        #     features = batch_data['features']  # (batch, 3, N)
        #     labels = batch_data['labels']  # (batch, N)
        #     input_inds = batch_data['input_inds']  # (batch, N)
        #     cloud_inds = batch_data['cloud_inds']  # (batch, 1)

        #     with torch.no_grad():
        #         self.out = self.net(xyz, neigh_idx, sub_idx, interp_idx,
        #                             features, labels, input_inds, cloud_inds)
        #         Plot.draw_pointcloud(xyz[0].squeeze().cpu().numpy(),
        #                              "pointcloud:{}".format(batch_idx))
        #         Plot.draw_pointcloud_semantic_instance(
        #             xyz[0].squeeze().cpu().numpy(),
        #             labels.cpu().numpy()[0],
        #             "pointcloud_label:{}".format(batch_idx), colors)
        #         Plot.draw_pointcloud_semantic_instance(
        #             xyz[0].squeeze().cpu().numpy(),
        #             self.out.argmax(dim=1).cpu().numpy().squeeze(),
        #             "pointcloud_label:{}".format(batch_idx), colors)

        #         print(self.out.argmax(dim=1).cpu().numpy().squeeze())
        #         print(labels.cpu().numpy()[0])

        for seq_id in sequence_list:
            print('sequence' + seq_id + ' start')
            seq_path = os.path.join(dataset_path, seq_id)
            pc_path = os.path.join(seq_path, 'velodyne')
            label_path = os.path.join(seq_path, 'labels')
            scan_list = np.sort(os.listdir(pc_path))
            for scan_id in scan_list:
                print(scan_id)
                points = DataProcessing.load_pc_kitti(
                    os.path.join(pc_path, scan_id))
                labels = DataProcessing.load_label_kitti(
                    os.path.join(label_path,
                                 str(scan_id[:-4]) + '.label'), remap_lut)
                # label_ = labels
                search_tree = KDTree(points)
                pick_idx = np.random.choice(len(points), 1)
                print(pick_idx)
                selected_pc_, selected_labels_, selected_idx_, cloud_ind_ = [],[],[],[]
                # selected_pc, selected_labels, selected_idx = SemanticKITTI.crop_pc(
                #     points, labels, search_tree, pick_idx)
                # selected_pc = selected_pc.astype(np.float32)
                # selected_labels = selected_labels.astype(np.int32)
                # selected_idx = selected_idx.astype(np.int32)

                selected_pc = points.astype(np.float32)
                selected_labels = labels.astype(np.int32)
                selected_idx = pick_idx.astype(np.int32)

                selected_pc_.append(selected_pc)  # (N,3)
                selected_labels_.append(selected_labels)  # (N,)
                selected_idx_.append(selected_idx)  # (N,)
                cloud_ind_.append(np.array([scan_id[:-4]],
                                           dtype=np.int32))  # (1,)

                selected_pc_ = np.stack(selected_pc_)  # (batch,N,3)
                selected_labels_ = np.stack(selected_labels_)  # (batch,N)
                selected_idx_ = np.stack(selected_idx_)  # (batch,N)
                cloud_ind_ = np.stack(cloud_ind_)  # (batch,1)

                flat_inputs = SemanticKITTI.tf_map(selected_pc_,
                                                   selected_labels_,
                                                   selected_idx_, cloud_ind_)

                num_layers = ConfigSemanticKITTI.num_layers
                inputs = {}
                inputs['xyz'] = []  # (batch,N,3)
                for tmp in flat_inputs[:num_layers]:
                    inputs['xyz'].append(torch.from_numpy(tmp).float().cuda())
                inputs['neigh_idx'] = []  # (batch,N,16)
                for tmp in flat_inputs[num_layers:2 * num_layers]:
                    inputs['neigh_idx'].append(
                        torch.from_numpy(tmp).long().cuda())
                inputs['sub_idx'] = []  # (batch,N/4,16)
                for tmp in flat_inputs[2 * num_layers:3 * num_layers]:
                    inputs['sub_idx'].append(
                        torch.from_numpy(tmp).long().cuda())
                inputs['interp_idx'] = []  # (batch,N,1)
                for tmp in flat_inputs[3 * num_layers:4 * num_layers]:
                    inputs['interp_idx'].append(
                        torch.from_numpy(tmp).long().cuda())
                inputs['features'] = torch.from_numpy(
                    flat_inputs[4 * num_layers]).transpose(
                        1, 2).float().cuda()  # (batch, N, 3)->(batch, 3, N)
                inputs['labels'] = torch.from_numpy(
                    flat_inputs[4 * num_layers +
                                1]).long().cuda()  # (batch, N)
                inputs['input_inds'] = torch.from_numpy(
                    flat_inputs[4 * num_layers +
                                2]).long().cuda()  # (batch, N)
                inputs['cloud_inds'] = torch.from_numpy(
                    flat_inputs[4 * num_layers +
                                3]).long().cuda()  # (batch, 1)

                xyz = inputs['xyz']  # (batch,N,3)
                neigh_idx = inputs['neigh_idx']  # (batch,N,16)
                sub_idx = inputs['sub_idx']  # (batch,N/4,16)
                interp_idx = inputs['interp_idx']  # (batch,N,1)
                features = inputs['features']  # (batch, 3, N)
                labels = inputs['labels']  # (batch, N)
                input_inds = inputs['input_inds']  # (batch, N)
                cloud_inds = inputs['cloud_inds']  # (batch, 1)

                with torch.no_grad():
                    # labels = labels.reshape(-1)
                    self.out = self.net(
                        xyz, neigh_idx, sub_idx, interp_idx, features, labels,
                        input_inds,
                        cloud_inds).argmax(dim=1).cpu().numpy().squeeze()
                    print(self.out)
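                    # SemanticKITTI .label files store the instance id in the upper
                    # 16 bits and the semantic class in the lower 16 bits; the block
                    # below follows that convention, keeping the upper half and
                    # remapping only the lower (semantic) half through remap_lut
                    # before re-packing.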
                    pred = self.out.astype(np.uint32)
                    upper_half = pred >> 16
                    lower_half = pred & 0xFFFF
                    lower_half = remap_lut[lower_half]
                    pred = (upper_half << 16) + lower_half
                    pred = pred.astype(np.uint32)
                    print(pred)
                    # logits = self.out.transpose(1, 2).reshape(
                    #     -1, ConfigSemanticKITTI.num_classes)
                    # ignored_bool = labels == 0
                    # for ign_label in ConfigSemanticKITTI.ignored_label_inds:
                    #     ignored_bool = ignored_bool | (labels == ign_label)
                    # valid_idx = ignored_bool == 0
                    # valid_logits = logits[valid_idx, :]
                    # valid_labels_init = labels[valid_idx]

                    # print(valid_logits.shape)

                    # print(self.out.argmax(dim=1).cpu().numpy().squeeze().shape)

                    # Plot.draw_pointcloud(xyz[0].squeeze().cpu().numpy(),
                    #                      "pointcloud:{}".format(scan_id))
                    # Plot.draw_pointcloud_semantic_instance(
                    #     xyz[0].squeeze().cpu().numpy(), pred,
                    #     "pointcloud_label:{}".format(scan_id))

                    # print(self.out.argmax(dim=1).cpu().numpy().squeeze())
                    print(labels.cpu().numpy()[0])
                    Plot.draw_pointcloud_semantic_instance(
                        xyz[0].squeeze().cpu().numpy(),
                        labels.cpu().numpy()[0],
                        "pointcloud_label:{}".format(scan_id))
    def __init__(self, mode):
        self.name = 'Semantic3D'
        self.mode = mode
        self.path = os.path.join(root_dir, 'data/semantic3d')
        self.label_to_names = {
            0: 'unlabeled',
            1: 'man-made terrain',
            2: 'natural terrain',
            3: 'high vegetation',
            4: 'low vegetation',
            5: 'buildings',
            6: 'hard scape',
            7: 'scanning artefacts',
            8: 'cars'
        }
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort(
            [k for k, v in self.label_to_names.items()])
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        self.ignored_labels = np.sort([0])

        self.original_folder = os.path.join(self.path, 'original_data')
        self.full_pc_folder = os.path.join(self.path, 'original_ply')
        self.sub_pc_folder = os.path.join(
            self.path, 'input_{:.3f}'.format(ConfigSemantic3D.sub_grid_size))

        # Following KPConv to do the train-validation split
        self.all_splits = [0, 1, 4, 5, 3, 4, 3, 0, 1, 2, 3, 4, 2, 0, 5]
        self.val_split = 1

        # Initial training-validation-testing files
        self.train_files = []
        self.val_files = []
        self.test_files = []
        cloud_names = [
            file_name[:-4] for file_name in os.listdir(self.original_folder)
            if file_name[-4:] == '.txt'
        ]
        for pc_name in cloud_names:
            if os.path.exists(
                    os.path.join(self.original_folder, pc_name + '.labels')):
                self.train_files.append(
                    os.path.join(self.sub_pc_folder, pc_name + '.ply'))
            else:
                self.test_files.append(
                    os.path.join(self.full_pc_folder, pc_name + '.ply'))

        self.train_files = np.sort(self.train_files)
        self.test_files = np.sort(self.test_files)

        for i, file_path in enumerate(self.train_files):
            if self.all_splits[i] == self.val_split:
                self.val_files.append(file_path)

        self.train_files = np.sort(
            [x for x in self.train_files if x not in self.val_files])

        # Initiate containers
        self.val_proj = []
        self.val_labels = []
        self.test_proj = []
        self.test_labels = []

        self.possibility = {'training': [], 'validation': [], 'test': []}
        self.min_possibility = {'training': [], 'validation': [], 'test': []}
        self.class_weight = {'training': [], 'validation': [], 'test': []}
        self.input_trees = {'training': [], 'validation': [], 'test': []}
        self.input_colors = {'training': [], 'validation': [], 'test': []}
        self.input_labels = {'training': [], 'validation': []}

        # Ascii files dict for testing
        self.ascii_files = {
            'MarketplaceFeldkirch_Station4_rgb_intensity-reduced.ply':
            'marketsquarefeldkirch4-reduced.labels',
            'sg27_station10_rgb_intensity-reduced.ply':
            'sg27_10-reduced.labels',
            'sg28_Station2_rgb_intensity-reduced.ply':
            'sg28_2-reduced.labels',
            'StGallenCathedral_station6_rgb_intensity-reduced.ply':
            'stgallencathedral6-reduced.labels',
            'birdfountain_station1_xyz_intensity_rgb.ply':
            'birdfountain1.labels',
            'castleblatten_station1_intensity_rgb.ply':
            'castleblatten1.labels',
            'castleblatten_station5_xyz_intensity_rgb.ply':
            'castleblatten5.labels',
            'marketplacefeldkirch_station1_intensity_rgb.ply':
            'marketsquarefeldkirch1.labels',
            'marketplacefeldkirch_station4_intensity_rgb.ply':
            'marketsquarefeldkirch4.labels',
            'marketplacefeldkirch_station7_intensity_rgb.ply':
            'marketsquarefeldkirch7.labels',
            'sg27_station10_intensity_rgb.ply':
            'sg27_10.labels',
            'sg27_station3_intensity_rgb.ply':
            'sg27_3.labels',
            'sg27_station6_intensity_rgb.ply':
            'sg27_6.labels',
            'sg27_station8_intensity_rgb.ply':
            'sg27_8.labels',
            'sg28_station2_intensity_rgb.ply':
            'sg28_2.labels',
            'sg28_station5_xyz_intensity_rgb.ply':
            'sg28_5.labels',
            'stgallencathedral_station1_intensity_rgb.ply':
            'stgallencathedral1.labels',
            'stgallencathedral_station3_intensity_rgb.ply':
            'stgallencathedral3.labels',
            'stgallencathedral_station6_intensity_rgb.ply':
            'stgallencathedral6.labels'
        }

        ConfigSemantic3D.ignored_label_inds = [
            self.label_to_idx[ign_label] for ign_label in self.ignored_labels
        ]
        ConfigSemantic3D.class_weights = DataProcessing.get_class_weights(
            'Semantic3D')
        self.load_sub_sampled_clouds(ConfigSemantic3D.sub_grid_size)
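        # Hedged usage sketch (assuming the enclosing class is named Semantic3D and
        # that root_dir / DataProcessing / ConfigSemantic3D come from this project,
        # with the sub-sampled input folder already generated by the conversion script):
        #   dataset = Semantic3D(mode='training')
        #   print(dataset.num_classes)    # 9 classes, including 'unlabeled'
        #   print(len(dataset.train_files), len(dataset.val_files), len(dataset.test_files))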