Example 1
def main():
    cfg = ConfigTest
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = Network(cfg).to(device)
    print("model parameters:", sum(param.numel() for param in net.parameters()))

    for _ in tqdm(range(10)):
        npts = cfg.num_points
        pcld = np.random.rand(1, npts, 3)
        feat = np.random.rand(1, 6, npts)
        n_layers = 4
        sub_s_r = [16, 1, 4, 1]
        inputs = {}
        for i in range(n_layers):
            nei_idx = DP.knn_search(pcld, pcld, 16)
            sub_pts = pcld[:, :pcld.shape[1] // sub_s_r[i], :]
            pool_i = nei_idx[:, :pcld.shape[1] // sub_s_r[i], :]
            up_i = DP.knn_search(sub_pts, pcld, 1)
            inputs['xyz'] = inputs.get('xyz', []) + [torch.from_numpy(pcld).float().to(device)]
            inputs['neigh_idx'] = inputs.get('neigh_idx', []) + [torch.LongTensor(nei_idx).to(device)]
            inputs['sub_idx'] = inputs.get('sub_idx', []) + [torch.LongTensor(pool_i).to(device)]
            inputs['interp_idx'] = inputs.get('interp_idx', []) + [torch.LongTensor(up_i).to(device)]
            pcld = sub_pts
        inputs['features'] = torch.from_numpy(feat).float().to(device)

        end_points = net(inputs)

    for k, v in end_points.items():
        if isinstance(v, list):
            for ii, item in enumerate(v):
                print(k + '%d' % ii, item.size())
        else:
            print(k, v.size())
Example 2
    def tf_map(self, batch_pc, batch_label, batch_pc_idx, batch_cloud_idx):
        features = batch_pc
        input_points = []
        input_neighbors = []
        input_pools = []
        input_up_samples = []

        for i in range(cfg.num_layers):  #4
            neighbour_idx = DP.knn_search(batch_pc, batch_pc,
                                          cfg.k_n)  #16 return index[B,N,K]
            # take the first N // sub_sampling_ratio[i] points
            sub_points = batch_pc[:, :batch_pc.shape[1] //
                                  cfg.sub_sampling_ratio[i], :]
            pool_i = neighbour_idx[:, :batch_pc.shape[1] //
                                   cfg.sub_sampling_ratio[i], :]
            # [B, N, 1] index of the nearest sub-sampled point for each original point
            up_i = DP.knn_search(sub_points, batch_pc, 1)
            input_points.append(batch_pc)  #[N,  N/4, N/16,N/64]
            input_neighbors.append(neighbour_idx)  #[N,  N/4, N/16,N/64],k
            input_pools.append(pool_i)  #[N/4,N/16,N/64,N/256],k
            input_up_samples.append(up_i)  #[N,  N/4, N/16,N/64],1
            batch_pc = sub_points

        input_list = input_points + input_neighbors + input_pools + input_up_samples
        input_list += [features, batch_label, batch_pc_idx, batch_cloud_idx]

        return input_list
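
Note: the flat input_list returned by tf_map is later unpacked by slicing with num_layers (see the model __init__ examples further down). A minimal sketch, assuming num_layers equals cfg.num_layers and the ordering produced above; unpack_input_list is a hypothetical helper, not part of the repo:

def unpack_input_list(input_list, num_layers):
    # Hypothetical helper: split the flat list produced by tf_map back into named groups.
    inputs = {}
    inputs['xyz'] = input_list[:num_layers]                           # per-layer point coordinates
    inputs['neigh_idx'] = input_list[num_layers:2 * num_layers]       # per-layer KNN indices
    inputs['sub_idx'] = input_list[2 * num_layers:3 * num_layers]     # per-layer pooling indices
    inputs['interp_idx'] = input_list[3 * num_layers:4 * num_layers]  # per-layer up-sampling indices
    inputs['features'] = input_list[4 * num_layers]
    inputs['labels'] = input_list[4 * num_layers + 1]
    inputs['input_inds'] = input_list[4 * num_layers + 2]
    inputs['cloud_inds'] = input_list[4 * num_layers + 3]
    return inputs
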
Example 3
    def np_map(self, batch_xyz, batch_features, batch_labels, batch_pc_idx,
               batch_cloud_idx):
        batch_features = np.concatenate([batch_xyz, batch_features], axis=-1)
        input_points = []
        input_neighbors = []
        input_pools = []
        input_up_samples = []

        for i in range(cfg.num_layers):
            neighbour_idx = DP.knn_search(batch_xyz, batch_xyz, cfg.k_n)
            sub_points = batch_xyz[:, :batch_xyz.shape[1] //
                                   cfg.sub_sampling_ratio[i], :]
            pool_i = neighbour_idx[:, :batch_xyz.shape[1] //
                                   cfg.sub_sampling_ratio[i], :]
            up_i = DP.knn_search(sub_points, batch_xyz, 1)
            input_points.append(batch_xyz)
            input_neighbors.append(neighbour_idx)
            input_pools.append(pool_i)
            input_up_samples.append(up_i)
            batch_xyz = sub_points

        input_list = input_points + input_neighbors + input_pools + input_up_samples
        input_list += [
            batch_features, batch_labels, batch_pc_idx, batch_cloud_idx
        ]

        return input_list
Example 4
    def np_map(self, batch_xyz, batch_features, batch_labels, batch_pc_idx,
               batch_cloud_idx):
        # erase tf
        batch_features = self.np_augment_input([batch_xyz, batch_features])
        input_points = []
        input_neighbors = []
        input_pools = []
        input_up_samples = []

        # erase tf
        for i in range(cfg.num_layers):
            neigh_idx = DP.knn_search(batch_xyz, batch_xyz, cfg.k_n)
            sub_points = batch_xyz[:, :batch_xyz.shape[1] //
                                   cfg.sub_sampling_ratio[i], :]
            pool_i = neigh_idx[:, :batch_xyz.shape[1] //
                               cfg.sub_sampling_ratio[i], :]
            up_i = DP.knn_search(sub_points, batch_xyz, 1)
            input_points.append(batch_xyz)
            input_neighbors.append(neigh_idx)
            input_pools.append(pool_i)
            input_up_samples.append(up_i)
            batch_xyz = sub_points

        input_list = input_points + input_neighbors + input_pools + input_up_samples
        input_list += [
            batch_features, batch_labels, batch_pc_idx, batch_cloud_idx
        ]

        return input_list
Example 5
    def __init__(self):
        # with open('./lib/datasets/dataloader_config.json', 'r') as f:
        #     json_obj = json.load(f)
        # self.root = json_obj['TRAINING_DATA_PATH']
        # self.num_pts = json_obj['NUM_POINTS']
        # self.pc_path = os.path.join(self.root, 'point_cloud')
        # self.label_path = os.path.join(self.root, 'labels')
        # self.frames = [s.split('.')[0] for s in os.listdir(self.pc_path) if '.bin' in s ]
        self.name = 'KITTI'
        self.root = cfg.train_data_path
        self.num_pts = cfg.num_points
        self.pc_path = os.path.join(self.root, 'point_cloud')
        self.label_path = os.path.join(self.root, 'labels')
        self.frames = [s.split('.')[0] for s in os.listdir(self.pc_path) if '.bin' in s ]
        self.num_classes = cfg.num_classes
        if self.num_classes == 1:
            self.num_classes = 0
        self.num_features = cfg.num_features
        self.num_target_attributes = cfg.num_target_attributes
        self.split_ratio = cfg.split_ratio
        self.num_samples = len(self.frames)
        assert np.abs(np.sum(self.split_ratio) - 1.0) < 1e-5
        train_split = int(self.num_samples * self.split_ratio[0])
        val_split = int(self.num_samples * np.sum(self.split_ratio[:2]))

        self.frames_indices = np.arange(len(self.frames))
        # self.train_list = self.frames[:train_split]
        # self.val_list = self.frames[train_split:val_split]
        # self.test_list = self.frames[val_split:]
        self.train_list = self.frames_indices[:train_split]
        self.val_list = self.frames_indices[train_split:val_split]
        self.test_list = self.frames_indices[val_split:]

        self.train_list = DP.shuffle_list(self.train_list)
        self.val_list = DP.shuffle_list(self.val_list)
Example 6
    def spatially_regular_gen(self, item):

        # Choose the cloud with the lowest probability
        cloud_idx = int(np.argmin(self.min_possibility[self.mode]))

        # choose the point with the minimum of possibility in the cloud as query point
        point_ind = np.argmin(self.possibility[self.mode][cloud_idx])

        # Get all points within the cloud from tree structure
        points = np.array(self.input_trees[self.mode][cloud_idx].data,
                          copy=False)

        # Center point of input region
        center_point = points[point_ind, :].reshape(1, -1)

        # Add noise to the center point
        noise = np.random.normal(scale=cfg.noise_init / 10,
                                 size=center_point.shape)
        pick_point = center_point + noise.astype(center_point.dtype)

        # Check if the number of points in the selected cloud is less than the predefined num_points
        if len(points) < cfg.num_points:
            # Query all points within the cloud
            queried_idx = self.input_trees[self.mode][cloud_idx].query(
                pick_point, k=len(points))[1][0]
        else:
            # Query the predefined number of points
            queried_idx = self.input_trees[self.mode][cloud_idx].query(
                pick_point, k=cfg.num_points)[1][0]

        # Shuffle index
        queried_idx = DP.shuffle_idx(queried_idx)
        # Get corresponding points and colors based on the index
        queried_pc_xyz = points[queried_idx]
        queried_pc_xyz = queried_pc_xyz - pick_point
        queried_pc_colors = self.input_colors[
            self.mode][cloud_idx][queried_idx]
        queried_pc_labels = self.input_labels[
            self.mode][cloud_idx][queried_idx]

        # Update the possibility of the selected points
        dists = np.sum(np.square(
            (points[queried_idx] - pick_point).astype(np.float32)),
                       axis=1)
        delta = np.square(1 - dists / np.max(dists))
        self.possibility[self.mode][cloud_idx][queried_idx] += delta
        self.min_possibility[self.mode][cloud_idx] = float(
            np.min(self.possibility[self.mode][cloud_idx]))

        # up_sampled with replacement
        if len(points) < cfg.num_points:
            queried_pc_xyz, queried_pc_colors, queried_idx, queried_pc_labels = \
                DP.data_aug(queried_pc_xyz, queried_pc_colors, queried_pc_labels, queried_idx, cfg.num_points)

        return queried_pc_xyz.astype(np.float32), queried_pc_colors.astype(
            np.float32), queried_pc_labels, queried_idx.astype(
                np.int32), np.array([cloud_idx], dtype=np.int32)
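
Note: the possibility update above is what makes the sampling spatially regular: each queried point's score grows by (1 - d/d_max)^2, so points near the last query centre are unlikely to be picked again soon. A self-contained numpy sketch of the mechanism (illustrative only; it uses brute-force neighbour search instead of the KD-tree used in the repo):

import numpy as np

rng = np.random.default_rng(0)
points = rng.random((1000, 3)).astype(np.float32)
possibility = rng.random(1000) * 1e-3                       # near-zero random initialisation

pick_point = points[np.argmin(possibility)][None, :]        # lowest-possibility point as query centre
queried_idx = np.argsort(np.sum((points - pick_point) ** 2, axis=1))[:256]  # 256 nearest points

dists = np.sum(np.square(points[queried_idx] - pick_point), axis=1)
delta = np.square(1 - dists / np.max(dists))                # largest increase for the closest points
possibility[queried_idx] += delta
# The next query centre is argmin(possibility), so it tends to land outside this patch.
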
Example 7
    def __init__(self, mode, test_id=None):
        self.name = 'SemanticKITTI'
        self.dataset_path = '/data/WQ/DataSet/semantic-kitti/dataset/sequences_0.06'
        self.label_to_names = {0: 'unlabeled',
                               1: 'car',
                               2: 'bicycle',
                               3: 'motorcycle',
                               4: 'truck',
                               5: 'other-vehicle',
                               6: 'person',
                               7: 'bicyclist',
                               8: 'motorcyclist',
                               9: 'road',
                               10: 'parking',
                               11: 'sidewalk',
                               12: 'other-ground',
                               13: 'building',
                               14: 'fence',
                               15: 'vegetation',
                               16: 'trunk',
                               17: 'terrain',
                               18: 'pole',
                               19: 'traffic-sign'}
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort([k for k, v in self.label_to_names.items()])
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        self.ignored_labels = np.sort([0])

        self.seq_list = np.sort(os.listdir(self.dataset_path))

        if mode == 'test':
            self.test_scan_number = str(test_id)

        self.mode = mode
        train_list, val_list, test_list = DP.get_file_list(self.dataset_path, str(test_id))
        if mode == 'training':
            self.data_list = train_list
        elif mode == 'validation':
            self.data_list = val_list
        elif mode == 'test':
            self.data_list = test_list

        # self.data_list = self.data_list[0:1]
        self.data_list = DP.shuffle_list(self.data_list)

        self.possibility = []
        self.min_possibility = []
        if mode == 'test':
            path_list = self.data_list
            for test_file_name in path_list:
                points = np.load(test_file_name)
                self.possibility += [np.random.rand(points.shape[0]) * 1e-3]
                self.min_possibility += [float(np.min(self.possibility[-1]))]

        cfg.ignored_label_inds = [self.label_to_idx[ign_label] for ign_label in self.ignored_labels]
        cfg.class_weights = DP.get_class_weights('SemanticKITTI')
Example 8
    def __init__(self, data_path, path_cls, test_name):

        self.path = data_path
        self.test_name = test_name

        self.original = os.path.join(self.path, "original")
        self.sub_folder = os.path.join(self.path, "sub")

        classes, label_values, class2labels, label2color, label2names = DP.get_info_classes(
            path_cls)
        self.label_values = np.array(label_values)

        self.ignored_classes = []  # TODO TEST
        self.ignored_labels = np.array(
            [class2labels[cls] for cls in self.ignored_classes])

        # Initiate containers
        self.val_proj = []
        self.val_labels = []
        self.possibility = {}
        self.min_possibility = {}
        self.input_trees = {'test': []}
        self.input_colors = {'test': []}
        self.input_labels = {'test': []}
        self.input_names = {'test': []}
        self.input_full_xyz = {'test': []}
        self.load_sub_sampled_clouds(cfg.sub_grid_size)
Example 9
        def spatially_regular_gen():

            # Generator loop
            for i in range(num_per_epoch):  # num_per_epoch

                # Choose the cloud with the lowest probability
                cloud_idx = int(np.argmin(self.min_possibility[split]))

                # choose the point with the minimum of possibility in the cloud as query point
                point_ind = np.argmin(self.possibility[split][cloud_idx])

                # Get all points within the cloud from tree structure
                points = np.array(self.input_trees[split][cloud_idx].data,
                                  copy=False)

                # Center point of input region
                center_point = points[point_ind, :].reshape(1, -1)

                # Add noise to the center point
                noise = np.random.normal(scale=cfg.noise_init / 10,
                                         size=center_point.shape)
                pick_point = center_point + noise.astype(center_point.dtype)
                query_idx = self.input_trees[split][cloud_idx].query(
                    pick_point, k=cfg.num_points)[1][0]

                # Shuffle index
                query_idx = DP.shuffle_idx(query_idx)

                # Get corresponding points and colors based on the index
                queried_pc_xyz = points[query_idx]
                queried_pc_xyz[:, 0:2] = queried_pc_xyz[:, 0:2] - pick_point[:, 0:2]
                #queried_pc_colors = self.input_colors[split][cloud_idx][query_idx]
                if split == 'test':
                    queried_pc_labels = np.zeros(queried_pc_xyz.shape[0])
                    queried_pt_weight = 1
                else:
                    queried_pc_labels = self.input_labels[split][cloud_idx][
                        query_idx]
                    queried_pc_labels = np.array(
                        [self.label_to_idx[l] for l in queried_pc_labels])
                    queried_pt_weight = np.array([
                        self.class_weight[split][0][n]
                        for n in queried_pc_labels
                    ])

                # Update the possibility of the selected points
                dists = np.sum(np.square(
                    (points[query_idx] - pick_point).astype(np.float32)),
                               axis=1)
                delta = np.square(1 -
                                  dists / np.max(dists)) * queried_pt_weight
                self.possibility[split][cloud_idx][query_idx] += delta
                self.min_possibility[split][cloud_idx] = float(
                    np.min(self.possibility[split][cloud_idx]))
                if True:
                    yield (queried_pc_xyz, queried_pc_labels,
                           query_idx.astype(np.int32),
                           np.array([cloud_idx], dtype=np.int32))
Example 10
        def spatially_regular_gen():
            # Generator loop
            for i in range(num_per_epoch):  # num_per_epoch

                # Choose the cloud with the lowest possibility
                cloud_idx = int(np.argmin(self.min_possibility[split]))

                # choose the point with the minimum of possibility as query point
                point_ind = np.argmin(self.possibility[split][cloud_idx])

                # Get points from tree structure
                points = np.array(self.input_trees[split][cloud_idx].data, copy=False)

                # Center point of input region
                center_point = points[point_ind, :].reshape(1, -1)

                # Add noise to the center point
                noise = np.random.normal(scale=cfg.noise_init / 10, size=center_point.shape)
                pick_point = center_point + noise.astype(center_point.dtype)

                if len(points) < cfg.num_points:
                    queried_idx = self.input_trees[split][cloud_idx].query(pick_point, k=len(points))[1][0]
                else:
                    queried_idx = self.input_trees[split][cloud_idx].query(pick_point, k=cfg.num_points)[1][0]

                queried_idx = DP.shuffle_idx(queried_idx)
                # Collect points and colors
                queried_pc_xyz = points[queried_idx]
                queried_pc_xyz = queried_pc_xyz - pick_point
                queried_pc_colors = self.input_colors[split][cloud_idx][queried_idx]
                queried_pc_labels = self.input_labels[split][cloud_idx][queried_idx]

                dists = np.sum(np.square((points[queried_idx] - pick_point).astype(np.float32)), axis=1)
                delta = np.square(1 - dists / np.max(dists))
                self.possibility[split][cloud_idx][queried_idx] += delta
                self.min_possibility[split][cloud_idx] = float(np.min(self.possibility[split][cloud_idx]))

                if len(points) < cfg.num_points:
                    queried_pc_xyz, queried_pc_colors, queried_idx, queried_pc_labels = \
                        DP.data_aug(queried_pc_xyz, queried_pc_colors, queried_pc_labels, queried_idx, cfg.num_points)

                if True:
                    yield (queried_pc_xyz.astype(np.float32),
                           queried_pc_colors.astype(np.float32),
                           queried_pc_labels,
                           queried_idx.astype(np.int32),
                           np.array([cloud_idx], dtype=np.int32))
Example 11
 def crop_pc(points, labels, search_tree, pick_idx):
     # crop a fixed size point cloud for training
     center_point = points[pick_idx, :].reshape(1, -1)
     select_idx = search_tree.query(center_point, k=cfg.num_points)[1][0]
     select_idx = DP.shuffle_idx(select_idx)
     select_points = points[select_idx]
     select_labels = labels[select_idx]
     return select_points, select_labels, select_idx
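
Note: a minimal usage sketch for crop_pc, assuming it is in scope as a plain function (in the repo it sits on a dataset class), that cfg.num_points is defined, and using random stand-in data:

import numpy as np
from sklearn.neighbors import KDTree

points = np.random.rand(100000, 3).astype(np.float32)   # stand-in point cloud
labels = np.random.randint(0, 13, size=(100000,))       # stand-in per-point labels
search_tree = KDTree(points)                             # in the repo this is loaded from a *_KDTree.pkl file
pick_idx = np.random.choice(len(points))

select_points, select_labels, select_idx = crop_pc(points, labels, search_tree, pick_idx)
# select_points has shape (cfg.num_points, 3); the indices are shuffled so the
# query centre is not always at position 0.
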
Example 12
    def __init__(self, mode, test_area_idx):
        self.name = 'S3DIS'
        self.path = 'data/S3DIS'
        self.label_to_names = {
            0: 'ceiling',
            1: 'floor',
            2: 'wall',
            3: 'beam',
            4: 'column',
            5: 'window',
            6: 'door',
            7: 'table',
            8: 'chair',
            9: 'sofa',
            10: 'bookcase',
            11: 'board',
            12: 'clutter'
        }
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort(
            [k for k, v in self.label_to_names.items()])
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        self.ignored_labels = np.array([])

        self.val_split = 'Area_' + str(test_area_idx)
        self.all_files = glob.glob(join(self.path, 'original_ply', '*.ply'))

        # Initiate containers
        self.val_proj = []
        self.val_labels = []
        self.possibility = {}
        self.min_possibility = {}
        self.input_trees = {'training': [], 'validation': []}
        self.input_colors = {'training': [], 'validation': []}
        self.input_labels = {'training': [], 'validation': []}
        self.input_names = {'training': [], 'validation': []}
        self.load_sub_sampled_clouds(cfg.sub_grid_size)

        # Also: ignored_label_inds, class_weights, and the init lines of get_batch_gen().
        self.mode = mode

        self.possibility[self.mode] = []
        self.min_possibility[self.mode] = []
        # Random initialize
        for i, tree in enumerate(self.input_colors[self.mode]):
            self.possibility[self.mode] += [
                np.random.rand(tree.data.shape[0]) * 1e-3
            ]
            self.min_possibility[self.mode] += [
                float(np.min(self.possibility[self.mode][-1]))
            ]

        cfg.ignored_label_inds = [
            self.label_to_idx[ign_label] for ign_label in self.ignored_labels
        ]
        cfg.class_weights = DP.get_class_weights('S3DIS')
Example 13
def convert_pc2ply(anno_path, save_path):
    """
    Convert original dataset files to ply file (each line is XYZRGBL).
    We aggregated all the points from each instance in the room.
    :param anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
    :param save_path: path to save original point clouds (each line is XYZRGBL)
    :return: None;
    note: each room generates four files: raw_pc.ply, sub_pc.ply, sub_pc.pkl for the KD-tree, and proj_idx.pkl for each raw point's nearest neighbour in the sub_pc.
    """

    # store points and labels for the room (corresponding to anno_path)
    data_list = []

    for f in glob.glob(join(anno_path, '*.txt')):
        class_name = os.path.basename(f).split('_')[0]
        if class_name not in gt_class:  # note: in some room there is 'staris' class..
            class_name = 'clutter'
        pc = pd.read_csv(f, header=None, delim_whitespace=True).values
        labels = np.ones((pc.shape[0], 1)) * gt_class2label[class_name]
        data_list.append(np.concatenate([pc, labels], 1))  # Nx7

    # translate the data by xyz_min
    pc_label = np.concatenate(data_list, 0)  # Nx7 as a np object
    xyz_min = np.amin(pc_label, axis=0)[0:3]
    pc_label[:, 0:3] -= xyz_min

    # manage data types and save in PLY format
    xyz = pc_label[:, :3].astype(np.float32)
    colors = pc_label[:, 3:6].astype(np.uint8)
    labels = pc_label[:, 6].astype(np.uint8)
    write_ply(save_path, (xyz, colors, labels),
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    # save sub_cloud and KDTree file
    sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(
        xyz, colors, labels, sub_grid_size)
    sub_colors = sub_colors / 255.0
    sub_ply_file = join(sub_pc_folder, save_path.split('/')[-1][:-4] + '.ply')
    write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    search_tree = KDTree(sub_xyz)
    kd_tree_file = join(sub_pc_folder,
                        str(save_path.split('/')[-1][:-4]) + '_KDTree.pkl')
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    # nearest-neighbour index for each raw xyz point when querying the KD-tree built from the sub-sampled PC
    # https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html
    proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
    proj_idx = proj_idx.astype(np.int32)
    proj_save = join(sub_pc_folder,
                     str(save_path.split('/')[-1][:-4]) + '_proj.pkl')
    with open(proj_save, 'wb') as f:
        pickle.dump([proj_idx, labels], f)
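
Note: a short sketch (with assumed folder and room names) of how the per-room artifacts written above can be read back; the proj pickle pairs, for every raw point, the index of its nearest sub-sampled point with the raw labels:

import pickle
from os.path import join

sub_pc_folder = 'data/S3DIS/input_0.040'       # assumed location of the sub-sampled clouds
room_name = 'Area_1_office_2'                  # assumed room name

with open(join(sub_pc_folder, room_name + '_KDTree.pkl'), 'rb') as f:
    search_tree = pickle.load(f)               # sklearn KDTree over the sub-sampled cloud

with open(join(sub_pc_folder, room_name + '_proj.pkl'), 'rb') as f:
    proj_idx, labels = pickle.load(f)          # nearest sub-sampled index per raw point, plus raw labels
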
Example 14
    def __init__(self, test_id):
        self.name = 'SemanticKITTI'
        self.dataset_path = '/data/semantic_kitti/dataset/sequences_0.06'
        self.label_to_names = {
            0: 'unlabeled',
            1: 'car',
            2: 'bicycle',
            3: 'motorcycle',
            4: 'truck',
            5: 'other-vehicle',
            6: 'person',
            7: 'bicyclist',
            8: 'motorcyclist',
            9: 'road',
            10: 'parking',
            11: 'sidewalk',
            12: 'other-ground',
            13: 'building',
            14: 'fence',
            15: 'vegetation',
            16: 'trunk',
            17: 'terrain',
            18: 'pole',
            19: 'traffic-sign'
        }
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort(
            [k for k, v in self.label_to_names.items()])
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        self.ignored_labels = np.sort([0])

        self.val_split = '08'

        self.seq_list = np.sort(os.listdir(self.dataset_path))
        self.test_scan_number = str(test_id)
        self.train_list, self.val_list, self.test_list = DP.get_file_list(
            self.dataset_path, self.test_scan_number)
        self.train_list = DP.shuffle_list(self.train_list)
        self.val_list = DP.shuffle_list(self.val_list)

        self.possibility = []
        self.min_possibility = []
Example 15
    def tf_map(self, batch_pc, batch_label, batch_pc_idx, batch_cloud_idx):
        features = batch_pc
        input_points = []
        input_neighbors = []
        input_pools = []
        input_up_samples = []

        for i in range(cfg.num_layers):
            neighbour_idx = DP.knn_search(batch_pc, batch_pc, cfg.k_n)
            sub_points = batch_pc[:, :batch_pc.shape[1] // cfg.sub_sampling_ratio[i], :]
            pool_i = neighbour_idx[:, :batch_pc.shape[1] // cfg.sub_sampling_ratio[i], :]
            up_i = DP.knn_search(sub_points, batch_pc, 1)
            input_points.append(batch_pc)
            input_neighbors.append(neighbour_idx)
            input_pools.append(pool_i)
            input_up_samples.append(up_i)
            batch_pc = sub_points

        input_list = input_points + input_neighbors + input_pools + input_up_samples
        input_list += [features, batch_label, batch_pc_idx, batch_cloud_idx]

        return input_list
Example 16
    def __init__(self, mode):
        self.name = 'raildata_RandLA'
        self.dataset_path = '/home/hwq/dataset/rail_randla_0.06'
        self.label_to_names = {0: 'unlabeled', 1: 'rail', 2: 'pole'}
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort(
            [k for k, v in self.label_to_names.items()])  # [0,1,2]
        self.label_to_idx = {l: i
                             for i, l in enumerate(self.label_values)
                             }  # dict {0:0,1:1,2:2}
        self.ignored_labels = np.sort([0])
        self.mode = mode

        fns = sorted(os.listdir(join(self.dataset_path, 'velodyne')))
        train_index = np.load('./utils/rail_index/trainindex.npy')
        test_index = np.load('./utils/rail_index/testindex.npy')

        alldatapath = []
        for fn in fns:
            alldatapath.append(os.path.join(self.dataset_path, fn))
        # print(alldatapath,train_index)

        self.data_list = []
        if mode == 'training':
            for index in train_index:
                self.data_list.append(alldatapath[index])
        elif mode == 'validation':
            for index in test_index:
                self.data_list.append(alldatapath[index])
        elif mode == 'test':
            for index in test_index:
                self.data_list.append(alldatapath[index])
        self.data_list = np.asarray(self.data_list)
        self.data_list = DP.shuffle_list(self.data_list)
        cfg.ignored_label_inds = [
            self.label_to_idx[ign_label] for ign_label in self.ignored_labels
        ]
        cfg.class_weights = DP.get_class_weights('Rail')
Example 17
def convert_for_test(filename, output_dir, grid_size=0.001, protocol="field"):

    original_pc_folder = os.path.join(output_dir, 'test')
    if not os.path.exists(original_pc_folder):
        os.mkdir(original_pc_folder)

    sub_pc_folder = os.path.join(output_dir, 'input_{:.3f}'.format(grid_size))
    if not os.path.exists(sub_pc_folder):
        os.mkdir(sub_pc_folder)

    basename = os.path.basename(filename)[:-4]

    data = numpy.loadtxt(filename)

    points = data[:, 0:3].astype(numpy.float32)

    if protocol == "synthetic" or protocol == "field_only_xyz":
        # TODO: hack, must be removed
        colors = numpy.zeros((data.shape[0], 3), dtype=numpy.uint8)
    elif protocol == "field":
        adr = normalize(data[:, 3:-1]) * 255
        colors = adr.astype(numpy.uint8)
    else:
        exit("unknown protocol")

    field_names = ['x', 'y', 'z', 'red', 'green', 'blue']

    #Save original
    full_ply_path = os.path.join(original_pc_folder, basename + '.ply')
    helper_ply.write_ply(full_ply_path, [points, colors], field_names)

    # save sub_cloud and KDTree file
    sub_xyz, sub_colors = DP.grid_sub_sampling(points,
                                               colors,
                                               grid_size=grid_size)
    sub_colors = sub_colors / 255.0
    sub_ply_file = os.path.join(sub_pc_folder, basename + '.ply')
    helper_ply.write_ply(sub_ply_file, [sub_xyz, sub_colors], field_names)
    labels = numpy.zeros(data.shape[0], dtype=numpy.uint8)

    search_tree = sklearn.neighbors.KDTree(sub_xyz, leaf_size=50)
    kd_tree_file = os.path.join(sub_pc_folder, basename + '_KDTree.pkl')
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    proj_idx = numpy.squeeze(search_tree.query(points, return_distance=False))
    proj_idx = proj_idx.astype(numpy.int32)
    proj_save = os.path.join(sub_pc_folder, basename + '_proj.pkl')
    with open(proj_save, 'wb') as f:
        pickle.dump([proj_idx, labels], f)
Example 18
def convert_pc2ply(anno_path, save_path):
    """
    Convert original dataset files to ply file (each line is XYZRGBL).
    We aggregated all the points from each instance in the room.
    :param anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
    :param save_path: path to save original point clouds (each line is XYZRGBL)
    :return: None
    """
    data_list = []

    for f in glob.glob(join(anno_path, '*.txt')):
        class_name = os.path.basename(f).split('_')[0]
        if class_name not in gt_class:  # note: in some room there is 'staris' class..
            class_name = 'clutter'
        pc = pd.read_csv(f, header=None, delim_whitespace=True).values
        labels = np.ones((pc.shape[0], 1)) * gt_class2label[class_name]
        data_list.append(np.concatenate([pc, labels], 1))  # Nx7

    pc_label = np.concatenate(data_list, 0)
    xyz_min = np.amin(pc_label, axis=0)[0:3]
    pc_label[:, 0:3] -= xyz_min

    xyz = pc_label[:, :3].astype(np.float32)
    colors = pc_label[:, 3:6].astype(np.uint8)
    labels = pc_label[:, 6].astype(np.uint8)
    write_ply(save_path, (xyz, colors, labels),
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    # save sub_cloud and KDTree file
    sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(
        xyz, colors, labels, sub_grid_size)
    sub_colors = sub_colors / 255.0
    sub_ply_file = join(sub_pc_folder, save_path.split('/')[-1][:-4] + '.ply')
    write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    search_tree = KDTree(sub_xyz)
    kd_tree_file = join(sub_pc_folder,
                        str(save_path.split('/')[-1][:-4]) + '_KDTree.pkl')
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
    proj_idx = proj_idx.astype(np.int32)
    proj_save = join(sub_pc_folder,
                     str(save_path.split('/')[-1][:-4]) + '_proj.pkl')
    with open(proj_save, 'wb') as f:
        pickle.dump([proj_idx, labels], f)
Example 19
 def crop_pc(mode, points, labels, search_tree, pick_idx):
     # crop a fixed size point cloud for training
     center_point = points[pick_idx, :].reshape(1, -1)  #[1,3]
     # print('rail_dataset line 86 :',(points).shape)
     # exit()
     # cfg.num_points = 512*(points.shape[0]//512)
     kk = points.shape[0]
     # select_idx = search_tree.query(center_point, k=cfg.num_points)[1][0]  # [45056,], values range over the cloud size (roughly 0-80k)
     if mode == 'test':
         select_idx = np.random.randint(0, kk, (512 * (kk // 512), ))
     else:
         select_idx = np.random.randint(0, kk, (cfg.num_points, ))
     select_idx = DP.shuffle_idx(select_idx)
     select_points = points[select_idx]
     select_labels = labels[select_idx]
     return select_points, select_labels, select_idx
Example 20
    def __init__(self, data_path, path_cls):
        self.path = data_path

        classes, label_values, class2labels, label2color, label2names = DP.get_info_classes(
            path_cls)
        self.label_values = np.array(label_values)

        # Initiate containers
        self.val_proj = []
        self.val_labels = []
        self.possibility = {}
        self.min_possibility = {}
        self.input_trees = {'validation': []}
        self.input_colors = {'validation': []}
        self.input_labels = {'validation': []}
        self.input_names = {'validation': []}
        self.input_full_xyz = {'validation': []}
Example 21
    def __init__(self, config, dataset_name='SemanticKITTI'):
        super().__init__()
        self.config = config
        self.class_weights = DP.get_class_weights(dataset_name)

        self.fc0 = pt_utils.Conv1d(3, 8, kernel_size=1, bn=True)

        self.dilated_res_blocks = nn.ModuleList()
        d_in = 8
        for i in range(self.config.num_layers):
            d_out = self.config.d_out[i]
            self.dilated_res_blocks.append(Dilated_res_block(d_in, d_out))
            d_in = 2 * d_out

        d_out = d_in
        self.decoder_0 = pt_utils.Conv2d(d_in,
                                         d_out,
                                         kernel_size=(1, 1),
                                         bn=True)

        self.decoder_blocks = nn.ModuleList()
        for j in range(self.config.num_layers):
            if j < 3:
                d_in = d_out + 2 * self.config.d_out[-j - 2]
                d_out = 2 * self.config.d_out[-j - 2]
            else:
                d_in = 4 * self.config.d_out[-4]
                d_out = 2 * self.config.d_out[-4]
            self.decoder_blocks.append(
                pt_utils.Conv2d(d_in, d_out, kernel_size=(1, 1), bn=True))

        self.fc1 = pt_utils.Conv2d(d_out, 64, kernel_size=(1, 1), bn=True)
        self.fc2 = pt_utils.Conv2d(64, 32, kernel_size=(1, 1), bn=True)
        self.dropout = nn.Dropout(0.5)
        self.fc3 = pt_utils.Conv2d(32,
                                   self.config.num_classes,
                                   kernel_size=(1, 1),
                                   bn=False,
                                   activation=None)
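
Note: a small sketch that traces the encoder/decoder channel sizes produced by the __init__ above, assuming a typical configuration of d_out = [16, 64, 128, 256] and num_layers = 4 (these values are assumptions, not taken from this snippet):

# Trace the channel sizes for an assumed config d_out = [16, 64, 128, 256], num_layers = 4.
d_out_cfg = [16, 64, 128, 256]

d_in = 8                                   # output channels of fc0
for i, d_out in enumerate(d_out_cfg):
    print('encoder block %d: %d -> %d' % (i, d_in, 2 * d_out))
    d_in = 2 * d_out                       # each Dilated_res_block outputs 2 * d_out channels

d_out = d_in                               # decoder_0 keeps the channel count
for j in range(len(d_out_cfg)):
    if j < 3:
        d_in = d_out + 2 * d_out_cfg[-j - 2]
        d_out = 2 * d_out_cfg[-j - 2]
    else:
        d_in = 4 * d_out_cfg[-4]
        d_out = 2 * d_out_cfg[-4]
    print('decoder block %d: %d -> %d' % (j, d_in, d_out))
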
Example 22
    def load_sub_sampled_clouds(self, sub_grid_size):

        for cloud in natsorted(os.listdir(self.path)):

            cloud_name = cloud[:-4]

            full_ply_file = join(self.path, '{:s}.ply'.format(cloud_name))
            full_data = read_ply(full_ply_file)
            full_xyz = np.vstack(
                (full_data['x'], full_data['y'], full_data['z'])).T
            full_colors = np.vstack(
                (full_data['red'], full_data['green'], full_data['blue'])).T
            full_labels = full_data['class']

            xyz_min = np.amin(full_xyz, axis=0)[0:3]  # TODO: try without this; it should have been trained without it (data prepare)
            full_xyz -= xyz_min

            sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(
                full_xyz, full_colors, full_labels, sub_grid_size)
            sub_colors = sub_colors / 255.0

            search_tree = KDTree(sub_xyz)

            proj_idx = np.squeeze(
                search_tree.query(full_xyz, return_distance=False))
            proj_idx = proj_idx.astype(np.int32)

            self.input_trees['validation'] += [search_tree]
            self.input_colors['validation'] += [sub_colors]
            self.input_labels['validation'] += [sub_labels]
            self.input_names['validation'] += [cloud_name]
            self.input_full_xyz['validation'] += [full_xyz]
            self.val_proj += [proj_idx]
            self.val_labels += [full_labels]
Example 23
def train(args, io):
    train_loader = DataLoader(S3DIS(5, args.num_points, partition='train'),
                              num_workers=2,
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(S3DIS(5, args.num_points, partition='val'),
                             num_workers=2,
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)
    # train_data = S3DIS(5, args.num_points, partition='train')
    # test_data = S3DIS(5, args.num_points, partition='val')

    device = torch.device("cuda" if args.cuda else "cpu")

    class_weights = torch.from_numpy(DP.get_class_weights('S3DIS')).to(device)
    class_weights = class_weights.float()
    model = Seg(args).to(device)
    print(str(model))
    model = nn.DataParallel(model)
    print('num_points:%s, batch_size:%s, %s' %
          (args.num_points, args.batch_size, args.test_batch_size))

    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(),
                        lr=args.lr * 100,
                        momentum=args.momentum,
                        weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)

    criterion = seg_loss
    best_test_acc = 0

    for epoch in range(args.epochs):
        scheduler.step()
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        idx = 0
        total_time = 0.0
        # logits_ = []
        # label_ = []
        # for i in range(train_data.__len__()):
        #     data, label = train_data.__getitem__(i)
        #     data, label = torch.from_numpy(data).to(device), torch.from_numpy(label).to(device).squeeze()
        #     data, label = torch.unsqueeze(data, 0), torch.unsqueeze(label, 0)
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = args.batch_size

            start_time = time.time()
            logits = model(data)
            end_time = time.time()
            total_time += (end_time - start_time)
            # logits_.append(logits)
            # label_.append(label)

            # if len(logits_) < batch_size:
            #     continue

            opt.zero_grad()
            # logits, label = torch.cat(logits_, dim=-1), torch.cat(label_, dim=-1)
            loss = get_loss(logits, label, class_weights)
            loss.backward()
            # logits_.clear()
            # label_.clear()
            opt.step()

            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true += label.cpu().numpy().tolist()[0]
            train_pred += preds.detach().cpu().numpy().tolist()[0]

        print('train total time is', total_time)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (
            epoch, train_loss * 1.0 / count,
            metrics.accuracy_score(train_true, train_pred),
            metrics.balanced_accuracy_score(train_true, train_pred))
        io.cprint(outstr)

        ####################
        # Test
        ####################
        if epoch % 5 == 0:
            test_loss = 0.0
            count = 0.0
            model.eval()
            test_pred = []
            test_true = []
            total_time = 0.0
            idx = 0
            # logits_ = []
            # label_ = []
            # for i in range(test_data.__len__()):
            #     data, label = test_data.__getitem__(i)
            #     data, label = torch.from_numpy(data).to(device), torch.from_numpy(label).to(device).squeeze()
            #     data, label = torch.unsqueeze(data, 0), torch.unsqueeze(label, 0)
            for data, label in test_loader:
                data, label = data.to(device), label.to(device).squeeze()
                data = data.permute(0, 2, 1)
                batch_size = args.test_batch_size

                start_time = time.time()
                logits = model(data)
                end_time = time.time()
                total_time += (end_time - start_time)
                # logits_.append(logits)
                # label_.append(label)

                # if len(logits_) < batch_size:
                #     continue

                # logits, label = torch.cat(logits_, dim=-1), torch.cat(label_, dim=-1)
                loss = get_loss(logits, label, class_weights)
                # logits_.clear()
                # label_.clear()

                preds = logits.max(dim=1)[1]
                count += batch_size
                test_loss += loss.item() * batch_size
                test_true += label.cpu().numpy().tolist()[0]
                test_pred += preds.detach().cpu().numpy().tolist()[0]

            print('test total time is', total_time)
            test_acc = metrics.accuracy_score(test_true, test_pred)
            avg_per_class_acc = metrics.balanced_accuracy_score(
                test_true, test_pred)
            outstr = '*** Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (
                epoch, test_loss * 1.0 / count, test_acc, avg_per_class_acc)
            io.cprint(outstr)
            if test_acc >= best_test_acc:
                best_test_acc = test_acc
                print('save new best model acc: %s' % best_test_acc)
                torch.save(model.state_dict(),
                           'checkpoints/%s/models/model.t7' % args.exp_name)
Example 24
    def __init__(self, dataset, config):

        # obtain the dataset iterator's next element under the hood
        flat_inputs = dataset.flat_inputs
        self.config = config
        # Path of the result folder
        if self.config.saving:
            if self.config.saving_path is None:
                self.saving_path = time.strftime(
                    '{}/Log_%Y-%m-%d_%H-%M-%S'.format(config.results_dir),
                    time.gmtime())
            else:
                self.saving_path = self.config.saving_path
            makedirs(
                self.saving_path) if not exists(self.saving_path) else None

        # use the inputs dict to map the flat_inputs
        with tf.variable_scope('inputs'):
            self.inputs = dict()
            num_layers = self.config.num_layers

            # correspond to the flat_inputs defined in get_tf_mapping2() in main_S3DIS_SQN.py
            # HACK: the encoder needs the original points, so they are added as the first element of this tuple.
            self.inputs['original_xyz'] = flat_inputs[4 * num_layers]  # original xyz, (B, N, 3)
            self.inputs['xyz'] = (self.inputs['original_xyz'],) + flat_inputs[:num_layers]  # original xyz plus sub_pc xyz at every sub-sampling stage, num_layers + 1 items
            self.inputs['neigh_idx'] = flat_inputs[num_layers:2 * num_layers]  # neighbour idx, num_layers items
            self.inputs['sub_idx'] = flat_inputs[2 * num_layers:3 * num_layers]  # sub-sampled idx, num_layers items
            self.inputs['interp_idx'] = flat_inputs[3 * num_layers:4 * num_layers]  # interpolation idx (nearest idx in the sub_pc for all raw pts), num_layers items
            self.inputs['features'] = flat_inputs[4 * num_layers + 1]  # features containing xyz and feature, (B, N, 3+C)
            self.inputs['labels'] = flat_inputs[4 * num_layers + 2]
            self.inputs['weak_label_masks'] = flat_inputs[4 * num_layers + 3]
            self.inputs['input_inds'] = flat_inputs[4 * num_layers + 4]  # index of each batch's points in the sub_pc
            self.inputs['cloud_inds'] = flat_inputs[4 * num_layers + 5]  # cloud index for each batch

            self.points = self.inputs['original_xyz']  # (B, N, 3)
            self.labels = self.inputs['labels']  # (B, N)
            self.weak_label_masks = self.inputs['weak_label_masks']  # weak label mask for weakly-supervised semseg, (B, N)
            self.is_training = tf.placeholder(tf.bool, shape=())
            self.training_step = 1
            self.training_epoch = 0
            self.correct_prediction = 0
            self.accuracy = 0
            self.mIou_list = [0]
            self.class_weights = DP.get_class_weights(dataset.name)
            self.Log_file = open(
                'log_train_' + dataset.name + str(dataset.val_split) + '.txt',
                'a')

        with tf.variable_scope('layers'):
            self.logits, self.weak_labels = self.inference(
                self.inputs, self.is_training)  # (n, num_classes), (n,)

        #####################################################################
        # Ignore the invalid point (unlabeled) when calculating the loss #
        #####################################################################
        with tf.variable_scope('loss'):
            self.logits = tf.reshape(
                self.logits, [-1, config.num_classes])  # (n, num_classes)
            self.weak_labels = tf.reshape(self.weak_labels, [-1])  # (n,)
            # TODO: which to use, WCE, CE or smooth label
            self.loss = self.get_loss_Sqn(self.logits, self.weak_labels)

        with tf.variable_scope('optimizer'):
            self.learning_rate = tf.Variable(config.learning_rate,
                                             trainable=False,
                                             name='learning_rate')
            self.train_op = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.loss)
            self.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        with tf.variable_scope('results'):
            # self.correct_prediction = tf.nn.in_top_k(valid_logits, valid_labels, 1)
            self.correct_prediction = tf.nn.in_top_k(self.logits,
                                                     self.weak_labels, 1)
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_prediction, tf.float32))
            self.prob_logits = tf.nn.softmax(self.logits)  # (n,C)

            tf.summary.scalar('learning_rate', self.learning_rate)
            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('accuracy', self.accuracy)

        my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.saver = tf.train.Saver(my_vars, max_to_keep=100)
        c_proto = tf.ConfigProto()
        c_proto.gpu_options.allow_growth = True
        self.sess = tf.Session(config=c_proto)
        self.merged = tf.summary.merge_all()
        self.train_writer = tf.summary.FileWriter(config.train_sum_dir,
                                                  self.sess.graph)
        self.sess.run(tf.global_variables_initializer())
Example 25
    def __init__(self, dataset, config):
        flat_inputs = dataset.flat_inputs
        self.config = config
        # Path of the result folder
        if self.config.saving:
            if self.config.saving_path is None:
                self.saving_path = time.strftime(
                    'results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
            else:
                self.saving_path = self.config.saving_path
            makedirs(
                self.saving_path) if not exists(self.saving_path) else None
        # use the inputs dict to map the flat_inputs
        with tf.variable_scope('inputs'):
            self.inputs = dict()
            num_layers = self.config.num_layers
            self.inputs['xyz'] = flat_inputs[:num_layers]  # xyz (points) of sub_pc at all the sub-sampling stages, num_layers items
            self.inputs['neigh_idx'] = flat_inputs[num_layers:2 * num_layers]  # neighbour idx, num_layers items
            self.inputs['sub_idx'] = flat_inputs[2 * num_layers:3 * num_layers]  # sub-sampled idx, num_layers items
            self.inputs['interp_idx'] = flat_inputs[3 * num_layers:4 * num_layers]  # interpolation idx (nearest idx in the sub_pc for all raw pts), num_layers items
            self.inputs['features'] = flat_inputs[4 * num_layers]  # features containing xyz and feature, (B, N, 3+C)
            self.inputs['labels'] = flat_inputs[4 * num_layers + 1]
            self.inputs['input_inds'] = flat_inputs[4 * num_layers + 2]  # index of each batch's points in the sub_pc
            self.inputs['cloud_inds'] = flat_inputs[4 * num_layers + 3]  # cloud index for each batch

            self.labels = self.inputs['labels']
            self.is_training = tf.placeholder(tf.bool, shape=())
            self.training_step = 1
            self.training_epoch = 0
            self.correct_prediction = 0
            self.accuracy = 0
            self.mIou_list = [0]
            self.class_weights = DP.get_class_weights(dataset.name)
            self.Log_file = open(
                'log_train_' + dataset.name + str(dataset.val_split) + '.txt',
                'a')

        with tf.variable_scope('layers'):
            self.logits = self.inference(self.inputs, self.is_training)

        #####################################################################
        # Ignore the invalid point (unlabeled) when calculating the loss #
        #####################################################################
        with tf.variable_scope('loss'):
            self.logits = tf.reshape(self.logits, [-1, config.num_classes])
            self.labels = tf.reshape(self.labels, [-1])

            # Boolean mask of points that should be ignored
            ignored_bool = tf.zeros_like(self.labels, dtype=tf.bool)  # (B,N)
            for ign_label in self.config.ignored_label_inds:  # e.g., ignore 12, [12]
                ignored_bool = tf.logical_or(
                    ignored_bool, tf.equal(self.labels,
                                           ign_label))  # bool tensor, (B,N)

            # Collect logits and labels that are not ignored
            valid_idx = tf.squeeze(tf.where(tf.logical_not(ignored_bool)))
            valid_logits = tf.gather(self.logits, valid_idx, axis=0)
            valid_labels_init = tf.gather(self.labels, valid_idx, axis=0)

            # Reduce label values in the range of logit shape
            reducing_list = tf.range(self.config.num_classes, dtype=tf.int32)
            inserted_value = tf.zeros((1, ), dtype=tf.int32)
            for ign_label in self.config.ignored_label_inds:
                reducing_list = tf.concat([
                    reducing_list[:ign_label], inserted_value,
                    reducing_list[ign_label:]
                ], 0)
            valid_labels = tf.gather(reducing_list, valid_labels_init)

            self.loss = self.get_loss(valid_logits, valid_labels,
                                      self.class_weights)

        with tf.variable_scope('optimizer'):
            self.learning_rate = tf.Variable(config.learning_rate,
                                             trainable=False,
                                             name='learning_rate')
            self.train_op = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.loss)
            self.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        with tf.variable_scope('results'):
            self.correct_prediction = tf.nn.in_top_k(valid_logits,
                                                     valid_labels, 1)
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_prediction, tf.float32))
            self.prob_logits = tf.nn.softmax(self.logits)

            tf.summary.scalar('learning_rate', self.learning_rate)
            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('accuracy', self.accuracy)

        my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.saver = tf.train.Saver(my_vars, max_to_keep=100)
        c_proto = tf.ConfigProto()
        c_proto.gpu_options.allow_growth = True
        self.sess = tf.Session(config=c_proto)
        self.merged = tf.summary.merge_all()
        self.train_writer = tf.summary.FileWriter(config.train_sum_dir,
                                                  self.sess.graph)
        self.sess.run(tf.global_variables_initializer())
Example 26
    seq_path_out = join(output_path, seq_id)
    pc_path = join(seq_path, 'velodyne')
    pc_path_out = join(seq_path_out, 'velodyne')
    KDTree_path_out = join(seq_path_out, 'KDTree')
    os.makedirs(seq_path_out) if not exists(seq_path_out) else None
    os.makedirs(pc_path_out) if not exists(pc_path_out) else None
    os.makedirs(KDTree_path_out) if not exists(KDTree_path_out) else None

    if int(seq_id) < 11:
        label_path = join(seq_path, 'labels')
        label_path_out = join(seq_path_out, 'labels')
        os.makedirs(label_path_out) if not exists(label_path_out) else None
        scan_list = np.sort(os.listdir(pc_path))
        for scan_id in scan_list:
            print(scan_id)
            points = DP.load_pc_kitti(join(pc_path, scan_id))
            labels = DP.load_label_kitti(
                join(label_path,
                     str(scan_id[:-4]) + '.label'), remap_lut)
            sub_points, sub_labels = DP.grid_sub_sampling(points,
                                                          labels=labels,
                                                          grid_size=grid_size)
            search_tree = KDTree(sub_points)
            KDTree_save = join(KDTree_path_out, str(scan_id[:-4]) + '.pkl')
            np.save(join(pc_path_out, scan_id)[:-4], sub_points)
            np.save(join(label_path_out, scan_id)[:-4], sub_labels)
            with open(KDTree_save, 'wb') as f:
                pickle.dump(search_tree, f)
            if seq_id == '08':
                proj_path = join(seq_path_out, 'proj')
                os.makedirs(proj_path) if not exists(proj_path) else None
Example 27
    def __init__(self, dataset, config):
        flat_inputs = dataset.flat_inputs
        self.config = config
        # Path of the result folder
        if self.config.saving:
            if self.config.saving_path is None:
                self.saving_path = time.strftime(
                    'results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
            else:
                self.saving_path = self.config.saving_path
            makedirs(
                self.saving_path) if not exists(self.saving_path) else None

        with tf.variable_scope('inputs'):
            self.inputs = dict()
            num_layers = self.config.num_layers
            self.inputs['xyz'] = flat_inputs[:num_layers]
            self.inputs['neigh_idx'] = flat_inputs[num_layers:2 * num_layers]
            self.inputs['sub_idx'] = flat_inputs[2 * num_layers:3 * num_layers]
            self.inputs['interp_idx'] = flat_inputs[3 * num_layers:4 *
                                                    num_layers]
            self.inputs['features'] = flat_inputs[4 * num_layers]
            self.inputs['labels'] = flat_inputs[4 * num_layers + 1]
            self.inputs['input_inds'] = flat_inputs[4 * num_layers + 2]
            self.inputs['cloud_inds'] = flat_inputs[4 * num_layers + 3]

            K_points_numpy = np.array(fibonacci_sphere(self.config.k_n - 1))
            K_points_numpy = np.concatenate((np.array((0, 0, 0))[None, :], K_points_numpy), axis=0)
            self.inputs['K_points'] = tf.Variable(K_points_numpy.astype(np.float32),
                                                  name='kernel_points',
                                                  trainable=False,
                                                  dtype=tf.float32)

            self.labels = self.inputs['labels']
            self.is_training = tf.placeholder(tf.bool, shape=())
            self.training_step = 1
            self.training_epoch = 0
            self.correct_prediction = 0
            self.accuracy = 0
            self.mIou_list = [0]
            self.class_weights = DP.get_class_weights(dataset.name)
            self.Log_file = open(
                'log_train_' + dataset.name + str(dataset.val_split) + '.txt',
                'a')

        with tf.variable_scope('layers'):
            self.logits = self.inference(self.inputs, self.is_training)

        #####################################################################
        # Ignore invalid (unlabeled) points when computing the loss
        #####################################################################
        with tf.variable_scope('loss'):
            self.logits = tf.reshape(self.logits, [-1, config.num_classes])
            self.labels = tf.reshape(self.labels, [-1])

            # Boolean mask of points that should be ignored
            ignored_bool = tf.zeros_like(self.labels, dtype=tf.bool)
            for ign_label in self.config.ignored_label_inds:
                ignored_bool = tf.logical_or(ignored_bool,
                                             tf.equal(self.labels, ign_label))

            # Collect logits and labels that are not ignored
            valid_idx = tf.squeeze(tf.where(tf.logical_not(ignored_bool)))
            valid_logits = tf.gather(self.logits, valid_idx, axis=0)
            valid_labels_init = tf.gather(self.labels, valid_idx, axis=0)

            # Remap label values into the contiguous range [0, num_classes) expected by the logits
            reducing_list = tf.range(self.config.num_classes, dtype=tf.int32)
            inserted_value = tf.zeros((1, ), dtype=tf.int32)
            for ign_label in self.config.ignored_label_inds:
                reducing_list = tf.concat([
                    reducing_list[:ign_label], inserted_value,
                    reducing_list[ign_label:]
                ], 0)
            valid_labels = tf.gather(reducing_list, valid_labels_init)

            self.loss = self.get_loss(valid_logits, valid_labels,
                                      self.class_weights)

        with tf.variable_scope('optimizer'):
            self.learning_rate = tf.Variable(config.learning_rate,
                                             trainable=False,
                                             name='learning_rate')
            self.train_op = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.loss)
            self.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        with tf.variable_scope('results'):
            self.correct_prediction = tf.nn.in_top_k(valid_logits,
                                                     valid_labels, 1)
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_prediction, tf.float32))
            self.prob_logits = tf.nn.softmax(self.logits)

            tf.summary.scalar('learning_rate', self.learning_rate)
            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('accuracy', self.accuracy)

        my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.saver = tf.train.Saver(my_vars, max_to_keep=100)
        c_proto = tf.ConfigProto()
        c_proto.gpu_options.allow_growth = True
        c_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
        c_proto.allow_soft_placement = True
        self.sess = tf.Session(config=c_proto)
        self.merged = tf.summary.merge_all()
        self.train_writer = tf.summary.FileWriter(config.train_sum_dir,
                                                  self.sess.graph)
        self.sess.run(tf.global_variables_initializer())
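
The reducing_list construction in the loss scope is easy to misread; the standalone NumPy sketch below reproduces the same remapping for a made-up case with a single ignored label (values chosen purely for illustration):

import numpy as np

# Same idea as tf.gather(reducing_list, valid_labels_init) above:
# raw labels {0: ignored, 1, 2, 3} are remapped onto 3 contiguous class ids.
num_classes = 3
ignored_label_inds = [0]

reducing_list = np.arange(num_classes, dtype=np.int32)           # [0, 1, 2]
for ign_label in ignored_label_inds:
    reducing_list = np.concatenate([reducing_list[:ign_label],
                                    np.zeros((1,), dtype=np.int32),
                                    reducing_list[ign_label:]], 0)
# reducing_list is now [0, 0, 1, 2]: raw label 1 -> 0, 2 -> 1, 3 -> 2
valid_labels_init = np.array([1, 3, 2, 1])   # ignored points were already filtered out
valid_labels = reducing_list[valid_labels_init]
print(valid_labels)                          # [0 2 1 0]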
Example No. 28
0
    def test(self, model, dataset, num_votes=100):

        # Smoothing parameter for votes
        test_smooth = 0.95

        # Initialise iterator with validation/test data
        self.sess.run(dataset.val_init_op)

        # Number of points per class in validation set
        val_proportions = np.zeros(model.config.num_classes, dtype=np.float32)
        i = 0
        for label_val in dataset.label_values:
            if label_val not in dataset.ignored_labels:
                val_proportions[i] = np.sum([
                    np.sum(labels == label_val)
                    for labels in dataset.val_labels
                ])
                i += 1

        # Test saving path
        saving_path = join('results',
                           'Log_test_{}'.format(model.config.test_area))
        test_path = join('test', saving_path.split('/')[-1])
        if not exists(test_path):
            makedirs(test_path)
        if not exists(join(test_path, 'val_preds')):
            makedirs(join(test_path, 'val_preds'))

        step_id = 0
        epoch_id = 0
        last_min = -0.5
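        # The voting loop below runs until last_min reaches num_votes (or returns early
        # once full-cloud results are reported); each tf.errors.OutOfRangeError marks the
        # end of one pass over the validation iterator.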

        while last_min < num_votes:
            try:
                ops = (
                    self.prob_logits,
                    model.labels,
                    model.inputs['input_inds'],
                    model.inputs['cloud_inds'],
                )

                stacked_probs, stacked_labels, point_idx, cloud_idx = self.sess.run(
                    ops, {model.is_training: False})
                correct = np.sum(
                    np.argmax(stacked_probs, axis=1) == stacked_labels)
                acc = correct / float(np.prod(np.shape(stacked_labels)))
                print('step' + str(step_id) + ' acc:' + str(acc))
                stacked_probs = np.reshape(stacked_probs, [
                    model.config.val_batch_size, model.config.num_points,
                    model.config.num_classes
                ])

                for j in range(np.shape(stacked_probs)[0]):
                    probs = stacked_probs[j, :, :]
                    p_idx = point_idx[j, :]
                    c_i = cloud_idx[j][0]
                    self.test_probs[c_i][
                        p_idx] = test_smooth * self.test_probs[c_i][p_idx] + (
                            1 - test_smooth) * probs
                step_id += 1

            except tf.errors.OutOfRangeError:

                new_min = np.min(dataset.min_possibility['validation'])
                log_out(
                    'Epoch {:3d}, end. Min possibility = {:.1f}'.format(
                        epoch_id, new_min), self.Log_file)

                if last_min + 1 < new_min:

                    # Update last_min
                    last_min += 1

                    # Show vote results (computed on sub-sampled clouds, so not the final numbers)
                    log_out('\nConfusion on sub clouds', self.Log_file)
                    confusion_list = []

                    num_val = len(dataset.input_labels['validation'])

                    for i_test in range(num_val):
                        probs = self.test_probs[i_test]
                        preds = dataset.label_values[np.argmax(
                            probs, axis=1)].astype(np.int32)
                        labels = dataset.input_labels['validation'][i_test]

                        # Confs
                        confusion_list += [
                            confusion_matrix(labels, preds,
                                             dataset.label_values)
                        ]

                    # Regroup confusions
                    C = np.sum(np.stack(confusion_list),
                               axis=0).astype(np.float32)

                    # Rescale with the true number of points per class
                    C *= np.expand_dims(
                        val_proportions / (np.sum(C, axis=1) + 1e-6), 1)

                    # Compute IoUs
                    IoUs = DP.IoU_from_confusions(C)
                    m_IoU = np.mean(IoUs)
                    s = '{:5.2f} | '.format(100 * m_IoU)
                    for IoU in IoUs:
                        s += '{:5.2f} '.format(100 * IoU)
                    log_out(s + '\n', self.Log_file)

                    if int(np.ceil(new_min)) % 1 == 0:  # always true for an integer, so every vote update also reprojects

                        # Project predictions
                        log_out(
                            '\nReproject Vote #{:d}'.format(
                                int(np.floor(new_min))), self.Log_file)
                        proj_probs_list = []

                        for i_val in range(num_val):
                            # Reproject probs back to the evaluation points
                            proj_idx = dataset.val_proj[i_val]
                            probs = self.test_probs[i_val][proj_idx, :]
                            proj_probs_list += [probs]

                        # Show vote results
                        log_out('Confusion on full clouds', self.Log_file)
                        confusion_list = []
                        for i_test in range(num_val):
                            # Get the predicted labels
                            preds = dataset.label_values[np.argmax(
                                proj_probs_list[i_test],
                                axis=1)].astype(np.uint8)

                            # Confusion
                            labels = dataset.val_labels[i_test]
                            acc = np.sum(preds == labels) / len(labels)
                            log_out(
                                dataset.input_names['validation'][i_test] +
                                ' Acc:' + str(acc), self.Log_file)

                            confusion_list += [
                                confusion_matrix(labels, preds,
                                                 dataset.label_values)
                            ]
                            name = dataset.input_names['validation'][
                                i_test] + '.ply'
                            write_ply(join(test_path, 'val_preds', name),
                                      [preds, labels], ['pred', 'label'])

                        # Regroup confusions
                        C = np.sum(np.stack(confusion_list), axis=0)

                        IoUs = DP.IoU_from_confusions(C)
                        m_IoU = np.mean(IoUs)
                        s = '{:5.2f} | '.format(100 * m_IoU)
                        for IoU in IoUs:
                            s += '{:5.2f} '.format(100 * IoU)
                        log_out('-' * len(s), self.Log_file)
                        log_out(s, self.Log_file)
                        log_out('-' * len(s) + '\n', self.Log_file)
                        print('finished \n')
                        self.sess.close()
                        return

                self.sess.run(dataset.val_init_op)
                epoch_id += 1
                step_id = 0
                continue

        return
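
The per-point accumulation at the top of the loop, test_smooth * old + (1 - test_smooth) * new, is an exponential moving average over repeated passes. A tiny self-contained illustration with synthetic numbers (nothing below comes from the snippet):

import numpy as np

np.random.seed(0)
test_smooth = 0.95
accumulated = np.zeros(3, dtype=np.float32)      # running class probabilities for one point
for _ in range(200):                             # repeated noisy predictions of the same point
    new_probs = np.array([0.5, 0.3, 0.2]) + np.random.normal(0.0, 0.1, 3)
    accumulated = test_smooth * accumulated + (1.0 - test_smooth) * new_probs
print(accumulated)           # settles close to [0.5, 0.3, 0.2]
print(accumulated.argmax())  # 0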
Example No. 29
0
print("original_pc_folder: " + original_pc_folder)
print("sub_pc_folder: " + sub_pc_folder)

# dataset_path:       '/media/peterzhu/DATA/data/semantic3d/original_data'
# original_pc_folder: '/media/peterzhu/DATA/data/semantic3d/original_ply'
# sub_pc_folder:      '/media/peterzhu/DATA/data/semantic3d/input_0.06'

for pc_path in sorted(glob.glob(join(dataset_path, '*.txt'))):
    print(pc_path)
    file_name = pc_path.split('/')[-1][:-4]

    # skip clouds that have already been processed
    if exists(join(sub_pc_folder, file_name + '_KDTree.pkl')):
        continue

    pc = DP.load_pc_semantic3d(pc_path)
    pc_copy = np.array(pc)
    print(pc_copy.shape)
    print(pc_copy.min())
    print(pc_copy.max())
    # check if label exists
    label_path = pc_path[:-4] + '.labels'

    if exists(label_path):
        labels = DP.load_label_semantic3d(label_path)
        full_ply_path = join(original_pc_folder, file_name + '.ply')

        #  Subsample to save space
        sub_points, sub_colors, sub_labels = DP.grid_sub_sampling(
            pc[:, :3].astype(np.float32), pc[:, 4:7].astype(np.uint8), labels,
            0.01)
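
This example is also cut off mid-loop. A hedged sketch of the write-out step implied by the '_KDTree.pkl' existence check at the top of the loop, assuming the same write_ply / KDTree / pickle helpers used in the other examples (field names and folder layout are assumptions):

        # Hypothetical continuation, consistent with the existence check above.
        sub_ply_file = join(sub_pc_folder, file_name + '.ply')
        write_ply(sub_ply_file, [sub_points, sub_colors, sub_labels],
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

        search_tree = KDTree(sub_points)
        kd_tree_file = join(sub_pc_folder, file_name + '_KDTree.pkl')
        with open(kd_tree_file, 'wb') as f:
            pickle.dump(search_tree, f)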
Example No. 30
0
    def __init__(self, dataset, config):
        flat_inputs = dataset.flat_inputs
        self.config = config
        # Path of the result folder
        if self.config.saving:
            if self.config.saving_path is None:
                self.saving_path = time.strftime(
                    'results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
            else:
                self.saving_path = self.config.saving_path
            if not exists(self.saving_path):
                makedirs(self.saving_path)

        with tf.variable_scope('inputs'):
            self.inputs = dict()
            num_layers = self.config.num_layers
            self.inputs['features'] = flat_inputs[0]
            self.inputs['labels'] = flat_inputs[1]
            self.inputs['input_inds'] = flat_inputs[2]
            self.inputs['cloud_inds'] = flat_inputs[3]
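            # Unlike Example No. 27, this variant reads only four flat inputs; the per-layer
            # xyz / neighbour indices are presumably built inside the network, since
            # inference() also returns new_xyz and xyz below.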

            self.labels = self.inputs['labels']
            self.is_training = tf.placeholder(tf.bool, shape=())
            self.training_step = 1
            self.training_epoch = 0
            self.correct_prediction = 0
            self.accuracy = 0
            self.mIou_list = [0]
            self.class_weights = DP.get_class_weights(dataset.name)
            self.time_stamp = time.strftime('_%Y-%m-%d_%H-%M-%S',
                                            time.gmtime())
            self.Log_file = open(
                'log_train_' + dataset.name + str(dataset.val_split) +
                self.time_stamp + '.txt', 'a')

        with tf.variable_scope('layers'):
            self.logits, self.new_xyz, self.xyz = self.inference(
                self.inputs, self.is_training)

        #####################################################################
        # Ignore invalid (unlabeled) points when computing the loss
        #####################################################################
        with tf.variable_scope('loss'):
            self.logits = tf.reshape(self.logits, [-1, config.num_classes])
            self.labels = tf.reshape(self.labels, [-1])

            # Boolean mask of points that should be ignored
            ignored_bool = tf.zeros_like(self.labels, dtype=tf.bool)
            for ign_label in self.config.ignored_label_inds:
                ignored_bool = tf.logical_or(ignored_bool,
                                             tf.equal(self.labels, ign_label))

            # Collect logits and labels that are not ignored
            valid_idx = tf.squeeze(tf.where(tf.logical_not(ignored_bool)))
            valid_logits = tf.gather(self.logits, valid_idx, axis=0)
            valid_labels_init = tf.gather(self.labels, valid_idx, axis=0)

            # Remap label values into the contiguous range [0, num_classes) expected by the logits
            reducing_list = tf.range(self.config.num_classes, dtype=tf.int32)
            inserted_value = tf.zeros((1, ), dtype=tf.int32)
            for ign_label in self.config.ignored_label_inds:
                reducing_list = tf.concat([
                    reducing_list[:ign_label], inserted_value,
                    reducing_list[ign_label:]
                ], 0)
            valid_labels = tf.gather(reducing_list, valid_labels_init)

            aug_loss_weights = tf.constant([0.1, 0.1, 0.3, 0.5, 0.5])
            aug_loss = 0
            for i in range(self.config.num_layers):
                centroids = tf.reduce_mean(self.new_xyz[i], axis=2)
                relative_dis = tf.sqrt(
                    tf.reduce_sum(tf.square(centroids -
                                            self.xyz[i]), axis=-1) + 1e-12)
                aug_loss = aug_loss + aug_loss_weights[i] * tf.reduce_mean(
                    tf.reduce_mean(relative_dis, axis=-1), axis=-1)

            self.loss = self.get_loss(valid_logits, valid_labels,
                                      self.class_weights) + aug_loss

        with tf.variable_scope('optimizer'):
            self.learning_rate = tf.Variable(config.learning_rate,
                                             trainable=False,
                                             name='learning_rate')
            self.train_op = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.loss)
            self.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        with tf.variable_scope('results'):
            self.correct_prediction = tf.nn.in_top_k(valid_logits,
                                                     valid_labels, 1)
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_prediction, tf.float32))
            self.prob_logits = tf.nn.softmax(self.logits)

            tf.summary.scalar('learning_rate', self.learning_rate)
            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('accuracy', self.accuracy)

        my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.saver = tf.train.Saver(my_vars, max_to_keep=100)
        c_proto = tf.ConfigProto()
        c_proto.gpu_options.allow_growth = True
        self.sess = tf.Session(config=c_proto)
        self.merged = tf.summary.merge_all()
        self.train_writer = tf.summary.FileWriter(config.train_sum_dir,
                                                  self.sess.graph)
        self.sess.run(tf.global_variables_initializer())
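
Example No. 30 differs from Example No. 27 mainly in the aug_loss term. The NumPy sketch below spells out what one layer of that term computes, assuming new_xyz[i] has shape [B, N, K, 3] and xyz[i] has shape [B, N, 3] (shapes that are not visible in the snippet):

import numpy as np

# One layer of the aug_loss above: penalise how far the centroid of each point's
# K shifted/sampled neighbours drifts from the original point position.
B, N, K = 2, 8, 4                                           # toy sizes
new_xyz_i = np.random.rand(B, N, K, 3).astype(np.float32)   # assumed [B, N, K, 3]
xyz_i = np.random.rand(B, N, 3).astype(np.float32)          # assumed [B, N, 3]

centroids = new_xyz_i.mean(axis=2)                                       # [B, N, 3]
relative_dis = np.sqrt(((centroids - xyz_i) ** 2).sum(axis=-1) + 1e-12)  # [B, N]
layer_term = relative_dis.mean(axis=-1).mean(axis=-1)                    # scalar
print(layer_term)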