def convert_pc2ply(anno_path, save_path):
    """
    Convert original dataset files to a ply file (each line is XYZRGBL).
    We aggregate all the points from each instance in the room.
    :param anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
    :param save_path: path to save original point clouds (each line is XYZRGBL)
    :return: None
    """
    data_list = []

    for f in glob.glob(join(anno_path, '*.txt')):
        class_name = os.path.basename(f).split('_')[0]
        if class_name not in gt_class:  # note: some rooms contain an extra 'stairs' class; map it to clutter
            class_name = 'clutter'
        pc = pd.read_csv(f, header=None, delim_whitespace=True).values
        labels = np.ones((pc.shape[0], 1)) * gt_class2label[class_name]
        data_list.append(np.concatenate([pc, labels], 1))  # Nx7

    pc_label = np.concatenate(data_list, 0)
    xyz_min = np.amin(pc_label, axis=0)[0:3]
    pc_label[:, 0:3] -= xyz_min

    xyz = pc_label[:, :3].astype(np.float32)
    colors = pc_label[:, 3:6].astype(np.uint8)
    labels = pc_label[:, 6].astype(np.uint8)
    write_ply(save_path, (xyz, colors, labels),
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])
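
A minimal usage sketch for the function above, assuming the S3DIS layout and that the module-level globals it relies on (gt_class, gt_class2label) and its imports are in scope; the paths are hypothetical:

import os
from os.path import join

dataset_root = '/data/S3DIS/Stanford3dDataset_v1.2_Aligned_Version'  # hypothetical path
out_root = '/data/S3DIS/original_ply'                                # hypothetical path
os.makedirs(out_root, exist_ok=True)

anno_path = join(dataset_root, 'Area_1', 'office_2', 'Annotations')
save_path = join(out_root, 'Area_1_office_2.ply')
convert_pc2ply(anno_path, save_path)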
Example #2
def convert_pc2ply(anno_path, save_path):
    """
    Convert original dataset files to a ply file (each line is XYZRGBL).
    We aggregate all the points from each instance in the room.
    :param anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
    :param save_path: path to save original point clouds (each line is XYZRGBL)
    :return: None
    note: on disk, each room generates four files: raw_pc.ply, sub_pc.ply, sub_pc.pkl (the KDTree) and proj_idx.pkl (each raw point's nearest neighbor in the sub_pc)
    """

    # store points and labels for the room (corresponding to anno_path) --yc
    data_list = []

    for f in glob.glob(join(anno_path, '*.txt')):
        class_name = os.path.basename(f).split('_')[0]
        if class_name not in gt_class:  # note: some rooms contain an extra 'stairs' class; map it to clutter
            class_name = 'clutter'
        pc = pd.read_csv(f, header=None, delim_whitespace=True).values
        labels = np.ones((pc.shape[0], 1)) * gt_class2label[class_name]
        data_list.append(np.concatenate([pc, labels], 1))  # Nx7

    # translate the data by xyz_min--yc
    pc_label = np.concatenate(data_list, 0)  # Nx7 as a np object
    xyz_min = np.amin(pc_label, axis=0)[0:3]
    pc_label[:, 0:3] -= xyz_min

    # manage data types and save in PLY format--yc
    xyz = pc_label[:, :3].astype(np.float32)
    colors = pc_label[:, 3:6].astype(np.uint8)
    labels = pc_label[:, 6].astype(np.uint8)
    write_ply(save_path, (xyz, colors, labels),
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    # save sub_cloud and KDTree file
    sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(
        xyz, colors, labels, sub_grid_size)
    sub_colors = sub_colors / 255.0
    sub_ply_file = join(sub_pc_folder, save_path.split('/')[-1][:-4] + '.ply')
    write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    search_tree = KDTree(sub_xyz)
    kd_tree_file = join(sub_pc_folder,
                        str(save_path.split('/')[-1][:-4]) + '_KDTree.pkl')
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    # for each raw point in xyz, the index of its nearest neighbor in the KDTree built from the sub-sampled PC --yc
    # https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html
    proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
    proj_idx = proj_idx.astype(np.int32)
    proj_save = join(sub_pc_folder,
                     str(save_path.split('/')[-1][:-4]) + '_proj.pkl')
    with open(proj_save, 'wb') as f:
        pickle.dump([proj_idx, labels], f)
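
The _proj.pkl written above stores, for every raw point, the index of its nearest sub-sampled point; the test() methods further down use exactly these indices to project sub-cloud predictions back to full resolution. A minimal sketch of that round trip with stand-in probabilities (the file name is hypothetical):

import pickle
import numpy as np

with open('Area_1_office_2_proj.pkl', 'rb') as f:  # hypothetical path
    proj_idx, raw_labels = pickle.load(f)

num_classes = 13  # S3DIS
rng = np.random.default_rng(0)
# Stand-in for network output on the sub-sampled cloud
sub_probs = rng.random((proj_idx.max() + 1, num_classes))

full_probs = sub_probs[proj_idx, :]         # (n_raw, num_classes)
full_preds = np.argmax(full_probs, axis=1)  # per raw point class id
print('raw-resolution accuracy:', np.mean(full_preds == raw_labels))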
Example #3
def convert(filename, output_dir):

    data = numpy.loadtxt(filename)

    output_filename = os.path.join(output_dir, os.path.basename(filename))

    points = data[:, 0:3]
    colors = data[:, 3:6]

    field_names = ['x', 'y', 'z', 'red', 'green', 'blue']

    helper_ply.write_ply(output_filename, [points, colors], field_names)
Example #4
def convert_for_test(filename, output_dir, grid_size=0.001, protocol="field"):

    original_pc_folder = os.path.join(output_dir, 'test')
    if not os.path.exists(original_pc_folder):
        os.mkdir(original_pc_folder)

    sub_pc_folder = os.path.join(output_dir, 'input_{:.3f}'.format(grid_size))
    if not os.path.exists(sub_pc_folder):
        os.mkdir(sub_pc_folder)

    basename = os.path.basename(filename)[:-4]

    data = numpy.loadtxt(filename)

    points = data[:, 0:3].astype(numpy.float32)

    if protocol == "synthetic" or protocol == "field_only_xyz":
        # TODO: hack, must be removed
        colors = numpy.zeros((data.shape[0], 3), dtype=numpy.uint8)
    elif protocol == "field":
        adr = normalize(data[:, 3:-1]) * 255
        colors = adr.astype(numpy.uint8)
    else:
        exit("unknown protocol")

    field_names = ['x', 'y', 'z', 'red', 'green', 'blue']

    # Save original point cloud
    full_ply_path = os.path.join(original_pc_folder, basename + '.ply')
    helper_ply.write_ply(full_ply_path, [points, colors], field_names)

    # save sub_cloud and KDTree file
    sub_xyz, sub_colors = DP.grid_sub_sampling(points,
                                               colors,
                                               grid_size=grid_size)
    sub_colors = sub_colors / 255.0
    sub_ply_file = os.path.join(sub_pc_folder, basename + '.ply')
    helper_ply.write_ply(sub_ply_file, [sub_xyz, sub_colors], field_names)
    labels = numpy.zeros(data.shape[0], dtype=numpy.uint8)

    search_tree = sklearn.neighbors.KDTree(sub_xyz, leaf_size=50)
    kd_tree_file = os.path.join(sub_pc_folder, basename + '_KDTree.pkl')
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    proj_idx = numpy.squeeze(search_tree.query(points, return_distance=False))
    proj_idx = proj_idx.astype(numpy.int32)
    proj_save = os.path.join(sub_pc_folder, basename + '_proj.pkl')
    with open(proj_save, 'wb') as f:
        pickle.dump([proj_idx, labels], f)
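
DP.grid_sub_sampling in these snippets is the repo's compiled helper that pools all points falling into the same voxel of side grid_size. A rough pure-NumPy illustration of the idea (an illustration only, not the actual implementation, which also carries colors and labels through):

import numpy as np

def grid_sub_sampling_sketch(points, grid_size):
    # Average all points that fall into the same voxel of side grid_size.
    voxels = np.floor(points / grid_size).astype(np.int64)
    _, inverse, counts = np.unique(voxels, axis=0,
                                   return_inverse=True, return_counts=True)
    inverse = inverse.reshape(-1)     # guard against versions returning (n, 1)
    sums = np.zeros((counts.size, 3), dtype=np.float64)
    np.add.at(sums, inverse, points)  # accumulate per-voxel coordinate sums
    return (sums / counts[:, None]).astype(np.float32)

pts = np.random.rand(100000, 3).astype(np.float32)
print(grid_sub_sampling_sketch(pts, 0.06).shape)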
Example #5
def convert_pc2ply(anno_path, save_path):
    """
    Convert original dataset files to a ply file (each line is XYZRGBL).
    We aggregate all the points from each instance in the room.
    :param anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
    :param save_path: path to save original point clouds (each line is XYZRGBL)
    :return: None
    """
    data_list = []

    for f in glob.glob(join(anno_path, '*.txt')):
        class_name = os.path.basename(f).split('_')[0]
        if class_name not in gt_class:  # note: some rooms contain an extra 'stairs' class; map it to clutter
            class_name = 'clutter'
        pc = pd.read_csv(f, header=None, delim_whitespace=True).values
        labels = np.ones((pc.shape[0], 1)) * gt_class2label[class_name]
        data_list.append(np.concatenate([pc, labels], 1))  # Nx7

    pc_label = np.concatenate(data_list, 0)
    xyz_min = np.amin(pc_label, axis=0)[0:3]
    pc_label[:, 0:3] -= xyz_min

    xyz = pc_label[:, :3].astype(np.float32)
    colors = pc_label[:, 3:6].astype(np.uint8)
    labels = pc_label[:, 6].astype(np.uint8)
    write_ply(save_path, (xyz, colors, labels),
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    # save sub_cloud and KDTree file
    sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(
        xyz, colors, labels, sub_grid_size)
    sub_colors = sub_colors / 255.0
    sub_ply_file = join(sub_pc_folder, save_path.split('/')[-1][:-4] + '.ply')
    write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    search_tree = KDTree(sub_xyz)
    kd_tree_file = join(sub_pc_folder,
                        str(save_path.split('/')[-1][:-4]) + '_KDTree.pkl')
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
    proj_idx = proj_idx.astype(np.int32)
    proj_save = join(sub_pc_folder,
                     str(save_path.split('/')[-1][:-4]) + '_proj.pkl')
    with open(proj_save, 'wb') as f:
        pickle.dump([proj_idx, labels], f)
Example #6
        full_ply_path = join(original_pc_folder, file_name + '.ply')

        #  Subsample to save space
        sub_points, sub_colors, sub_labels = DP.grid_sub_sampling(
            pc[:, :3].astype(np.float32), pc[:, 4:7].astype(np.uint8), labels,
            0.01)
        print(np.array(sub_points).shape)
        print(np.array(sub_colors).shape)
        print(np.array(sub_labels).shape)
        print("sub_points min:" + str(sub_points.min()))
        print("sub_points max:" + str(sub_points.max()))

        sub_labels = np.squeeze(sub_labels)
        #Directory
        print("writing FULL ply files    *****Start*****")
        write_ply(full_ply_path, (sub_points, sub_colors, sub_labels),
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])
        print("writing FULL ply files    *****End*****")

        # save sub_cloud and KDTree file
        sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(
            sub_points, sub_colors, sub_labels, grid_size)
        sub_colors = sub_colors / 255.0
        sub_labels = np.squeeze(sub_labels)
        sub_ply_file = join(sub_pc_folder, file_name + '.ply')
        print("writing ply files    *****Start*****")
        write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])
        print("writing ply files *****End*****")

        search_tree = KDTree(sub_xyz, leaf_size=50)
        kd_tree_file = join(sub_pc_folder, file_name + '_KDTree.pkl')
Example #7
    def test(self, model, dataset, num_votes=100):

        # Smoothing parameter for votes
        test_smooth = 0.95

        # Initialise iterator with validation/test data
        self.sess.run(dataset.val_init_op)

        # Number of points per class in validation set
        val_proportions = np.zeros(model.config.num_classes, dtype=np.float32)
        i = 0
        for label_val in dataset.label_values:
            if label_val not in dataset.ignored_labels:
                val_proportions[i] = np.sum([
                    np.sum(labels == label_val)
                    for labels in dataset.val_labels
                ])
                i += 1

        # Test saving path
        saving_path = join('results', 'Log_test_{}'.format(
            model.config.test_area))  # %Y-%m-%d_%H-%M-%S', time.gmtime())
        test_path = join('test', saving_path.split('/')[-1])
        makedirs(test_path) if not exists(test_path) else None
        makedirs(join(
            test_path,
            'val_preds')) if not exists(join(test_path, 'val_preds')) else None

        step_id = 0
        epoch_id = 0
        last_min = -0.5

        while last_min < num_votes:
            try:
                ops = (
                    self.prob_logits,
                    model.labels,
                    model.inputs['input_inds'],
                    model.inputs['cloud_inds'],
                )

                stacked_probs, stacked_labels, point_idx, cloud_idx = self.sess.run(
                    ops, {model.is_training: False})
                correct = np.sum(
                    np.argmax(stacked_probs, axis=1) == stacked_labels)
                acc = correct / float(np.prod(np.shape(stacked_labels)))
                print('step' + str(step_id) + ' acc:' + str(acc))
                stacked_probs = np.reshape(stacked_probs, [
                    model.config.val_batch_size, model.config.num_points,
                    model.config.num_classes
                ])

                for j in range(np.shape(stacked_probs)[0]):
                    probs = stacked_probs[j, :, :]
                    p_idx = point_idx[j, :]
                    c_i = cloud_idx[j][0]
                    self.test_probs[c_i][
                        p_idx] = test_smooth * self.test_probs[c_i][p_idx] + (
                            1 - test_smooth) * probs
                step_id += 1

            except tf.errors.OutOfRangeError:

                new_min = np.min(dataset.min_possibility['validation'])
                log_out(
                    'Epoch {:3d}, end. Min possibility = {:.1f}'.format(
                        epoch_id, new_min), self.Log_file)

                if last_min + 1 < new_min:

                    # Update last_min
                    last_min += 1

                    # Show vote results (computed on sub-clouds, so these are not the final values)
                    log_out('\nConfusion on sub clouds', self.Log_file)
                    confusion_list = []

                    num_val = len(dataset.input_labels['validation'])

                    for i_test in range(num_val):
                        probs = self.test_probs[i_test]
                        preds = dataset.label_values[np.argmax(
                            probs, axis=1)].astype(np.int32)
                        labels = dataset.input_labels['validation'][i_test]

                        # Confs
                        confusion_list += [
                            confusion_matrix(labels, preds,
                                             dataset.label_values)
                        ]

                    # Regroup confusions
                    C = np.sum(np.stack(confusion_list),
                               axis=0).astype(np.float32)

                    # Rescale with the right number of points per class
                    C *= np.expand_dims(
                        val_proportions / (np.sum(C, axis=1) + 1e-6), 1)

                    # Compute IoUs
                    IoUs = DP.IoU_from_confusions(C)
                    m_IoU = np.mean(IoUs)
                    s = '{:5.2f} | '.format(100 * m_IoU)
                    for IoU in IoUs:
                        s += '{:5.2f} '.format(100 * IoU)
                    log_out(s + '\n', self.Log_file)

                    if int(np.ceil(new_min)) % 1 == 0:

                        # Project predictions
                        log_out(
                            '\nReproject Vote #{:d}'.format(
                                int(np.floor(new_min))), self.Log_file)
                        proj_probs_list = []

                        for i_val in range(num_val):
                            # Reproject probs back to the evaluations points
                            proj_idx = dataset.val_proj[i_val]
                            probs = self.test_probs[i_val][proj_idx, :]
                            proj_probs_list += [probs]

                        # Show vote results
                        log_out('Confusion on full clouds', self.Log_file)
                        confusion_list = []
                        for i_test in range(num_val):
                            # Get the predicted labels
                            preds = dataset.label_values[np.argmax(
                                proj_probs_list[i_test],
                                axis=1)].astype(np.uint8)

                            # Confusion
                            labels = dataset.val_labels[i_test]
                            acc = np.sum(preds == labels) / len(labels)
                            log_out(
                                dataset.input_names['validation'][i_test] +
                                ' Acc:' + str(acc), self.Log_file)

                            confusion_list += [
                                confusion_matrix(labels, preds,
                                                 dataset.label_values)
                            ]
                            name = dataset.input_names['validation'][
                                i_test] + '.ply'
                            write_ply(join(test_path, 'val_preds', name),
                                      [preds, labels], ['pred', 'label'])

                        # Regroup confusions
                        C = np.sum(np.stack(confusion_list), axis=0)

                        IoUs = DP.IoU_from_confusions(C)
                        m_IoU = np.mean(IoUs)
                        s = '{:5.2f} | '.format(100 * m_IoU)
                        for IoU in IoUs:
                            s += '{:5.2f} '.format(100 * IoU)
                        log_out('-' * len(s), self.Log_file)
                        log_out(s, self.Log_file)
                        log_out('-' * len(s) + '\n', self.Log_file)
                        print('finished \n')
                        self.sess.close()
                        return

                self.sess.run(dataset.val_init_op)
                epoch_id += 1
                step_id = 0
                continue

        return
Example #8
            else:
                xyz, rgb = DP.read_ply_data(pc_path,
                                            with_rgb=True,
                                            with_label=False)
                labels = np.zeros(len(xyz), dtype=np.uint8)

            sub_ply_file = join(out_folder, cloud_name + '.ply')
            if sample_type == 'grid':
                sub_xyz, sub_rgb, sub_labels = DP.grid_sub_sampling(
                    xyz, rgb, labels, grid_size)
            else:
                sub_xyz, sub_rgb, sub_labels = DP.random_sub_sampling(
                    xyz, rgb, labels, random_sample_ratio)

            sub_rgb = sub_rgb / 255.0
            sub_labels = np.squeeze(sub_labels)
            write_ply(sub_ply_file, [sub_xyz, sub_rgb, sub_labels],
                      ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

            search_tree = KDTree(sub_xyz, leaf_size=50)
            kd_tree_file = join(out_folder, cloud_name + '_KDTree.pkl')
            with open(kd_tree_file, 'wb') as f:
                pickle.dump(search_tree, f)

            proj_idx = np.squeeze(search_tree.query(xyz,
                                                    return_distance=False))
            proj_idx = proj_idx.astype(np.int32)
            proj_save = join(out_folder, cloud_name + '_proj.pkl')
            with open(proj_save, 'wb') as f:
                pickle.dump([proj_idx, labels], f)
Example #9
    def test(self, model, dataset, num_votes=100):

        # Smoothing parameter for votes
        test_smooth = 0.98

        # Initialise iterator with train data
        self.sess.run(dataset.test_init_op)

        # Test saving path
        saving_path = time.strftime('results/Log_%Y-%m-%d_%H-%M-%S',
                                    time.gmtime())
        test_path = join('test', saving_path.split('/')[-1])
        makedirs(test_path) if not exists(test_path) else None
        makedirs(join(test_path, 'predictions')) if not exists(
            join(test_path, 'predictions')) else None
        makedirs(
            join(test_path,
                 'probs')) if not exists(join(test_path, 'probs')) else None

        #####################
        # Network predictions
        #####################

        step_id = 0
        epoch_id = 0
        last_min = -0.5

        while last_min < num_votes:

            try:
                ops = (
                    self.prob_logits,
                    model.labels,
                    model.inputs['input_inds'],
                    model.inputs['cloud_inds'],
                )

                stacked_probs, stacked_labels, point_idx, cloud_idx = self.sess.run(
                    ops, {model.is_training: False})
                stacked_probs = np.reshape(stacked_probs, [
                    model.config.val_batch_size, model.config.num_points,
                    model.config.num_classes
                ])

                for j in range(np.shape(stacked_probs)[0]):
                    probs = stacked_probs[j, :, :]
                    inds = point_idx[j, :]
                    c_i = cloud_idx[j][0]
                    self.test_probs[c_i][inds] = test_smooth * self.test_probs[
                        c_i][inds] + (1 - test_smooth) * probs

                step_id += 1
                log_string(
                    'Epoch {:3d}, step {:3d}. min possibility = {:.1f}'.format(
                        epoch_id, step_id,
                        np.min(dataset.min_possibility['test'])), self.log_out)

            except tf.errors.OutOfRangeError:

                # Save predicted cloud
                new_min = np.min(dataset.min_possibility['test'])
                log_string(
                    'Epoch {:3d}, end. Min possibility = {:.1f}'.format(
                        epoch_id, new_min), self.log_out)
                print("last_min " + str(last_min))
                print("new_min " + str(new_min))
                files = dataset.test_files
                for j, file_path in enumerate(files):
                    cloud_name = file_path.split('/')[-1]
                    print("cloud_name " + str(cloud_name))
                    ascii_name = join(test_path, 'predictions',
                                      dataset.ascii_files[cloud_name])
                    print("ascii_name " + str(ascii_name))

                if last_min + 4 < new_min:

                    print('Saving clouds')

                    # Update last_min
                    last_min = new_min

                    # Project predictions
                    print('\nReproject Vote #{:d}'.format(
                        int(np.floor(new_min))))
                    t1 = time.time()

                    files = dataset.test_files
                    i_test = 0
                    for i, file_path in enumerate(files):
                        # Get file
                        points = self.load_evaluation_points(file_path)
                        points = points.astype(np.float16)

                        # Reproject probs
                        probs = np.zeros(shape=[np.shape(points)[0], 8],
                                         dtype=np.float16)
                        proj_index = dataset.test_proj[i_test]

                        probs = self.test_probs[i_test][proj_index, :]

                        # Insert false columns for ignored labels
                        probs2 = probs
                        for l_ind, label_value in enumerate(
                                dataset.label_values):
                            if label_value in dataset.ignored_labels:
                                probs2 = np.insert(probs2, l_ind, 0, axis=1)

                        # Get the predicted labels
                        preds = dataset.label_values[np.argmax(
                            probs2, axis=1)].astype(np.uint8)

                        # Save plys
                        cloud_name = file_path.split('/')[-1]
                        print("cloud_name " + str(cloud_name))

                        # New addition: also load RGB so predictions are saved with color
                        rgb = self.load_evaluation_rgb(file_path)
                        write_ply(
                            join(test_path, 'predictions',
                                 cloud_name), [points, rgb, preds],
                            ['x', 'y', 'z', 'red', 'green', 'blue', 'pred'])

                        # Save ascii preds
                        ascii_name = join(test_path, 'predictions',
                                          dataset.ascii_files[cloud_name])
                        print("ascii_name " + str(ascii_name))
                        np.savetxt(ascii_name, preds, fmt='%d')
                        log_string(ascii_name + ' has been saved', self.log_out)
                        i_test += 1

                    t2 = time.time()
                    print('Done in {:.1f} s\n'.format(t2 - t1))
                    self.sess.close()
                    return

                self.sess.run(dataset.test_init_op)
                epoch_id += 1
                step_id = 0
                continue
        return
Example #10
def convert_pc2plyandweaklabels(anno_path, save_path, sub_pc_folder,
                                weak_label_folder, weak_label_ratio,
                                sub_grid_size, gt_class, gt_class2label):
    """
    Convert original dataset files (consisting of rooms) to a ply file plus weak labels. On disk, each room generates several files: raw_pc.ply, sub_pc.ply, sub_pc.pkl (the KDTree), proj_idx.pkl (each raw point's nearest neighbor in the sub_pc), and weak labels for the raw and sub point clouds, respectively.
    :param anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
    :param save_path: path to save original point clouds (each line is XYZRGBL), e.g., xx.ply
    :return: None
    """

    # save raw_cloud
    if not os.path.exists(save_path):
        data_list = []
        # aggregate a room's instances into 1 pc
        for f in glob.glob(join(anno_path, '*.txt')):
            class_name = os.path.basename(f).split('_')[0]
            if class_name not in gt_class:  # note: some rooms contain an extra 'stairs' class; map it to clutter
                class_name = 'clutter'
            pc = pd.read_csv(f, header=None, delim_whitespace=True).values
            labels = np.ones((pc.shape[0], 1)) * gt_class2label[class_name]
            data_list.append(np.concatenate([pc, labels], 1))  # Nx7

        # translate the data by xyz_min--yc
        pc_label = np.concatenate(data_list, 0)  # Nx7 as a np object
        xyz_min = np.amin(pc_label, axis=0)[0:3]
        pc_label[:, 0:3] -= xyz_min
        # manage data types and save in PLY format--yc
        xyz = pc_label[:, :3].astype(np.float32)
        colors = pc_label[:, 3:6].astype(np.uint8)
        labels = pc_label[:, 6].astype(np.uint8)
        write_ply(save_path, (xyz, colors, labels),
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])
    else:
        # if existed then read this ply file to fill the data/xyz/colors/labels
        data = read_ply(save_path)  # ply format: x,y,z,red,green,blue,class
        xyz = np.vstack((data['x'], data['y'],
                         data['z'])).T  # (N',3), note the transpose symbol
        colors = np.vstack(
            (data['red'], data['green'],
             data['blue'])).T  # (N',3), note the transpose symbol
        labels = data['class']
        pc_label = np.concatenate(
            (xyz, colors, np.expand_dims(labels, axis=1)), axis=1)  # (N,7)

    # save sub_cloud
    sub_ply_file = join(sub_pc_folder, save_path.split('/')[-1][:-4] + '.ply')
    if not os.path.exists(sub_ply_file):
        sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(
            xyz, colors, labels, sub_grid_size)
        sub_colors = sub_colors / 255.0
        write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])
    else:
        data = read_ply(sub_ply_file)  # ply format: x,y,z,red,green,blue,class
        sub_xyz = np.vstack((data['x'], data['y'],
                             data['z'])).T  # (N',3), note the transpose symbol
        sub_colors = np.vstack(
            (data['red'], data['green'],
             data['blue'])).T  # (N',3), note the transpose symbol
        sub_labels = data['class']

    # save KDTree for sub_pc
    kd_tree_file = join(sub_pc_folder,
                        str(save_path.split('/')[-1][:-4]) + '_KDTree.pkl')
    if not os.path.exists(kd_tree_file):
        search_tree = KDTree(sub_xyz)
        with open(kd_tree_file, 'wb') as f:
            pickle.dump(search_tree, f)
    else:
        # load the existing tree; the projection step below still needs it
        with open(kd_tree_file, 'rb') as f:
            search_tree = pickle.load(f)

    # save projection indices for all raw points onto the corresponding sub_pc
    proj_save = join(sub_pc_folder,
                     str(save_path.split('/')[-1][:-4]) + '_proj.pkl')
    if not os.path.exists(proj_save):
        proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
        proj_idx = proj_idx.astype(np.int32)
        with open(proj_save, 'wb') as f:
            pickle.dump([proj_idx, labels], f)
    """
    USED for weakly semantic segmentation
    - save raw pc's weak labels
    - save sub pc's weak labels
    """
    # save raw pc's weak label mask
    weak_label_ply_file = join(
        weak_label_folder,
        save_path.split('/')[-1][:-4] + '_weak_label.ply')
    if not os.path.exists(weak_label_ply_file):
        # choose weak points by randomly selecting weak_label_ratio*N points and mark them with a mask
        num_cloud_points = pc_label.shape[0]
        weak_label_mask = np.zeros((num_cloud_points, 1), dtype=np.uint8)
        # BUG FIXED: already fixed; `replace` had to be set correctly here, otherwise a bug resulted
        selected_idx = np.random.choice(num_cloud_points,
                                        int(num_cloud_points *
                                            weak_label_ratio),
                                        replace=False)
        weak_label_mask[selected_idx, :] = 1
        write_ply(weak_label_ply_file, (weak_label_mask, ), ['weak_mask'])
    else:
        data = read_ply(weak_label_ply_file)  # ply format: weak_mask
        weak_label_mask = data['weak_mask']

    # save sub pc's weak label mask
    weak_label_sub_file = join(
        weak_label_folder,
        save_path.split('/')[-1][:-4] + '_sub_weak_label.ply')
    if not os.path.exists(weak_label_sub_file):
        # HACK: grid_sub_sampling is deterministic for identical inputs, so sub_xyz and sub_colors are the same when it is called a 2nd time
        _, _, weak_label_sub_mask = DP.grid_sub_sampling(
            xyz, colors, weak_label_mask, sub_grid_size)
        write_ply(weak_label_sub_file, (weak_label_sub_mask, ), ['weak_mask'])
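
A sketch of how a training loader might consume the weak-label mask written above, assuming the repo's read_ply helper is in scope; only masked points would contribute to the supervised loss (file names are hypothetical):

import numpy as np

data = read_ply('Area_1_office_2_weak_label.ply')  # hypothetical path
weak_mask = data['weak_mask'].astype(bool)         # (N,) 0/1 selection mask

full = read_ply('Area_1_office_2.ply')             # hypothetical path
labels = full['class']
supervised_idx = np.flatnonzero(weak_mask)         # indices of labelled points
print('labelled fraction:', supervised_idx.size / labels.size)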
Example #11
for pc_path in glob.glob(join(dataset_path, '*.txt')):
    print(pc_path)
    file_name = pc_path.split('/')[-1][:-4]

    # check if it has already been calculated
    if exists(join(sub_pc_folder, file_name + '_KDTree.pkl')):
        continue

    pc = DP.load_pc_semantic3d(pc_path)
    xyz_min = np.amin(pc, axis=0)[0:3]
    pc[:, 0:3] -= xyz_min

    print("Testing")
    full_ply_path = join(original_pc_folder, file_name + '.ply')
    write_ply(full_ply_path, (pc[:, :3].astype(np.float32)), ['x', 'y', 'z'])

    # save sub_cloud and KDTree file
    sub_xyz, sub_colors = DP.grid_sub_sampling(pc[:, :3].astype(np.float32),
                                               pc[:, :3].astype(np.uint8),
                                               grid_size=grid_size)
    #sub_colors = sub_colors / 255.0
    sub_ply_file = join(sub_pc_folder, file_name + '.ply')
    write_ply(sub_ply_file, [sub_xyz], ['x', 'y', 'z'])
    labels = np.zeros(pc.shape[0], dtype=np.uint8)

    search_tree = KDTree(sub_xyz, leaf_size=50)
    kd_tree_file = join(sub_pc_folder, file_name + '_KDTree.pkl')
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)
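
The pickled sklearn KDTree round-trips cleanly, which is how the fuller examples above produce their *_proj.pkl files. A minimal sketch with a hypothetical file name and stand-in query points:

import pickle
import numpy as np

with open('input_0.060/bildstein_station1_KDTree.pkl', 'rb') as f:  # hypothetical path
    search_tree = pickle.load(f)

query_xyz = np.random.rand(1000, 3).astype(np.float32)  # stand-in raw points
proj_idx = np.squeeze(search_tree.query(query_xyz, return_distance=False))
proj_idx = proj_idx.astype(np.int32)
print(proj_idx.shape)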
Example #12
                         delim_whitespace=True).values  # TODO: read from PLY instead?
        labels = np.ones((pc.shape[0], 1)) * class2labels[class_name]
        data_list.append(np.concatenate([pc, labels], 1))  # Nx7

    pc_label = np.concatenate(data_list, 0)
    # TODO: if this step is skipped and the data is read from PLY, there is no need to keep the original folder
    xyz_min = np.amin(pc_label, axis=0)[0:3]
    pc_label[:, 0:3] -= xyz_min

    xyz = pc_label[:, :3].astype(np.float32)
    colors = pc_label[:, 3:6].astype(np.uint8)
    labels = pc_label[:, 6].astype(np.uint8)
    ply_file = os.path.join(path_orig, split, case + ".ply")
    write_ply(ply_file, (xyz, colors, labels),
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    # save sub_cloud and KDTree file
    sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(
        xyz, colors, labels, sub_grid_size)
    sub_colors = sub_colors / 255.0
    sub_ply_file = os.path.join(path_out_sub, case + ".ply")
    write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    search_tree = KDTree(sub_xyz)
    kd_tree_file = os.path.join(path_out_sub, case + "_KDTree.pkl")
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
Example #13
    def test(self, model, dataset, run, path_cls, test_name="", num_votes=100):

        # Smoothing parameter for votes
        test_smooth = 0.95

        # Initialise iterator with validation/test data
        self.sess.run(dataset.val_init_op)

        # Number of points per class in validation set
        val_proportions = np.zeros(model.config.num_classes, dtype=np.float32)
        i = 0
        for label_val in dataset.label_values:
            if label_val not in dataset.ignored_labels:
                val_proportions[i] = np.sum([np.sum(labels == label_val) for labels in dataset.val_labels])
                i += 1

        # Test saving path
        test_path = os.path.join(run, 'predictions_' + test_name)
        makedirs(test_path) if not exists(test_path) else None

        step_id = 0
        epoch_id = 0
        last_min = -0.5

        while last_min < num_votes:
            try:
                ops = (self.prob_logits,
                       model.labels,
                       model.inputs['input_inds'],
                       model.inputs['cloud_inds'],
                       )
                #print("a")
                stacked_probs, stacked_labels, point_idx, cloud_idx = self.sess.run(ops, {model.is_training: False})  # TODO: extracts probs; can the case be identified from cloud_idx?
                correct = np.sum(np.argmax(stacked_probs, axis=1) == stacked_labels)
                acc = correct / float(np.prod(np.shape(stacked_labels)))
                print('step' + str(step_id) + ' acc:' + str(acc))
                stacked_probs = np.reshape(stacked_probs, [model.config.val_batch_size, model.config.num_points,
                                                           model.config.num_classes])

                for j in range(np.shape(stacked_probs)[0]):  # TODO: shape and contents of stacked_probs?
                    probs = stacked_probs[j, :, :]
                    p_idx = point_idx[j, :]
                    c_i = cloud_idx[j][0]                   # TODO: what does c_i index?
                    self.test_probs[c_i][p_idx] = test_smooth * self.test_probs[c_i][p_idx] + (1 - test_smooth) * probs  # TODO: stores smoothed probs
                step_id += 1

            except tf.errors.OutOfRangeError:

                new_min = np.min(dataset.min_possibility['test'])
                log_out('Epoch {:3d}, end. Min possibility = {:.1f}'.format(epoch_id, new_min), self.Log_file)

                if last_min + 1 < new_min:

                    # Update last_min
                    last_min += 1

                    # Show vote results (computed on sub-clouds, so these are not the final values)
                    log_out('\nConfusion on sub clouds', self.Log_file)
                    confusion_list = []

                    num_val = len(dataset.input_labels['test'])

                    for i_test in range(num_val):
                        #print("b")
                        probs = self.test_probs[i_test]                                                         # TODO: retrieves probs
                        preds = dataset.label_values[np.argmax(probs, axis=1)].astype(np.int32)                 # TODO: retrieves the preds
                        labels = dataset.input_labels['test'][i_test]

                        # Confs
                        confusion_list += [confusion_matrix(labels, preds, dataset.label_values)]

                    # Regroup confusions
                    C = np.sum(np.stack(confusion_list), axis=0).astype(np.float32)

                    # Rescale with the right number of points per class
                    C *= np.expand_dims(val_proportions / (np.sum(C, axis=1) + 1e-6), 1)

                    # Compute IoUs
                    IoUs = DP.IoU_from_confusions(C)
                    m_IoU = np.mean(IoUs)
                    s = '{:5.2f} | '.format(100 * m_IoU)
                    for IoU in IoUs:
                        s += '{:5.2f} '.format(100 * IoU)
                    log_out(s + '\n', self.Log_file)

                    if int(np.ceil(new_min)) % 1 == 0:

                        # Project predictions
                        log_out('\nReproject Vote #{:d}'.format(int(np.floor(new_min))), self.Log_file)
                        proj_probs_list = []

                        for i_val in range(num_val):
                            #print("c")
                            # Reproject probs back to the evaluations points
                            proj_idx = dataset.val_proj[i_val]               # TODO: the eval part could be removed and the preds extracted directly this way
                            probs = self.test_probs[i_val][proj_idx, :]
                            proj_probs_list += [probs]

                        # Show vote results
                        log_out('Confusion on full clouds', self.Log_file)
                        confusion_list = []
                        for i_test in range(num_val):
                            #print("d")
                            # Get the predicted labels
                            preds = dataset.label_values[np.argmax(proj_probs_list[i_test], axis=1)].astype(np.uint8)

                            # Confusion
                            labels = dataset.val_labels[i_test]
                            acc = np.sum(preds == labels) / len(labels)
                            log_out(dataset.input_names['test'][i_test] + ' Acc:' + str(acc), self.Log_file)

                            confusion_list += [confusion_matrix(labels, preds, dataset.label_values)]
                            name = dataset.input_names['test'][i_test] + '.ply'             
                            xyz = dataset.input_full_xyz['test'][i_test]
                            pred_colors = DP.labels2colors(preds, path_cls)
                            classes, _, _, _, _ = DP.get_info_classes(path_cls)
                            write_ply(join(test_path, name), (xyz, pred_colors), ['x', 'y', 'z', 'red', 'green', 'blue'])

                        # Regroup confusions
                        C = np.sum(np.stack(confusion_list), axis=0)

                        IoUs = DP.IoU_from_confusions(C)
                        m_IoU = np.mean(IoUs)
                        s = '{:5.2f} | '.format(100 * m_IoU)
                        for IoU in IoUs:
                            s += '{:5.2f} '.format(100 * IoU)

                        str_cls = ""
                        for i in range(len(classes)):
                            str_cls = str_cls + str(classes[i]) + "  " 
                        log_out("\n" + str_cls, self.Log_file)

                        log_out(str(C) + '\n', self.Log_file)

                        log_out("        " + str_cls, self.Log_file)
                        log_out('-' * len(s), self.Log_file)
                        log_out(s, self.Log_file)
                        log_out('-' * len(s) + '\n', self.Log_file)

                        acc_global, prec_classes, rec_classes, acc_classes = DP.metrics_from_confusions(C)
                        
                        log_out("global accuracy: " + str(acc_global) + '\n', self.Log_file)

                        for i in range(len(classes)):
                            str_acc = str(classes[i]) + ' accuracy: ' + str(acc_classes[i])
                            str_prec = str(classes[i]) + ' precision: ' + str(prec_classes[i])
                            str_rec = str(classes[i]) + ' recall: ' + str(rec_classes[i])
                            log_out(str_acc, self.Log_file)
                            log_out(str_prec, self.Log_file)
                            log_out(str_rec + '\n', self.Log_file)

                        print('finished \n')
                        self.sess.close()
                        return

                self.sess.run(dataset.val_init_op)
                epoch_id += 1
                step_id = 0
                continue

        return
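
DP.IoU_from_confusions, used by all of these testers, reduces a confusion matrix to per-class IoU as TP / (TP + FP + FN). An equivalent NumPy sketch (an illustration, not the repo's helper):

import numpy as np

def iou_from_confusion_sketch(C):
    # Rows are ground truth, columns are predictions.
    TP = np.diag(C).astype(np.float64)
    FP = C.sum(axis=0) - TP  # predicted as class c but actually another class
    FN = C.sum(axis=1) - TP  # actually class c but predicted as another class
    return TP / np.maximum(TP + FP + FN, 1e-6)

C = np.array([[50, 2, 0],
              [3, 40, 1],
              [0, 4, 30]], dtype=np.float64)
ious = iou_from_confusion_sketch(C)
print(ious, ious.mean())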