Example #1
    def save(self, path):
        """
        Write the point cloud to a file
        params:
            path: output path
        """
        write_ply(path, self.points, ["x", "y", "z"])
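
# write_ply here is a helper from the surrounding project. As a rough, hedged
# illustration only (not the project's actual implementation), a minimal ASCII
# PLY writer for float fields could look like this:
import numpy as np

def write_ply_ascii(path, points, field_names):
    # One float property per field name, one vertex per row
    points = np.asarray(points, dtype=np.float32)
    header = ['ply', 'format ascii 1.0',
              'element vertex {:d}'.format(points.shape[0])]
    header += ['property float {:s}'.format(name) for name in field_names]
    header.append('end_header')
    with open(path, 'w') as f:
        f.write('\n'.join(header) + '\n')
        np.savetxt(f, points, fmt='%.6f')

# Hypothetical usage mirroring save(): write_ply_ascii('cloud.ply', cloud, ['x', 'y', 'z'])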
Example #2
    def prepare_data(self):
        """
        Download and precompute Semantic3D point clouds

        """

        if not exists(self.train_path):
            makedirs(self.train_path)
        if not exists(self.test_path):
            makedirs(self.test_path)

        # Folder names
        old_folder = join(self.path, self.original_folder)

        # Text files containing points
        cloud_names = [file_name[:-4] for file_name in listdir(old_folder) if file_name[-4:] == '.txt']

        for cloud_name in cloud_names:

            # Name of the files
            txt_file = join(old_folder, cloud_name + '.txt')
            label_file = join(old_folder, cloud_name + '.labels')

            if exists(label_file):
                ply_file_full = join(self.train_path, cloud_name + '.ply')
            else:
                ply_file_full = join(self.test_path, cloud_name + '.ply')

            # Pass if already done
            if exists(ply_file_full):
                print('{:s} already done\n'.format(cloud_name))
                continue

            print('Preparation of {:s}'.format(cloud_name))

            data = np.loadtxt(txt_file)

            points = data[:, :3].astype(np.float32)
            colors = data[:, 4:7].astype(np.uint8)

            if exists(label_file):

                # Load labels
                labels = np.loadtxt(label_file, dtype=np.int32)

                # Subsample to save space
                sub_points, sub_colors, sub_labels = grid_subsampling(points,
                                                                      features=colors,
                                                                      labels=labels,
                                                                      sampleDl=0.01)

                # Write the subsampled ply file
                write_ply(ply_file_full, (sub_points, sub_colors, sub_labels), ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

            else:

                # Write the full ply file
                write_ply(ply_file_full, (points, colors), ['x', 'y', 'z', 'red', 'green', 'blue'])
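
# grid_subsampling above is a compiled helper from the surrounding project. For
# intuition only, a hedged pure-numpy sketch of voxel-grid subsampling that
# averages the points falling in each voxel of side sampleDl:
import numpy as np

def grid_subsample_numpy(points, sampleDl=0.01):
    voxels = np.floor(points / sampleDl).astype(np.int64)
    # Group points by voxel through a unique inverse index
    _, inverse = np.unique(voxels, axis=0, return_inverse=True)
    inverse = inverse.ravel()
    counts = np.bincount(inverse).astype(np.float64)
    sub_points = np.zeros((counts.shape[0], 3))
    for dim in range(3):
        sub_points[:, dim] = np.bincount(inverse, weights=points[:, dim]) / counts
    return sub_points.astype(np.float32)
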
def convert_pointcloud2ply(annotations_path, save_path, sub_grid_size=0.04):
    """convert original files(.txt) to ply file(each line is XYZRGBL).

    Args:
        annotations_path (str): path to annotations
        save_path (str): path to save original point clouds (each line is XYZRGBL)
        sub_grid_size (float, optional): grid size used for subsampling. Defaults to 0.04.
    """
    # Create the output folder for the subsampled clouds (sub_pointcloud_folder
    # is assumed to be a module-level path, as used further below)
    make_dir(sub_pointcloud_folder)
    data_list = []
    for file in glob.glob(os.path.join(annotations_path, '*.txt')):
        class_name = os.path.basename(file).split('_')[0]

        if class_name not in ground_truth_class:
            class_name = 'clutter'

        pointcloud = pd.read_csv(file, header=None,
                                 delim_whitespace=True).values
        labels = np.ones(
            (pointcloud.shape[0], 1)) * ground_truth_label[class_name]
        data = np.concatenate([pointcloud, labels],
                              axis=1)  # x,y,z,r,g,b,label
        data_list.append(data)
        print(pointcloud)
        print(labels)

    pointcloud_and_label = np.concatenate(data_list, axis=0)
    xyz_min = np.min(pointcloud_and_label, axis=0)[0:3]
    pointcloud_and_label[:, 0:3] = pointcloud_and_label[:, 0:3] - xyz_min

    xyz = pointcloud_and_label[:, 0:3].astype(np.float32)
    colors = pointcloud_and_label[:, 3:6].astype(np.uint8)
    labels = pointcloud_and_label[:, 6].astype(np.uint8)
    ply.write_ply(save_path, (xyz, colors, labels),
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    sub_xyz, sub_colors, sub_labels = DataProcessing.grid_sub_sampling(
        xyz, colors, labels, sub_grid_size)
    sub_colors = sub_colors / 255.0
    sub_ply_file = os.path.join(sub_pointcloud_folder,
                                save_path.split('/')[-1][:-4] + '.ply')
    ply.write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    search_tree = KDTree(sub_xyz)
    kd_tree_file = os.path.join(
        sub_pointcloud_folder,
        str(save_path.split('/')[-1][:-4]) + '_KDTree.pkl')
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    project_index = np.squeeze(search_tree.query(xyz, return_distance=False))
    project_index = project_index.astype(np.int32)
    project_save = os.path.join(
        sub_pointcloud_folder,
        str(save_path.split('/')[-1][:-4]) + '_project.pkl')
    with open(project_save, 'wb') as f:
        pickle.dump([project_index, labels], f)
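
# The KDTree pickled above maps every original point to its nearest subsampled
# point, so predictions made on the subsampled cloud can later be projected
# back. A hedged, self-contained sketch of that round trip with toy arrays:
import numpy as np
from sklearn.neighbors import KDTree

xyz_full = np.random.rand(1000, 3).astype(np.float32)   # stand-in full cloud
xyz_sub = xyz_full[::10]                                 # stand-in subsampled cloud

tree = KDTree(xyz_sub)
proj_idx = np.squeeze(tree.query(xyz_full, return_distance=False)).astype(np.int32)

sub_preds = np.random.randint(0, 13, size=xyz_sub.shape[0])  # fake per-point labels
full_preds = sub_preds[proj_idx]                             # lifted back to every point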
def get_statistics(predict_folder):
    log_file = os.path.join(predict_folder, 'results.txt')

    classified_files = utils.get_files_with_ext(data_dir, '.ply')
    cm_global = ConfusionMatrix(num_classes)

    for file in classified_files:

        data = read_ply(file)
        gt_labels = data['class']
        base = os.path.basename(file)

        pred_file = os.path.join(predict_folder, base)
        pred_data = read_ply(pred_file)
        pred_labels = pred_data['preds']

        rgb = get_rgb_color_codes(pred_labels)
        results = {}
        results['x'] = data['x']
        results['y'] = data['y']
        results['z'] = data['z']
        results['red'] = rgb[:, 0]
        results['green'] = rgb[:, 1]
        results['blue'] = rgb[:, 2]
        results['correct'] = np.equal(gt_labels, pred_labels).astype(np.int32)
        results['preds'] = pred_data['preds']
        results['ground_truth'] = data['class']

        test = [
            results['x'], results['y'], results['z'], results['red'],
            results['green'], results['blue'], results['correct'],
            results['preds'], results['ground_truth']
        ]

        write_ply(pred_file.replace('.ply', '_color.ply'), test, [
            'x', 'y', 'z', 'red', 'green', 'blue', 'correct', 'preds',
            'ground_truth'
        ])

        predicted = file.replace('.labels', '.txt')
        with open(log_file, 'a') as log:
            log.write("File: {}".format(predicted))

        cm = ConfusionMatrix(num_classes)
        cm.increment_from_list(gt_labels, pred_labels)
        cm.print_metrics(log_file)
        cm_global.increment_from_list(gt_labels, pred_labels)

    with open(log_file, 'a') as log:
        log.write("Global Stats")

    cm_global_1 = cm_global.confusion_matrix.astype(
        'float') / cm_global.confusion_matrix.sum(axis=1)[:, np.newaxis]
    print(cm_global_1.diagonal())
    cm_global.print_metrics(log_file)
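
# ConfusionMatrix is a project helper; the row-normalised diagonal printed above
# is per-class recall. A hedged numpy sketch of that computation:
import numpy as np

def per_class_recall(gt_labels, pred_labels, num_classes):
    # gt_labels / pred_labels: integer class indices in [0, num_classes)
    cm = np.zeros((num_classes, num_classes), dtype=np.int64)
    np.add.at(cm, (gt_labels, pred_labels), 1)   # rows = ground truth, cols = predictions
    with np.errstate(invalid='ignore'):
        cm_norm = cm.astype(np.float64) / cm.sum(axis=1, keepdims=True)
    return np.nan_to_num(np.diag(cm_norm))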
Example #5
    def save_kernel_points(self, model, epoch):
        """
        Save the kernel point dispositions and current model weights for later visualization
        """

        if model.config.saving:

            # Create a directory to save kernels of this epoch
            kernels_dir = join(model.saving_path, 'kernel_points', 'epoch{:d}'.format(epoch))
            if not exists(kernels_dir):
                makedirs(kernels_dir)

            # Get points
            all_kernel_points_tf = [v for v in tf.global_variables() if 'kernel_points' in v.name
                                    and v.name.startswith('KernelPoint')]
            all_kernel_points = self.sess.run(all_kernel_points_tf)

            # Get Extents
            if False and 'gaussian' in model.config.convolution_mode:
                all_kernel_params_tf = [v for v in tf.global_variables() if 'kernel_extents' in v.name
                                        and v.name.startswith('KernelPoint')]
                all_kernel_params = self.sess.run(all_kernel_params_tf)
            else:
                all_kernel_params = [None for p in all_kernel_points]

            # Save in ply file
            for kernel_points, kernel_extents, v in zip(all_kernel_points, all_kernel_params, all_kernel_points_tf):

                # Name of saving file
                ply_name = '_'.join(v.name[:-2].split('/')[1:-1]) + '.ply'
                ply_file = join(kernels_dir, ply_name)

                # Data to save
                if kernel_points.ndim > 2:
                    kernel_points = kernel_points[:, 0, :]
                if False and 'gaussian' in model.config.convolution_mode:
                    data = [kernel_points, kernel_extents]
                    keys = ['x', 'y', 'z', 'sigma']
                else:
                    data = kernel_points
                    keys = ['x', 'y', 'z']

                # Save
                write_ply(ply_file, data, keys)

            # Get Weights
            all_kernel_weights_tf = [v for v in tf.global_variables() if 'weights' in v.name
                                    and v.name.startswith('KernelPointNetwork')]
            all_kernel_weights = self.sess.run(all_kernel_weights_tf)

            # Save in numpy file
            for kernel_weights, v in zip(all_kernel_weights, all_kernel_weights_tf):
                np_name = '_'.join(v.name[:-2].split('/')[1:-1]) + '.npy'
                np_file = join(kernels_dir, np_name)
                np.save(np_file, kernel_weights)
def convert_pointcloud2ply(annotations_path, save_path, sub_grid_size=0.04):
    """convert original files(.txt) to ply file(each line is XYZRGBL).

    Args:
        annotations_path (str): path to annotations
        save_path (str): path to save original point clouds (each line is XYZRGBL)
        sub_grid_size (float, optional): grid size used for subsampling. Defaults to 0.04.
    """
    # Create the output folder for the subsampled clouds (sub_pointcloud_folder
    # is assumed to be a module-level path, as used further below)
    make_dir(sub_pointcloud_folder)

    class_name = os.path.basename(annotations_path).split('/')[0][:-9]
    pointcloud = np.loadtxt(annotations_path, delimiter=',').astype(np.float32)
    labels = np.ones((pointcloud.shape[0], 1)) * ground_truth_label[class_name]
    pointcloud_and_label = np.concatenate([pointcloud, labels], axis=1)
    xyz_min = np.min(pointcloud_and_label, axis=0)[0:3]
    pointcloud_and_label[:, 0:3] = pointcloud_and_label[:, 0:3] - xyz_min

    xyz = pointcloud_and_label[:, 0:3].astype(np.float32)
    colors = pointcloud_and_label[:, 3:6].astype(np.uint8)
    labels = pointcloud_and_label[:, 6].astype(np.uint8)

    print(save_path)
    ply.write_ply(save_path, (xyz, colors, labels),
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    sub_xyz, sub_colors, sub_labels = DataProcessing.grid_sub_sampling(
        xyz, colors, labels, sub_grid_size)
    sub_colors = sub_colors / 255.0
    sub_ply_file = os.path.join(sub_pointcloud_folder,
                                save_path.split('/')[-1][:-4] + '.ply')
    print(sub_ply_file)
    ply.write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    search_tree = KDTree(sub_xyz)
    kd_tree_file = os.path.join(
        sub_pointcloud_folder,
        str(save_path.split('/')[-1][:-4]) + '_KDTree.pkl')
    print(kd_tree_file)
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    project_index = np.squeeze(search_tree.query(xyz, return_distance=False))
    project_index = project_index.astype(np.int32)
    project_save = os.path.join(
        sub_pointcloud_folder,
        str(save_path.split('/')[-1][:-4]) + '_project.pkl')
    print(project_save)
    with open(project_save, 'wb') as f:
        pickle.dump([project_index, labels], f)
def generate_cube(side, sample_per_side, out):

    sps = sample_per_side
    arr = np.arange(sample_per_side**3)
    x = arr % sps
    y = arr // sps % sps
    z = arr // sps // sps % sps

    coords = np.vstack([x, y, z]).T
    is_boundary = np.logical_or.reduce(np.logical_or(coords == 0,
                                                     coords == sps - 1),
                                       axis=-1)
    coords = coords[is_boundary].astype(np.float32)

    write_ply(out, (coords, ), ['x', 'y', 'z'])
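
# Hypothetical call: with sample_per_side=10, only the lattice points touching a
# face are kept (10**3 - 8**3 = 488 points) and written to cube.ply. Note that,
# as written above, the `side` argument does not rescale the coordinates.
# generate_cube(side=1.0, sample_per_side=10, out='cube.ply')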
    def prepare_PartNet_ply(self):

        print('\nPreparing ply files')
        t0 = time.time()
        # Convert to ply
        # **************
        splits = ['train', 'val', 'test']
        for split in splits:
            ply_path = os.path.join(self.path, '{:s}_ply'.format(split))
            if not os.path.exists(ply_path):
                os.makedirs(ply_path)
            for class_name in self.label_names:
                split_filelist = os.path.join(self.path,
                                              '{}-{}'.format(class_name, 3),
                                              '{:s}_files.txt'.format(split))
                split_points, split_labels = self._load_seg(split_filelist)
                N = split_points.shape[0]
                for i in range(N):
                    ply_name = os.path.join(
                        ply_path, '{:s}_{:04d}.ply'.format(class_name, i))
                    if os.path.exists(ply_name):
                        continue
                    points = split_points[i]
                    labels = split_labels[i]

                    # Center and rescale point for 1m radius
                    pmin = np.min(points, axis=0)
                    pmax = np.max(points, axis=0)
                    points -= (pmin + pmax) / 2
                    scale = np.max(np.linalg.norm(points, axis=1))
                    points *= 1.0 / scale

                    # Switch y and z dimensions
                    points = points[:, [0, 2, 1]]

                    # Save in ply format
                    write_ply(ply_name, (points, labels),
                              ['x', 'y', 'z', 'label'])
                    # Display
                    print('preparing {:s} {:s} ply: {:.1f}%'.format(
                        class_name, split, 100 * i / N))

        print('Done in {:.1f}s'.format(time.time() - t0))
Example #9
                def keyboard_callback(vtk_obj, event):
                    global obj_i, point_i, offsets, p_scale, show_in_p

                    if vtk_obj.GetKeyCode() in ['b', 'B']:
                        p_scale /= 1.5
                        update_scene()

                    elif vtk_obj.GetKeyCode() in ['n', 'N']:
                        p_scale *= 1.5
                        update_scene()

                    if vtk_obj.GetKeyCode() in ['g', 'G']:
                        obj_i = (obj_i - 1) % len(deformed_KP)
                        point_i = 0
                        update_scene()

                    elif vtk_obj.GetKeyCode() in ['h', 'H']:
                        obj_i = (obj_i + 1) % len(deformed_KP)
                        point_i = 0
                        update_scene()

                    elif vtk_obj.GetKeyCode() in ['k', 'K']:
                        offsets = not offsets
                        animate_kernel()

                    elif vtk_obj.GetKeyCode() in ['z', 'Z']:
                        show_in_p = (show_in_p + 1) % 3
                        update_scene()

                    elif vtk_obj.GetKeyCode() in ['0']:

                        print('Saving')

                        # Find a new name
                        file_i = 0
                        file_name = 'KP_{:03d}.ply'.format(file_i)
                        files = [f for f in listdir('KP_clouds') if f.endswith('.ply')]
                        while file_name in files:
                            file_i += 1
                            file_name = 'KP_{:03d}.ply'.format(file_i)

                        KP_deform = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
                        KP_normal = points[obj_i][point_i] + original_KPs[chosen_KP]

                        # Save
                        write_ply(join('KP_clouds', file_name),
                                  [in_points[obj_i], in_colors[obj_i]],
                                  ['x', 'y', 'z', 'red', 'green', 'blue'])
                        write_ply(join('KP_clouds', 'KP_{:03d}_deform.ply'.format(file_i)),
                                  [KP_deform],
                                  ['x', 'y', 'z'])
                        write_ply(join('KP_clouds', 'KP_{:03d}_normal.ply'.format(file_i)),
                                  [KP_normal],
                                  ['x', 'y', 'z'])
                        print('OK')

                    return
scalar_product = (points - barycenter) @ eigenvectors[:, 2]
hash_index = scalar_product // bucket_size
chunck_ids = np.unique(hash_index)

selected_index = 0
scalar_product[np.where(hash_index == selected_index)]
upper_bound = (selected_index + 1) * bucket_size
lower_bound = selected_index * bucket_size

interest_indexes = np.where(hash_index == selected_index)[0]
fuzzy_indexes = np.where((scalar_product <= upper_bound + bucket_residual)
                         & (scalar_product >= lower_bound - bucket_residual))[0]

# Store points with such voxelization
write_ply('./data_processing/interest_points.ply',
          [points[interest_indexes],
           np.ones(interest_indexes.shape) * 10], ['x', 'y', 'z', 'color'])

write_ply('./data_processing/interest_and_boundary_points.ply',
          [points[fuzzy_indexes],
           np.ones(fuzzy_indexes.shape) * 0], ['x', 'y', 'z', 'color'])
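
# The slicing above relies on `barycenter`, `eigenvectors`, `bucket_size` and
# `bucket_residual` defined earlier in the script. A hedged sketch of how the
# PCA quantities are typically obtained from the cloud:
import numpy as np

def pca_axis_and_barycenter(cloud):
    # Returns the barycenter and PCA eigenvectors (eigh gives ascending
    # eigenvalues, so eigenvectors[:, 2] is the direction of largest variance)
    barycenter = cloud.mean(axis=0)
    centered = cloud - barycenter
    cov = centered.T @ centered / cloud.shape[0]
    eigenvalues, eigenvectors = np.linalg.eigh(cov)
    return barycenter, eigenvectors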

#%% Iterate for features computations

radius = 0.5

features = np.empty((0, 4))
features_index = []

feature_file = 'features/training/' + training_path.split('/')[-1].split(
    '.')[0] + '_features.npy'
Example #11
    def test_cloud_segmentation(self, model, dataset, num_votes=100):

        ##########
        # Initiate
        ##########

        # Smoothing parameter for votes
        test_smooth = 0.98

        # Initialise iterator with test data
        self.sess.run(dataset.test_init_op)

        # Initiate global prediction over test clouds
        nc_model = model.config.num_classes
        self.test_probs = [
            np.zeros((l.data.shape[0], nc_model), dtype=np.float32)
            for l in dataset.input_trees['test']
        ]

        # Test saving path
        if model.config.saving:
            test_path = join('test', model.saving_path.split('/')[-1])
            if not exists(test_path):
                makedirs(test_path)
            if not exists(join(test_path, 'predictions')):
                makedirs(join(test_path, 'predictions'))
            if not exists(join(test_path, 'probs')):
                makedirs(join(test_path, 'probs'))
        else:
            test_path = None

        #####################
        # Network predictions
        #####################

        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        many_runs_timeline = TimeLiner()

        i0 = 0
        epoch_ind = 0
        last_min = -0.5
        mean_dt = np.zeros(2)
        last_display = time.time()
        while last_min < num_votes:
            try:
                # Run one step of the model.
                t = [time.time()]
                ops = (self.prob_logits, model.labels,
                       model.inputs['in_batches'], model.inputs['point_inds'],
                       model.inputs['cloud_inds'])
                stacked_probs, labels, batches, point_inds, cloud_inds = self.sess.run(
                    ops, {model.dropout_prob: 1.0})
                """
                stacked_probs, labels, batches, point_inds, cloud_inds = self.sess.run(ops,
                                                                                       {model.dropout_prob: 1.0},
                                                                                       options=options,
                                                                                       run_metadata=run_metadata)
                """
                t += [time.time()]

                #fetched_timeline = timeline.Timeline(run_metadata.step_stats)
                #chrome_trace = fetched_timeline.generate_chrome_trace_format()
                #many_runs_timeline.update_timeline(chrome_trace)

                if False:
                    many_runs_timeline.save('timeline_merged_%d_runs.json' %
                                            i0)
                    a = 1 / 0

                # Get predictions and labels per instance
                # ***************************************

                # Stack all predictions for each class separately
                max_ind = np.max(batches)
                for b_i, b in enumerate(batches):
                    # Eliminate shadow indices
                    b = b[b < max_ind - 0.5]

                    # Get prediction (only for the concerned parts)
                    probs = stacked_probs[b]
                    inds = point_inds[b]
                    c_i = cloud_inds[b_i]

                    # Update current probs in whole cloud
                    self.test_probs[c_i][inds] = test_smooth * self.test_probs[
                        c_i][inds] + (1 - test_smooth) * probs

                # Average timing
                t += [time.time()]
                #print(batches.shape, stacked_probs.shape, 1000*(t[1] - t[0]), 1000*(t[2] - t[1]))
                mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) -
                                                   np.array(t[:-1]))

                # Display
                if (t[-1] - last_display) > 1.0:
                    last_display = t[-1]
                    message = 'Epoch {:3d}, step {:3d} (timings : {:4.2f} {:4.2f}). min potential = {:.1f}'
                    print(
                        message.format(epoch_ind, i0, 1000 * (mean_dt[0]),
                                       1000 * (mean_dt[1]),
                                       np.min(dataset.min_potentials['test'])))

                i0 += 1

            except tf.errors.OutOfRangeError:

                # Save predicted cloud
                new_min = np.min(dataset.min_potentials['test'])
                print('Epoch {:3d}, end. Min potential = {:.1f}'.format(
                    epoch_ind, new_min))
                print([np.mean(pots) for pots in dataset.potentials['test']])

                if last_min + 2 < new_min:

                    print('Saving clouds')

                    # Update last_min
                    last_min = new_min

                    # Project predictions
                    print('\nReproject Vote #{:d}'.format(
                        int(np.floor(new_min))))
                    t1 = time.time()
                    files = dataset.test_files
                    i_test = 0
                    for i, file_path in enumerate(files):

                        # Get file
                        points = dataset.load_evaluation_points(file_path)

                        # Reproject probs
                        probs = self.test_probs[i_test][
                            dataset.test_proj[i_test], :]

                        # Insert false columns for ignored labels
                        probs2 = probs.copy()
                        for l_ind, label_value in enumerate(
                                dataset.label_values):
                            if label_value in dataset.ignored_labels:
                                probs2 = np.insert(probs2, l_ind, 0, axis=1)

                        # Get the predicted labels
                        preds = dataset.label_values[np.argmax(
                            probs2, axis=1)].astype(np.int32)

                        # Project potentials on original points
                        pots = dataset.potentials['test'][i_test][
                            dataset.test_proj[i_test]]

                        # Save plys
                        cloud_name = file_path.split('/')[-1]
                        test_name = join(test_path, 'predictions', cloud_name)
                        write_ply(test_name, [points, preds, pots],
                                  ['x', 'y', 'z', 'preds', 'pots'])
                        test_name2 = join(test_path, 'probs', cloud_name)
                        prob_names = [
                            '_'.join(dataset.label_to_names[label].split())
                            for label in dataset.label_values
                            if label not in dataset.ignored_labels
                        ]
                        write_ply(test_name2, [points, probs],
                                  ['x', 'y', 'z'] + prob_names)

                        # Save ascii preds
                        if dataset.name.startswith('Semantic3D'):
                            ascii_name = join(test_path, 'predictions',
                                              dataset.ascii_files[cloud_name])
                        else:
                            ascii_name = join(test_path, 'predictions',
                                              cloud_name[:-4] + '.txt')
                        np.savetxt(ascii_name, preds, fmt='%d')
                        i_test += 1

                    t2 = time.time()
                    print('Done in {:.1f} s\n'.format(t2 - t1))

                self.sess.run(dataset.test_init_op)
                epoch_ind += 1
                i0 = 0
                continue

        return
Example #12
    # Decimate the point cloud
    # ************************
    #

    # Define the decimation factor
    factor = 300

    # Decimate
    t0 = time.time()
    decimated_points, decimated_colors, decimated_labels = cloud_decimation(
        points, colors, labels, factor)
    t1 = time.time()
    print('decimation done in {:.3f} seconds'.format(t1 - t0))

    # Save
    write_ply('../decimated.ply',
              [decimated_points, decimated_colors, decimated_labels],
              ['x', 'y', 'z', 'red', 'green', 'blue', 'label'])

    # Subsample the point cloud on a grid
    # ***********************************
    #

    # Define the size of the grid
    voxel_size = 0.2

    # Subsample
    t0 = time.time()
    subsampled_points = grid_subsampling(voxel_size, points)
    t1 = time.time()
    print('Subsampling done in {:.3f} seconds'.format(t1 - t0))
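
# cloud_decimation and grid_subsampling are helpers defined elsewhere in this
# project (typically compiled extensions). A hedged pure-Python stand-in for the
# decimation step, keeping one point out of every `factor`:
import numpy as np

def cloud_decimation_py(points, colors, labels, factor):
    idx = np.arange(0, points.shape[0], factor)
    return points[idx], colors[idx], labels[idx]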
Example #13
    def cloud_validation_error(self, model, dataset):
        """
        Validation method for cloud segmentation models
        """

        ##########
        # Initiate
        ##########

        # Choose validation smoothing parameter (0 for no smoothing, 0.99 for heavy smoothing)
        val_smooth = 0.95

        # Do not validate if dataset has no validation cloud
        if dataset.validation_split not in dataset.all_splits:
            return

        # Initialise iterator with validation data
        self.sess.run(dataset.val_init_op)

        # Number of classes including ignored labels
        nc_tot = dataset.num_classes

        # Number of classes predicted by the model
        nc_model = model.config.num_classes

        # Initiate global prediction over validation clouds
        if not hasattr(self, 'validation_probs'):
            self.validation_probs = [
                np.zeros((l.shape[0], nc_model))
                for l in dataset.input_labels['validation']
            ]
            self.val_proportions = np.zeros(nc_model, dtype=np.float32)
            i = 0
            for label_value in dataset.label_values:
                if label_value not in dataset.ignored_labels:
                    self.val_proportions[i] = np.sum([
                        np.sum(labels == label_value)
                        for labels in dataset.validation_labels
                    ])
                    i += 1

        #####################
        # Network predictions
        #####################

        predictions = []
        targets = []
        mean_dt = np.zeros(2)
        last_display = time.time()
        for i0 in range(model.config.validation_size):
            try:
                # Run one step of the model.
                t = [time.time()]
                ops = (self.prob_logits, model.labels,
                       model.inputs['in_batches'], model.inputs['point_inds'],
                       model.inputs['cloud_inds'])
                stacked_probs, labels, batches, point_inds, cloud_inds = self.sess.run(
                    ops, {model.dropout_prob: 1.0})
                t += [time.time()]

                # Get predictions and labels per instance
                # ***************************************

                # Stack all validation predictions for each class separately
                max_ind = np.max(batches)
                for b_i, b in enumerate(batches):

                    # Eliminate shadow indices
                    b = b[b < max_ind - 0.5]

                    # Get prediction (only for the concerned parts)
                    probs = stacked_probs[b]
                    inds = point_inds[b]
                    c_i = cloud_inds[b_i]

                    # Update current probs in whole cloud
                    self.validation_probs[c_i][inds] = val_smooth * self.validation_probs[c_i][inds] \
                                                                + (1-val_smooth) * probs

                    # Stack all prediction for this epoch
                    predictions += [probs]
                    targets += [dataset.input_labels['validation'][c_i][inds]]

                # Average timing
                t += [time.time()]
                mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) -
                                                   np.array(t[:-1]))

                # Display
                if (t[-1] - last_display) > 1.0:
                    last_display = t[-1]
                    message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})'
                    print(
                        message.format(100 * i0 / model.config.validation_size,
                                       1000 * (mean_dt[0]),
                                       1000 * (mean_dt[1])))

            except tf.errors.OutOfRangeError:
                break

        # Confusions for our subparts of validation set
        Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32)
        for i, (probs, truth) in enumerate(zip(predictions, targets)):

            # Insert false columns for ignored labels
            for l_ind, label_value in enumerate(dataset.label_values):
                if label_value in dataset.ignored_labels:
                    probs = np.insert(probs, l_ind, 0, axis=1)

            # Predicted labels
            preds = dataset.label_values[np.argmax(probs, axis=1)]

            # Confusions
            Confs[i, :, :] = confusion_matrix(truth, preds,
                                              dataset.label_values)

        # Sum all confusions
        C = np.sum(Confs, axis=0).astype(np.float32)

        # Remove ignored labels from confusions
        for l_ind, label_value in reversed(
                list(enumerate(dataset.label_values))):
            if label_value in dataset.ignored_labels:
                C = np.delete(C, l_ind, axis=0)
                C = np.delete(C, l_ind, axis=1)

        # Balance with real validation proportions
        C *= np.expand_dims(self.val_proportions / (np.sum(C, axis=1) + 1e-6),
                            1)

        # Objects IoU
        IoUs = IoU_from_confusions(C)

        # Saving (optional)
        if model.config.saving:

            # Name of saving file
            test_file = join(model.saving_path, 'val_IoUs.txt')

            # Line to write:
            line = ''
            for IoU in IoUs:
                line += '{:.3f} '.format(IoU)
            line = line + '\n'

            # Write in file
            if exists(test_file):
                with open(test_file, "a") as text_file:
                    text_file.write(line)
            else:
                with open(test_file, "w") as text_file:
                    text_file.write(line)

        # Print instance mean
        mIoU = 100 * np.mean(IoUs)
        print('{:s} mean IoU = {:.1f}%'.format(model.config.dataset, mIoU))

        # Save predicted cloud occasionally
        if model.config.saving and (self.training_epoch +
                                    1) % model.config.snapshot_gap == 0:
            val_path = join(model.saving_path,
                            'val_preds_{:d}'.format(self.training_epoch))
            if not exists(val_path):
                makedirs(val_path)
            files = dataset.train_files
            i_val = 0
            for i, file_path in enumerate(files):
                if dataset.all_splits[i] == dataset.validation_split:

                    # Get points
                    points = dataset.load_evaluation_points(file_path)

                    # Get probs on our own ply points
                    sub_probs = self.validation_probs[i_val]

                    # Insert false columns for ignored labels
                    for l_ind, label_value in enumerate(dataset.label_values):
                        if label_value in dataset.ignored_labels:
                            sub_probs = np.insert(sub_probs, l_ind, 0, axis=1)

                    # Get the predicted labels
                    sub_preds = dataset.label_values[np.argmax(sub_probs,
                                                               axis=1).astype(
                                                                   np.int32)]

                    # Reproject preds on the evaluations points
                    preds = (sub_preds[dataset.validation_proj[i_val]]).astype(
                        np.int32)

                    # Path of saved validation file
                    cloud_name = file_path.split('/')[-1]
                    val_name = join(val_path, cloud_name)

                    # Save file
                    labels = dataset.validation_labels[i_val].astype(np.int32)
                    write_ply(val_name, [points, preds, labels],
                              ['x', 'y', 'z', 'preds', 'class'])

                    i_val += 1

        return
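
# IoU_from_confusions is a project helper. A hedged sketch of the per-class IoU
# it presumably derives from a confusion matrix C (rows = truth, cols = preds):
import numpy as np

def iou_from_confusion(C):
    TP = np.diag(C).astype(np.float64)
    FN = C.sum(axis=1) - TP
    FP = C.sum(axis=0) - TP
    return TP / (TP + FP + FN + 1e-6)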
Example #14
    colors = np.vstack((data['red'], data['green'], data['blue'])).T

    # Get the scalar field which represents density as a vector
    density = data['scalar_density']

    # Transform point cloud
    # *********************
    #
    #   Follow the instructions step by step
    #

    # Replace this line by your code
        
    rotation_matrix = np.asarray([[0,-1,0],[1,0,0],[0,0,1]])
    transformed_points = (points - np.mean(points,axis=0))/2
    transformed_points = transformed_points.dot(rotation_matrix)
    transformed_points += (np.mean(points,axis=0))
    transformed_points[:,1] -= 0.1

    # Save point cloud
    # *********************
    #
    #   Save your result file
    #   (See write_ply function)
    #

    # Save point cloud
    write_ply('../results/little_bunny.ply', [transformed_points, colors, density], ['x', 'y', 'z', 'red', 'green', 'blue', 'density'])

    print('Done')
Example #15
num_classes = 14

model = RandLANet(d_in, num_classes, 16, 4, device)
model.load_state_dict(
    torch.load('runs/2020-04-11_17:03/checkpoint_10.pth')['model_state_dict'])
model.eval()

points, labels = next(iter(loader))

print('Predicting labels...')
with torch.no_grad():
    points = points.to(device)
    labels = labels.to(device)
    scores = model(points)
    predictions = torch.max(scores, dim=-2).indices
    accuracy = (
        predictions == labels).float().mean()  # TODO: compute mIoU, etc.
    print('Accuracy:', accuracy.item())
    predictions = predictions.cpu().numpy()

print('Writing results...')
np.savetxt('output.txt', predictions, fmt='%d', delimiter='\n')

t1 = time.time()
# write point cloud with classes
print('Assigning labels to the point cloud...')
cloud = points.squeeze(0)[:, :3].cpu().numpy()  # back to CPU/numpy for writing
write_ply('MiniDijon9.ply', [cloud, predictions], ['x', 'y', 'z', 'class'])

print('Done. Time elapsed: {:.1f}s'.format(t1 - t0))
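
# The TODO above asks for mIoU. A hedged torch sketch that could fill it in,
# skipping classes absent from both predictions and labels:
import torch

def mean_iou(predictions, labels, num_classes):
    ious = []
    for c in range(num_classes):
        pred_c = predictions == c
        label_c = labels == c
        union = (pred_c | label_c).sum().item()
        if union == 0:
            continue
        inter = (pred_c & label_c).sum().item()
        ious.append(inter / union)
    return sum(ious) / max(len(ious), 1)
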
    def slam_segmentation_test(self,
                               net,
                               test_loader,
                               config,
                               num_votes=100,
                               debug=True):
        """
        Test method for slam segmentation models
        """

        ############
        # Initialize
        ############

        # Choose validation smoothing parameter (0 for no smoothing, 0.99 for heavy smoothing)
        test_smooth = 0.5
        last_min = -0.5
        softmax = torch.nn.Softmax(1)

        # Number of classes including ignored labels
        nc_tot = test_loader.dataset.num_classes
        nc_model = net.C

        # Test saving path
        test_path = None
        report_path = None
        if config.saving:
            test_path = join('test', config.saving_path.split('/')[-1])
            if not exists(test_path):
                makedirs(test_path)
            report_path = join(test_path, 'reports')
            if not exists(report_path):
                makedirs(report_path)

        if test_loader.dataset.set == 'validation':
            for folder in ['val_predictions', 'val_probs']:
                if not exists(join(test_path, folder)):
                    makedirs(join(test_path, folder))
        else:
            for folder in ['predictions', 'probs']:
                if not exists(join(test_path, folder)):
                    makedirs(join(test_path, folder))

        # Init validation container
        all_f_preds = []
        all_f_labels = []
        if test_loader.dataset.set == 'validation':
            for i, seq_frames in enumerate(test_loader.dataset.frames):
                all_f_preds.append(
                    [np.zeros((0, ), dtype=np.int32) for _ in seq_frames])
                all_f_labels.append(
                    [np.zeros((0, ), dtype=np.int32) for _ in seq_frames])

        #####################
        # Network predictions
        #####################

        predictions = []
        targets = []
        test_epoch = 0

        t = [time.time()]
        last_display = time.time()
        mean_dt = np.zeros(1)

        # Start test loop
        while True:
            print('Initialize workers')
            for i, batch in enumerate(test_loader):

                # New time
                t = t[-1:]
                t += [time.time()]

                if i == 0:
                    print('Done in {:.1f}s'.format(t[1] - t[0]))

                if 'cuda' in self.device.type:
                    batch.to(self.device)

                # Forward pass
                outputs = net(batch, config)

                # Get probs and labels
                stk_probs = softmax(outputs).cpu().detach().numpy()
                lengths = batch.lengths[0].cpu().numpy()
                f_inds = batch.frame_inds.cpu().numpy()
                r_inds_list = batch.reproj_inds
                r_mask_list = batch.reproj_masks
                labels_list = batch.val_labels
                torch.cuda.synchronize(self.device)

                t += [time.time()]

                # Get predictions and labels per instance
                # ***************************************

                i0 = 0
                for b_i, length in enumerate(lengths):

                    # Get prediction
                    probs = stk_probs[i0:i0 + length]
                    proj_inds = r_inds_list[b_i]
                    proj_mask = r_mask_list[b_i]
                    frame_labels = labels_list[b_i]
                    s_ind = f_inds[b_i, 0]
                    f_ind = f_inds[b_i, 1]

                    # Project predictions on the frame points
                    proj_probs = probs[proj_inds]

                    # Safe check if only one point:
                    if proj_probs.ndim < 2:
                        proj_probs = np.expand_dims(proj_probs, 0)

                    # Save probs in a binary file (uint8 format for lighter weight)
                    seq_name = test_loader.dataset.sequences[s_ind]
                    if test_loader.dataset.set == 'validation':
                        folder = 'val_probs'
                        pred_folder = 'val_predictions'
                    else:
                        folder = 'probs'
                        pred_folder = 'predictions'
                    filename = '{:s}_{:07d}.npy'.format(seq_name, f_ind)
                    filepath = join(test_path, folder, filename)
                    if exists(filepath):
                        frame_probs_uint8 = np.load(filepath)
                    else:
                        frame_probs_uint8 = np.zeros(
                            (proj_mask.shape[0], nc_model), dtype=np.uint8)
                    frame_probs = frame_probs_uint8[proj_mask, :].astype(
                        np.float32) / 255
                    frame_probs = test_smooth * frame_probs + (
                        1 - test_smooth) * proj_probs
                    frame_probs_uint8[proj_mask, :] = (frame_probs *
                                                       255).astype(np.uint8)
                    np.save(filepath, frame_probs_uint8)

                    # Save some predictions in ply format for visualization
                    if test_loader.dataset.set == 'validation':

                        # Insert false columns for ignored labels
                        frame_probs_uint8_bis = frame_probs_uint8.copy()
                        for l_ind, label_value in enumerate(
                                test_loader.dataset.label_values):
                            if label_value in test_loader.dataset.ignored_labels:
                                frame_probs_uint8_bis = np.insert(
                                    frame_probs_uint8_bis, l_ind, 0, axis=1)

                        # Predicted labels
                        frame_preds = test_loader.dataset.label_values[
                            np.argmax(frame_probs_uint8_bis,
                                      axis=1)].astype(np.int32)

                        # Save some of the frame pots
                        if f_ind % 20 == 0:
                            seq_path = join(
                                test_loader.dataset.path, 'sequences',
                                test_loader.dataset.sequences[s_ind])
                            velo_file = join(
                                seq_path, 'velodyne',
                                test_loader.dataset.frames[s_ind][f_ind] +
                                '.bin')
                            frame_points = np.fromfile(velo_file,
                                                       dtype=np.float32)
                            frame_points = frame_points.reshape((-1, 4))
                            predpath = join(test_path, pred_folder,
                                            filename[:-4] + '.ply')
                            #pots = test_loader.dataset.f_potentials[s_ind][f_ind]
                            pots = np.zeros((0, ))
                            if pots.shape[0] > 0:
                                write_ply(predpath, [
                                    frame_points[:, :3], frame_labels,
                                    frame_preds, pots
                                ], ['x', 'y', 'z', 'gt', 'pre', 'pots'])
                            else:
                                write_ply(predpath, [
                                    frame_points[:, :3], frame_labels,
                                    frame_preds
                                ], ['x', 'y', 'z', 'gt', 'pre'])

                            # Also Save lbl probabilities
                            probpath = join(test_path, folder,
                                            filename[:-4] + '_probs.ply')
                            lbl_names = [
                                test_loader.dataset.label_to_names[l]
                                for l in test_loader.dataset.label_values
                                if l not in test_loader.dataset.ignored_labels
                            ]
                            write_ply(probpath,
                                      [frame_points[:, :3], frame_probs_uint8],
                                      ['x', 'y', 'z'] + lbl_names)

                        # keep frame preds in memory
                        all_f_preds[s_ind][f_ind] = frame_preds
                        all_f_labels[s_ind][f_ind] = frame_labels

                    else:

                        # Save some of the frame preds
                        if f_inds[b_i, 1] % 100 == 0:

                            # Insert false columns for ignored labels
                            for l_ind, label_value in enumerate(
                                    test_loader.dataset.label_values):
                                if label_value in test_loader.dataset.ignored_labels:
                                    frame_probs_uint8 = np.insert(
                                        frame_probs_uint8, l_ind, 0, axis=1)

                            # Predicted labels
                            frame_preds = test_loader.dataset.label_values[
                                np.argmax(frame_probs_uint8,
                                          axis=1)].astype(np.int32)

                            # Load points
                            seq_path = join(
                                test_loader.dataset.path, 'sequences',
                                test_loader.dataset.sequences[s_ind])
                            velo_file = join(
                                seq_path, 'velodyne',
                                test_loader.dataset.frames[s_ind][f_ind] +
                                '.bin')
                            frame_points = np.fromfile(velo_file,
                                                       dtype=np.float32)
                            frame_points = frame_points.reshape((-1, 4))
                            predpath = join(test_path, pred_folder,
                                            filename[:-4] + '.ply')
                            #pots = test_loader.dataset.f_potentials[s_ind][f_ind]
                            pots = np.zeros((0, ))
                            if pots.shape[0] > 0:
                                write_ply(
                                    predpath,
                                    [frame_points[:, :3], frame_preds, pots],
                                    ['x', 'y', 'z', 'pre', 'pots'])
                            else:
                                write_ply(predpath,
                                          [frame_points[:, :3], frame_preds],
                                          ['x', 'y', 'z', 'pre'])

                    # Stack all prediction for this epoch
                    i0 += length

                # Average timing
                t += [time.time()]
                mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) -
                                                   np.array(t[:-1]))

                # Display
                if (t[-1] - last_display) > 1.0:
                    last_display = t[-1]
                    message = 'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f}) / pots {:d} => {:.1f}%'
                    min_pot = int(
                        torch.floor(torch.min(test_loader.dataset.potentials)))
                    pot_num = torch.sum(
                        test_loader.dataset.potentials > min_pot + 0.5).type(
                            torch.int32).item()
                    current_num = pot_num + (
                        i + 1 - config.validation_size) * config.val_batch_num
                    print(
                        message.format(
                            test_epoch, i, 100 * i / config.validation_size,
                            1000 * (mean_dt[0]), 1000 * (mean_dt[1]),
                            1000 * (mean_dt[2]), min_pot, 100.0 * current_num /
                            len(test_loader.dataset.potentials)))

            # Update minimum of potentials
            new_min = torch.min(test_loader.dataset.potentials)
            print('Test epoch {:d}, end. Min potential = {:.1f}'.format(
                test_epoch, new_min))

            if last_min + 1 < new_min:

                # Update last_min
                last_min += 1

                if test_loader.dataset.set == 'validation' and last_min % 1 == 0:

                    #####################################
                    # Results on the whole validation set
                    #####################################

                    # Confusions for our subparts of validation set
                    Confs = np.zeros((len(predictions), nc_tot, nc_tot),
                                     dtype=np.int32)
                    for i, (preds,
                            truth) in enumerate(zip(predictions, targets)):

                        # Confusions
                        Confs[i, :, :] = fast_confusion(
                            truth, preds,
                            test_loader.dataset.label_values).astype(np.int32)

                    # Show vote results
                    print('\nCompute confusion')

                    val_preds = []
                    val_labels = []
                    t1 = time.time()
                    for i, seq_frames in enumerate(test_loader.dataset.frames):
                        val_preds += [np.hstack(all_f_preds[i])]
                        val_labels += [np.hstack(all_f_labels[i])]
                    val_preds = np.hstack(val_preds)
                    val_labels = np.hstack(val_labels)
                    t2 = time.time()
                    C_tot = fast_confusion(val_labels, val_preds,
                                           test_loader.dataset.label_values)
                    t3 = time.time()
                    print(' Stacking time : {:.1f}s'.format(t2 - t1))
                    print('Confusion time : {:.1f}s'.format(t3 - t2))

                    s1 = '\n'
                    for cc in C_tot:
                        for c in cc:
                            s1 += '{:7.0f} '.format(c)
                        s1 += '\n'
                    if debug:
                        print(s1)

                    # Remove ignored labels from confusions
                    for l_ind, label_value in reversed(
                            list(enumerate(test_loader.dataset.label_values))):
                        if label_value in test_loader.dataset.ignored_labels:
                            C_tot = np.delete(C_tot, l_ind, axis=0)
                            C_tot = np.delete(C_tot, l_ind, axis=1)

                    # Objects IoU
                    val_IoUs = IoU_from_confusions(C_tot)

                    # Compute IoUs
                    mIoU = np.mean(val_IoUs)
                    s2 = '{:5.2f} | '.format(100 * mIoU)
                    for IoU in val_IoUs:
                        s2 += '{:5.2f} '.format(100 * IoU)
                    print(s2 + '\n')

                    # Save a report
                    report_file = join(
                        report_path,
                        'report_{:04d}.txt'.format(int(np.floor(last_min))))
                    report = 'Report of the confusion and metrics\n'
                    report += '***********************************\n\n\n'
                    report += 'Confusion matrix:\n\n'
                    report += s1
                    report += '\nIoU values:\n\n'
                    report += s2
                    report += '\n\n'
                    with open(report_file, 'w') as f:
                        f.write(report)

            test_epoch += 1

            # Break when reaching number of desired votes
            if last_min > num_votes:
                break

        return
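
# A hedged, isolated sketch of the uint8 probability buffer used above: class
# probabilities are stored on disk at 1/255 resolution and blended with each new
# prediction through an exponential moving average controlled by test_smooth.
import numpy as np

test_smooth = 0.5
frame_probs_uint8 = np.zeros((1000, 19), dtype=np.uint8)    # stand-in stored buffer
proj_probs = np.random.rand(1000, 19).astype(np.float32)    # stand-in new softmax output

frame_probs = frame_probs_uint8.astype(np.float32) / 255
frame_probs = test_smooth * frame_probs + (1 - test_smooth) * proj_probs
frame_probs_uint8 = (frame_probs * 255).astype(np.uint8)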
    def cloud_segmentation_test(self,
                                net,
                                test_loader,
                                config,
                                num_votes=100,
                                debug=False):
        """
        Test method for cloud segmentation models
        """

        ############
        # Initialize
        ############

        # Choose test smoothing parameter (0 for no smoothing, 0.99 for heavy smoothing)
        test_smooth = 0.95
        test_radius_ratio = 0.7
        softmax = torch.nn.Softmax(1)

        # Number of classes including ignored labels
        nc_tot = test_loader.dataset.num_classes

        # Number of classes predicted by the model
        nc_model = config.num_classes

        # Initiate global prediction over test clouds
        self.test_probs = [
            np.zeros((l.shape[0], nc_model))
            for l in test_loader.dataset.input_labels
        ]

        # Test saving path
        if config.saving:
            test_path = join('test', config.saving_path.split('/')[-1])
            if not exists(test_path):
                makedirs(test_path)
            if not exists(join(test_path, 'predictions')):
                makedirs(join(test_path, 'predictions'))
            if not exists(join(test_path, 'probs')):
                makedirs(join(test_path, 'probs'))
            if not exists(join(test_path, 'potentials')):
                makedirs(join(test_path, 'potentials'))
        else:
            test_path = None

        # If on validation directly compute score
        if test_loader.dataset.set == 'validation':
            val_proportions = np.zeros(nc_model, dtype=np.float32)
            i = 0
            for label_value in test_loader.dataset.label_values:
                if label_value not in test_loader.dataset.ignored_labels:
                    val_proportions[i] = np.sum([
                        np.sum(labels == label_value)
                        for labels in test_loader.dataset.validation_labels
                    ])
                    i += 1
        else:
            val_proportions = None

        #####################
        # Network predictions
        #####################

        test_epoch = 0
        last_min = -0.5

        t = [time.time()]
        last_display = time.time()
        mean_dt = np.zeros(1)

        # Start test loop
        while True:
            print('Initialize workers')
            for i, batch in enumerate(test_loader):

                # New time
                t = t[-1:]
                t += [time.time()]

                if i == 0:
                    print('Done in {:.1f}s'.format(t[1] - t[0]))

                if 'cuda' in self.device.type:
                    batch.to(self.device)

                # Forward pass
                outputs = net(batch, config)

                t += [time.time()]

                # Get probs and labels
                stacked_probs = softmax(outputs).cpu().detach().numpy()
                s_points = batch.points[0].cpu().numpy()
                lengths = batch.lengths[0].cpu().numpy()
                in_inds = batch.input_inds.cpu().numpy()
                cloud_inds = batch.cloud_inds.cpu().numpy()
                torch.cuda.synchronize(self.device)

                # Get predictions and labels per instance
                # ***************************************

                i0 = 0
                for b_i, length in enumerate(lengths):

                    # Get prediction
                    points = s_points[i0:i0 + length]
                    probs = stacked_probs[i0:i0 + length]
                    inds = in_inds[i0:i0 + length]
                    c_i = cloud_inds[b_i]

                    if 0 < test_radius_ratio < 1:
                        mask = np.sum(
                            points**2,
                            axis=1) < (test_radius_ratio * config.in_radius)**2
                        inds = inds[mask]
                        probs = probs[mask]

                    # Update current probs in whole cloud
                    self.test_probs[c_i][inds] = test_smooth * self.test_probs[
                        c_i][inds] + (1 - test_smooth) * probs
                    i0 += length

                # Average timing
                t += [time.time()]
                if i < 2:
                    mean_dt = np.array(t[1:]) - np.array(t[:-1])
                else:
                    mean_dt = 0.9 * mean_dt + 0.1 * (np.array(t[1:]) -
                                                     np.array(t[:-1]))

                # Display
                if (t[-1] - last_display) > 1.0:
                    last_display = t[-1]
                    message = 'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f})'
                    print(
                        message.format(test_epoch, i,
                                       100 * i / config.validation_size,
                                       1000 * (mean_dt[0]),
                                       1000 * (mean_dt[1]),
                                       1000 * (mean_dt[2])))

            # Update minimum of potentials
            new_min = torch.min(test_loader.dataset.min_potentials)
            print('Test epoch {:d}, end. Min potential = {:.1f}'.format(
                test_epoch, new_min))
            #print([np.mean(pots) for pots in test_loader.dataset.potentials])

            # Save predicted cloud
            if last_min + 1 < new_min:

                # Update last_min
                last_min += 1

                # Show vote results (computed on the subsampled clouds, so these are not the final values)
                if test_loader.dataset.set == 'validation':
                    print('\nConfusion on sub clouds')
                    Confs = []
                    for i, file_path in enumerate(test_loader.dataset.files):

                        # Insert false columns for ignored labels
                        probs = np.array(self.test_probs[i], copy=True)
                        for l_ind, label_value in enumerate(
                                test_loader.dataset.label_values):
                            if label_value in test_loader.dataset.ignored_labels:
                                probs = np.insert(probs, l_ind, 0, axis=1)

                        # Predicted labels
                        preds = test_loader.dataset.label_values[np.argmax(
                            probs, axis=1)].astype(np.int32)

                        # Targets
                        targets = test_loader.dataset.input_labels[i]

                        # Confs
                        Confs += [
                            fast_confusion(targets, preds,
                                           test_loader.dataset.label_values)
                        ]

                    # Regroup confusions
                    C = np.sum(np.stack(Confs), axis=0).astype(np.float32)

                    # Remove ignored labels from confusions
                    for l_ind, label_value in reversed(
                            list(enumerate(test_loader.dataset.label_values))):
                        if label_value in test_loader.dataset.ignored_labels:
                            C = np.delete(C, l_ind, axis=0)
                            C = np.delete(C, l_ind, axis=1)

                    # Rescale with the right number of points per class
                    C *= np.expand_dims(
                        val_proportions / (np.sum(C, axis=1) + 1e-6), 1)

                    # Compute IoUs
                    IoUs = IoU_from_confusions(C)
                    mIoU = np.mean(IoUs)
                    s = '{:5.2f} | '.format(100 * mIoU)
                    for IoU in IoUs:
                        s += '{:5.2f} '.format(100 * IoU)
                    print(s + '\n')

                # Save real IoU once in a while
                if int(np.ceil(new_min)) % 10 == 0:

                    # Project predictions
                    print('\nReproject Vote #{:d}'.format(
                        int(np.floor(new_min))))
                    t1 = time.time()
                    proj_probs = []
                    for i, file_path in enumerate(test_loader.dataset.files):

                        print(i, file_path,
                              test_loader.dataset.test_proj[i].shape,
                              self.test_probs[i].shape)

                        print(test_loader.dataset.test_proj[i].dtype,
                              np.max(test_loader.dataset.test_proj[i]))
                        print(test_loader.dataset.test_proj[i][:5])

                        # Reproject probs on the evaluation points
                        probs = self.test_probs[i][
                            test_loader.dataset.test_proj[i], :]
                        proj_probs += [probs]

                    t2 = time.time()
                    print('Done in {:.1f} s\n'.format(t2 - t1))

                    # Show vote results
                    if test_loader.dataset.set == 'validation':
                        print('Confusion on full clouds')
                        t1 = time.time()
                        Confs = []
                        for i, file_path in enumerate(
                                test_loader.dataset.files):

                            # Insert false columns for ignored labels
                            for l_ind, label_value in enumerate(
                                    test_loader.dataset.label_values):
                                if label_value in test_loader.dataset.ignored_labels:
                                    proj_probs[i] = np.insert(proj_probs[i],
                                                              l_ind,
                                                              0,
                                                              axis=1)

                            # Get the predicted labels
                            preds = test_loader.dataset.label_values[np.argmax(
                                proj_probs[i], axis=1)].astype(np.int32)

                            # Confusion
                            targets = test_loader.dataset.validation_labels[i]
                            Confs += [
                                fast_confusion(
                                    targets, preds,
                                    test_loader.dataset.label_values)
                            ]

                        t2 = time.time()
                        print('Done in {:.1f} s\n'.format(t2 - t1))

                        # Regroup confusions
                        C = np.sum(np.stack(Confs), axis=0)

                        # Remove ignored labels from confusions
                        for l_ind, label_value in reversed(
                                list(
                                    enumerate(
                                        test_loader.dataset.label_values))):
                            if label_value in test_loader.dataset.ignored_labels:
                                C = np.delete(C, l_ind, axis=0)
                                C = np.delete(C, l_ind, axis=1)

                        IoUs = IoU_from_confusions(C)
                        mIoU = np.mean(IoUs)
                        s = '{:5.2f} | '.format(100 * mIoU)
                        for IoU in IoUs:
                            s += '{:5.2f} '.format(100 * IoU)
                        print('-' * len(s))
                        print(s)
                        print('-' * len(s) + '\n')

                    # Save predictions
                    print('Saving clouds')
                    t1 = time.time()
                    for i, file_path in enumerate(test_loader.dataset.files):

                        # Get file
                        points = test_loader.dataset.load_evaluation_points(
                            file_path)

                        # Get the predicted labels
                        preds = test_loader.dataset.label_values[np.argmax(
                            proj_probs[i], axis=1)].astype(np.int32)

                        # Save plys
                        cloud_name = file_path.split('/')[-1]
                        test_name = join(test_path, 'predictions', cloud_name)
                        write_ply(test_name, [points, preds],
                                  ['x', 'y', 'z', 'preds'])
                        test_name2 = join(test_path, 'probs', cloud_name)
                        prob_names = [
                            '_'.join(test_loader.dataset.label_to_names[label].
                                     split())
                            for label in test_loader.dataset.label_values
                        ]
                        write_ply(test_name2, [points, proj_probs[i]],
                                  ['x', 'y', 'z'] + prob_names)

                        # Save potentials
                        pot_points = np.array(
                            test_loader.dataset.pot_trees[i].data, copy=False)
                        pot_name = join(test_path, 'potentials', cloud_name)
                        pots = test_loader.dataset.potentials[i].numpy(
                        ).astype(np.float32)
                        write_ply(pot_name,
                                  [pot_points.astype(np.float32), pots],
                                  ['x', 'y', 'z', 'pots'])

                        # Save ascii preds
                        if test_loader.dataset.set == 'test':
                            if test_loader.dataset.name.startswith(
                                    'Semantic3D'):
                                ascii_name = join(
                                    test_path, 'predictions', test_loader.
                                    dataset.ascii_files[cloud_name])
                            else:
                                ascii_name = join(test_path, 'predictions',
                                                  cloud_name[:-4] + '.txt')
                            np.savetxt(ascii_name, preds, fmt='%d')

                    t2 = time.time()
                    print('Done in {:.1f} s\n'.format(t2 - t1))

            test_epoch += 1

            # Break when reaching number of desired votes
            if last_min > num_votes:
                break

        return
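The helpers fast_confusion and IoU_from_confusions called above come from the surrounding codebase and are not shown in this excerpt. The following are hedged stand-in sketches of the intended computation (confusion counts over a fixed label set, then per-class IoU), not the actual implementations:

import numpy as np

def fast_confusion(true, pred, label_values):
    # Stand-in sketch: map arbitrary label values to contiguous indices, then count pairs
    label_to_idx = {l: i for i, l in enumerate(label_values)}
    t = np.array([label_to_idx[l] for l in true])
    p = np.array([label_to_idx[l] for l in pred])
    n = len(label_values)
    return np.bincount(t * n + p, minlength=n * n).reshape(n, n)

def IoU_from_confusions(confusions):
    # Stand-in sketch: true positives on the diagonal of the last two axes,
    # IoU = TP / (TP + FP + FN) per class, with an epsilon against division by zero
    TP = np.diagonal(confusions, axis1=-2, axis2=-1)
    TP_plus_FN = np.sum(confusions, axis=-1)
    TP_plus_FP = np.sum(confusions, axis=-2)
    return TP / (TP_plus_FP + TP_plus_FN - TP + 1e-6)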
Example #18
    def prepare_ply(self):

        print('\nPreparing ply files')
        t0 = time.time()

        # Folder for the ply files
        paths = [join(self.path, 'scans'), join(self.path, 'scans_test')]
        new_paths = [self.train_path, self.test_path]
        mesh_paths = [
            join(self.path, 'training_meshes'),
            join(self.path, 'test_meshes')
        ]

        # Mapping from annot to NYU labels ID
        label_files = join(self.path, 'scannetv2-labels.combined.tsv')
        with open(label_files, 'r') as f:
            lines = f.readlines()
            names1 = [line.split('\t')[1] for line in lines[1:]]
            IDs = [int(line.split('\t')[4]) for line in lines[1:]]
            annot_to_nyuID = {n: id for n, id in zip(names1, IDs)}

        for path, new_path, mesh_path in zip(paths, new_paths, mesh_paths):

            # Create folder
            if not exists(new_path):
                makedirs(new_path)
            if not exists(mesh_path):
                makedirs(mesh_path)

            # Get scene names
            scenes = np.sort([f for f in listdir(path)])
            N = len(scenes)

            for i, scene in enumerate(scenes):

                #############
                # Load meshes
                #############

                # Check if file already done
                if exists(join(new_path, scene + '.ply')):
                    continue
                t1 = time.time()

                # Read mesh
                vertex_data, faces = read_ply(join(path, scene,
                                                   scene + '_vh_clean_2.ply'),
                                              triangular_mesh=True)
                vertices = np.vstack(
                    (vertex_data['x'], vertex_data['y'], vertex_data['z'])).T
                vertices_colors = np.vstack(
                    (vertex_data['red'], vertex_data['green'],
                     vertex_data['blue'])).T

                vertices_labels = np.zeros(vertices.shape[0], dtype=np.int32)
                if new_path == self.train_path:

                    # Load alignment matrix to realign points
                    align_mat = None
                    with open(join(path, scene, scene + '.txt'),
                              'r') as txtfile:
                        lines = txtfile.readlines()
                    for line in lines:
                        line = line.split()
                        if line[0] == 'axisAlignment':
                            align_mat = np.array([
                                float(x) for x in line[2:]
                            ]).reshape([4, 4]).astype(np.float32)
                    R = align_mat[:3, :3]
                    T = align_mat[:3, 3]
                    vertices = vertices.dot(R.T) + T

                    # Get objects segmentations
                    with open(
                            join(path, scene,
                                 scene + '_vh_clean_2.0.010000.segs.json'),
                            'r') as f:
                        segmentations = json.load(f)

                    segIndices = np.array(segmentations['segIndices'])

                    # Get objects classes
                    with open(
                            join(path, scene,
                                 scene + '_vh_clean.aggregation.json'),
                            'r') as f:
                        aggregation = json.load(f)

                    # Loop on object to classify points
                    for segGroup in aggregation['segGroups']:
                        c_name = segGroup['label']
                        if c_name in names1:
                            nyuID = annot_to_nyuID[c_name]
                            if nyuID in self.label_values:
                                for segment in segGroup['segments']:
                                    vertices_labels[segIndices ==
                                                    segment] = nyuID

                ###########################
                # Create finer point clouds
                ###########################

                # Rasterize mesh with 3D points (place more points than needed so they can be subsampled afterwards)
                points, associated_vert_inds = rasterize_mesh(
                    vertices, faces, 0.003)

                # Subsample points
                sub_points, sub_vert_inds = grid_subsampling(
                    points, labels=associated_vert_inds, sampleDl=0.01)

                # Collect colors from associated vertex
                sub_colors = vertices_colors[sub_vert_inds.ravel(), :]

                if new_path == self.train_path:

                    # Collect labels from associated vertex
                    sub_labels = vertices_labels[sub_vert_inds.ravel()]

                    filename_point = join(new_path, scene + '.ply')

                    # calculate boundaries
                    points_num = sub_points.shape[0]

                    # find semantic boundaries
                    is_boundaries = np.zeros(points_num, dtype=np.int32)

                    # store boundary classes (0 indicates empty)
                    boundary_class_0 = np.zeros(points_num, dtype=np.int32)
                    boundary_class_1 = np.zeros(points_num, dtype=np.int32)
                    boundary_class_2 = np.zeros(points_num, dtype=np.int32)

                    # kd_tree
                    cloud_tree = KDTree(sub_points)

                    # check neighborhood (use p_i here so the scene index i is not overwritten)
                    for p_i in range(points_num):

                        center_class = sub_labels[p_i]

                        # Pass all unclassified points
                        if center_class == 0:
                            continue

                        # check radius neighbors (r = 0.02)
                        inds = cloud_tree.query_radius(
                            sub_points[p_i].reshape(1, -1), 0.02)

                        # flag indicating if this point is a boundary
                        flag = 0
                        for j in inds[0]:

                            # Pass all unclassified points
                            if sub_labels[j] == 0:
                                continue

                            # when a class change is found:
                            # 1. boundary between a considered and a non-considered class
                            # 2. boundary between considered classes
                            if sub_labels[j] != center_class:
                                # only store considered classes (> 0)
                                flag = 1
                                # if the center point has a non-considered class
                                if center_class == -1:
                                    if boundary_class_0[p_i] == 0:
                                        boundary_class_0[p_i] = sub_labels[j]
                                    elif sub_labels[j] == boundary_class_0[p_i]:
                                        continue
                                    elif boundary_class_1[p_i] == 0:
                                        boundary_class_1[p_i] = sub_labels[j]
                                    elif sub_labels[j] == boundary_class_1[p_i]:
                                        continue
                                    else:
                                        boundary_class_2[p_i] = sub_labels[j]
                                # if the center point has a considered class
                                else:
                                    boundary_class_0[p_i] = center_class
                                    if sub_labels[j] == -1:
                                        continue
                                    if boundary_class_1[p_i] == 0:
                                        boundary_class_1[p_i] = sub_labels[j]
                                    elif boundary_class_1[p_i] == sub_labels[j]:
                                        continue
                                    else:
                                        boundary_class_2[p_i] = sub_labels[j]
                        if flag:
                            is_boundaries[p_i] = 1

                    # save points
                    write_ply(filename_point, [
                        sub_points, sub_colors, sub_labels, is_boundaries,
                        boundary_class_0, boundary_class_1, boundary_class_2,
                        sub_vert_inds
                    ], [
                        'x', 'y', 'z', 'red', 'green', 'blue', 'class',
                        'is_boundary', 'b_c_0', 'b_c_1', 'b_c_2', 'vert_ind'
                    ])

                    #############################
                    # Prepare meshes for testing
                    #############################

                    proj_inds = np.squeeze(
                        cloud_tree.query(vertices, return_distance=False))
                    proj_inds = proj_inds.astype(np.int32)

                    vertices_boundaries = is_boundaries[proj_inds]
                    vertices_b_c_0s = boundary_class_0[proj_inds]
                    vertices_b_c_1s = boundary_class_1[proj_inds]
                    vertices_b_c_2s = boundary_class_2[proj_inds]

                    # Save mesh
                    write_ply(join(mesh_path, scene + '_mesh.ply'), [
                        vertices, vertices_colors, vertices_labels,
                        vertices_boundaries, vertices_b_c_0s, vertices_b_c_1s,
                        vertices_b_c_2s
                    ], [
                        'x', 'y', 'z', 'red', 'green', 'blue', 'class',
                        'is_boundary', 'b_c_0', 'b_c_1', 'b_c_2'
                    ],
                              triangular_faces=faces)

                else:

                    # Save points (filename_point is only defined in the training branch above)
                    write_ply(
                        join(new_path, scene + '.ply'),
                        [sub_points, sub_colors, sub_vert_inds],
                        ['x', 'y', 'z', 'red', 'green', 'blue', 'vert_ind'])

                    # Save mesh
                    write_ply(join(mesh_path, scene + '_mesh.ply'),
                              [vertices, vertices_colors],
                              ['x', 'y', 'z', 'red', 'green', 'blue'],
                              triangular_faces=faces)

                #  Display
                print('{:s} {:.1f} sec  / {:.1f}%'.format(
                    scene,
                    time.time() - t1, 100 * i / N))

        print('Done in {:.1f}s'.format(time.time() - t0))
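The boundary search in prepare_ply above queries the KDTree one point at a time. As a hypothetical, simplified alternative, the same flag can be computed with a single batched radius query (this sketch only marks boundary points and does not fill the b_c_* arrays):

import numpy as np
from sklearn.neighbors import KDTree

def flag_semantic_boundaries(sub_points, sub_labels, radius=0.02):
    # One batched radius query instead of one query per point
    neighborhoods = KDTree(sub_points).query_radius(sub_points, radius)
    is_boundary = np.zeros(sub_points.shape[0], dtype=np.int32)
    for p_i, inds in enumerate(neighborhoods):
        if sub_labels[p_i] == 0:
            continue  # skip unclassified centers
        neigh = sub_labels[inds]
        neigh = neigh[neigh != 0]  # ignore unclassified neighbors
        if np.any(neigh != sub_labels[p_i]):
            is_boundary[p_i] = 1
    return is_boundary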
Example #19
    # Computes the plane passing through the 3 points
    t0 = time.time()
    ref_pt, normal = compute_plane(pts)
    t1 = time.time()
    print('plane computation done in {:.3f} seconds'.format(t1 - t0))
    
    # Find points in the plane and others
    t0 = time.time()
    points_in_plane = in_plane(points, ref_pt, normal, threshold_in)
    t1 = time.time()
    print('plane extraction done in {:.3f} seconds'.format(t1 - t0))
    plane_inds = points_in_plane.nonzero()[0]
    remaining_inds = (1-points_in_plane).nonzero()[0]
    
    # Save extracted plane and remaining points
    write_ply('../plane.ply', [points[plane_inds], colors[plane_inds], labels[plane_inds]], ['x', 'y', 'z', 'red', 'green', 'blue', 'label'])
    write_ply('../remaining_points_plane.ply', [points[remaining_inds], colors[remaining_inds], labels[remaining_inds]], ['x', 'y', 'z', 'red', 'green', 'blue', 'label'])
    

    # Computes the best plane fitting the point cloud
    # ***********************************
    #
    #
    
    print('\n--- 3) ---\n')

    # Define parameters of RANSAC
    NB_RANDOM_DRAWS = 100
    threshold_in = 0.05

    # Find best plane by RANSAC
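compute_plane and in_plane are not shown in this excerpt, and the RANSAC loop itself is cut off. Plausible minimal versions, assuming pts is a (3, 3) array of three points and points is an (N, 3) array, could look like the sketch below (the RANSAC sketch follows the NB_RANDOM_DRAWS / threshold_in parameters defined above):

import numpy as np

def compute_plane(pts):
    # Plane through three points: a reference point and a unit normal
    ref_pt = pts[0]
    normal = np.cross(pts[1] - pts[0], pts[2] - pts[0])
    return ref_pt, normal / (np.linalg.norm(normal) + 1e-12)

def in_plane(points, ref_pt, normal, threshold_in=0.1):
    # Boolean mask: distance from each point to the plane below the threshold
    return np.abs((points - ref_pt).dot(normal)) < threshold_in

def RANSAC(points, nb_draws=100, threshold_in=0.05):
    # Keep the plane (through 3 random points) that gathers the most inliers
    best_vote, best_pt, best_normal = -1, None, None
    for _ in range(nb_draws):
        pts = points[np.random.choice(points.shape[0], 3, replace=False)]
        ref_pt, normal = compute_plane(pts)
        vote = np.sum(in_plane(points, ref_pt, normal, threshold_in))
        if vote > best_vote:
            best_vote, best_pt, best_normal = vote, ref_pt, normal
    return best_pt, best_normal, best_vote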
Example #20
    def load_subsampled_clouds(self, subsampling_parameter):
        """
        Presubsample point clouds and load into memory (Load KDTree for neighbors searches
        """

        if 0 < subsampling_parameter <= 0.01:
            raise ValueError(
                'subsampling_parameter too low (should be over 1 cm)')

        # Create path for files
        tree_path = join(self.path,
                         'input_{:.3f}'.format(subsampling_parameter))
        if not exists(tree_path):
            makedirs(tree_path)

        # All training and test files
        files = np.hstack((self.train_files, self.test_files))

        # Initiate containers
        self.input_trees = {'training': [], 'validation': [], 'test': []}
        self.input_colors = {'training': [], 'validation': [], 'test': []}
        self.input_vert_inds = {'training': [], 'validation': [], 'test': []}
        self.input_labels = {'training': [], 'validation': []}
        self.input_boundaries = {'training': [], 'validation': []}
        self.input_b_c_0 = {'training': [], 'validation': []}
        self.input_b_c_1 = {'training': [], 'validation': []}
        self.input_b_c_2 = {'training': [], 'validation': []}

        # Advanced display
        N = len(files)
        progress_n = 30
        fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%'
        print('\nPreparing KDTree for all scenes, subsampled at {:.3f}'.format(
            subsampling_parameter))

        for i, file_path in enumerate(files):

            # get cloud name and split
            cloud_name = file_path.split('/')[-1][:-4]
            cloud_folder = file_path.split('/')[-2]
            if 'train' in cloud_folder:
                if self.validation_split == 1:
                    if cloud_name in self.validation_clouds:
                        self.all_splits += [1]
                        cloud_split = 'validation'
                    else:
                        self.all_splits += [0]
                        cloud_split = 'training'
                else:
                    self.all_splits += [0]
                    cloud_split = 'training'
            else:
                cloud_split = 'test'

            if (cloud_split != 'test'
                    and self.load_test) or (cloud_split == 'test'
                                            and not self.load_test):
                continue

            # Name of the input files
            KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))
            sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name))

            # Check if inputs have already been computed
            if isfile(KDTree_file):

                # read ply with data
                data = read_ply(sub_ply_file)
                sub_colors = np.vstack(
                    (data['red'], data['green'], data['blue'])).T
                sub_vert_inds = data['vert_ind']
                if cloud_split == 'test':
                    sub_labels = None
                else:
                    sub_labels = data['class']
                    sub_boundaries = data['is_boundary']
                    sub_b_c_0 = data['b_c_0']
                    sub_b_c_1 = data['b_c_1']
                    sub_b_c_2 = data['b_c_2']

                # Read pkl with search tree
                with open(KDTree_file, 'rb') as f:
                    search_tree = pickle.load(f)

            else:

                # Read ply file
                data = read_ply(file_path)
                points = np.vstack((data['x'], data['y'], data['z'])).T
                colors = np.vstack(
                    (data['red'], data['green'], data['blue'])).T
                if cloud_split == 'test':
                    int_features = data['vert_ind']
                else:
                    int_features = np.vstack(
                        (data['vert_ind'], data['class'], data['is_boundary'],
                         data['b_c_0'], data['b_c_1'], data['b_c_2'])).T

                # Subsample cloud
                sub_points, sub_colors, sub_int_features = grid_subsampling(
                    points,
                    features=colors,
                    labels=int_features,
                    sampleDl=subsampling_parameter)

                # Rescale float color and squeeze label
                sub_colors = sub_colors / 255
                if cloud_split == 'test':
                    sub_vert_inds = np.squeeze(sub_int_features)
                    sub_labels = None
                else:
                    sub_vert_inds = sub_int_features[:, 0]
                    sub_labels = sub_int_features[:, 1]
                    sub_boundaries = sub_int_features[:, 2]
                    sub_b_c_0 = sub_int_features[:, 3]
                    sub_b_c_1 = sub_int_features[:, 4]
                    sub_b_c_2 = sub_int_features[:, 5]

                # Get chosen neighborhoods
                search_tree = KDTree(sub_points, leaf_size=50)

                # Save KDTree
                with open(KDTree_file, 'wb') as f:
                    pickle.dump(search_tree, f)

                # Save ply
                if cloud_split == 'test':
                    write_ply(
                        sub_ply_file, [sub_points, sub_colors, sub_vert_inds],
                        ['x', 'y', 'z', 'red', 'green', 'blue', 'vert_ind'])
                else:
                    write_ply(sub_ply_file, [
                        sub_points, sub_colors, sub_labels, sub_boundaries,
                        sub_b_c_0, sub_b_c_1, sub_b_c_2, sub_vert_inds
                    ], [
                        'x', 'y', 'z', 'red', 'green', 'blue', 'class',
                        'is_boundary', 'b_c_0', 'b_c_1', 'b_c_2', 'vert_ind'
                    ])

            # Fill data containers
            self.input_trees[cloud_split] += [search_tree]
            self.input_colors[cloud_split] += [sub_colors]
            self.input_vert_inds[cloud_split] += [sub_vert_inds]
            if cloud_split in ['training', 'validation']:
                self.input_labels[cloud_split] += [sub_labels]
                self.input_boundaries[cloud_split] += [sub_boundaries]
                self.input_b_c_0[cloud_split] += [sub_b_c_0]
                self.input_b_c_1[cloud_split] += [sub_b_c_1]
                self.input_b_c_2[cloud_split] += [sub_b_c_2]

            print('', end='\r')
            print(fmt_str.format('#' * ((i * progress_n) // N), 100 * i / N),
                  end='',
                  flush=True)

        # Get number of clouds
        self.num_training = len(self.input_trees['training'])
        self.num_validation = len(self.input_trees['validation'])
        self.num_test = len(self.input_trees['test'])

        # Get validation and test reprojection indices
        self.validation_proj = []
        self.validation_labels = []
        self.validation_boundaries = []
        self.validation_b_c_0 = []
        self.validation_b_c_1 = []
        self.validation_b_c_2 = []
        self.test_proj = []
        self.test_labels = []
        i_val = 0
        i_test = 0

        # Advanced display
        N = self.num_validation + self.num_test
        print('', end='\r')
        print(fmt_str.format('#' * progress_n, 100), flush=True)
        print('\nPreparing reprojection indices for validation and test')

        for i, file_path in enumerate(files):

            # get cloud name and split
            cloud_name = file_path.split('/')[-1][:-4]
            cloud_folder = file_path.split('/')[-2]

            # Validation projection and labels
            if (
                    not self.load_test
            ) and 'train' in cloud_folder and cloud_name in self.validation_clouds:
                proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                if isfile(proj_file):
                    with open(proj_file, 'rb') as f:
                        proj_inds, labels, boundaries, b_c_0, b_c_1, b_c_2 = pickle.load(
                            f)
                else:
                    # Get original mesh
                    mesh_path = file_path.split('/')
                    mesh_path[-2] = 'training_meshes'
                    mesh_path = '/'.join(mesh_path)
                    vertex_data, faces = read_ply(mesh_path[:-4] + '_mesh.ply',
                                                  triangular_mesh=True)
                    vertices = np.vstack((vertex_data['x'], vertex_data['y'],
                                          vertex_data['z'])).T
                    labels = vertex_data['class']
                    boundaries = vertex_data['is_boundary']
                    b_c_0 = vertex_data['b_c_0']
                    b_c_1 = vertex_data['b_c_1']
                    b_c_2 = vertex_data['b_c_2']

                    # Compute projection inds
                    proj_inds = np.squeeze(
                        self.input_trees['validation'][i_val].query(
                            vertices, return_distance=False))
                    proj_inds = proj_inds.astype(np.int32)

                    # Save
                    with open(proj_file, 'wb') as f:
                        pickle.dump([
                            proj_inds, labels, boundaries, b_c_0, b_c_1, b_c_2
                        ], f)

                self.validation_proj += [proj_inds]
                self.validation_labels += [labels]
                self.validation_boundaries += [boundaries]
                self.validation_b_c_0 += [b_c_0]
                self.validation_b_c_1 += [b_c_1]
                self.validation_b_c_2 += [b_c_2]
                i_val += 1

            # Test projection
            if self.load_test and 'test' in cloud_folder:
                proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                if isfile(proj_file):
                    with open(proj_file, 'rb') as f:
                        proj_inds, labels = pickle.load(f)
                else:
                    # Get original mesh
                    mesh_path = file_path.split('/')
                    mesh_path[-2] = 'test_meshes'
                    mesh_path = '/'.join(mesh_path)
                    vertex_data, faces = read_ply(mesh_path[:-4] + '_mesh.ply',
                                                  triangular_mesh=True)
                    vertices = np.vstack((vertex_data['x'], vertex_data['y'],
                                          vertex_data['z'])).T
                    labels = np.zeros(vertices.shape[0], dtype=np.int32)

                    # Compute projection inds
                    proj_inds = np.squeeze(
                        self.input_trees['test'][i_test].query(
                            vertices, return_distance=False))
                    proj_inds = proj_inds.astype(np.int32)

                    with open(proj_file, 'wb') as f:
                        pickle.dump([proj_inds, labels], f)

                self.test_proj += [proj_inds]
                self.test_labels += [labels]
                i_test += 1

            print('', end='\r')
            print(fmt_str.format('#' * (((i_val + i_test) * progress_n) // N),
                                 100 * (i_val + i_test) / N),
                  end='',
                  flush=True)

        print('i_val={}'.format(i_val))
        print('i_test={}'.format(i_test))
        print('\n')

        return
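grid_subsampling used above comes from compiled C++ wrappers shipped with the codebase. As a rough illustration of what sampleDl does, a hypothetical pure-NumPy voxel-barycenter version (points only, no features or labels) might look like:

import numpy as np

def grid_subsampling_numpy(points, sampleDl=0.04):
    # Average the points falling in each voxel of size sampleDl
    voxels = np.floor(points / sampleDl).astype(np.int64)
    _, inverse, counts = np.unique(voxels, axis=0,
                                   return_inverse=True, return_counts=True)
    inverse = inverse.ravel()  # guard against shape differences across NumPy versions
    sums = np.zeros((counts.shape[0], points.shape[1]), dtype=np.float64)
    np.add.at(sums, inverse, points)
    return (sums / counts[:, None]).astype(np.float32)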
Example #21
def training_and_test(useGPU=True, useMLP=True, useMLP_Conv=True, useRF=True):
    num_per_class = 10000

    # Histories stay None when the corresponding model is not trained,
    # so the plotting code at the end does not raise a NameError
    mlp_history = None
    mlp_conv_history = None

    # Collect training features / labels
    # **********************************
    #

    with Timer('Collect Training Features'):
        X, Y = advanced_point_choice(num_per_class)
        Y = Y.astype(int)
        Y_weight = class_weight.compute_sample_weight('balanced', Y)

    # Load cloud as a [N x 3] matrix
    with Timer('Collect Testing Features'):

        test_pt_cloud = load_pts(test_pt_cloud_file)
        if exists(test_cache_file):
            with Timer('!!Read from last Cache!!'):
                f = np.load(test_cache_file)
                test_X = f['features']
                test_Y = f['labels']
        else:
            with Timer('reading features'):
                test_X, test_Y = load_features_label(test_file)
                np.savez(test_cache_file, features=test_X, labels=test_Y)
        if not np.isfinite(test_X).all():
            print("feature_test contains nan or inf!")
            exit()
        test_Y = test_Y.astype(int)
        test_Y_weight = class_weight.compute_sample_weight('balanced', test_Y)

    his_train = np.bincount(Y, minlength=CLASS_SIZE)
    his_test = np.bincount(test_Y, minlength=CLASS_SIZE)
    print('Number of sample per class:')
    print('{:<20}\t{:>10}\t{:>10}'.format('class', 'train', 'test'))
    for i in range(1, CLASS_SIZE):
        print('{:<20}\t{:>10}\t{:>10}'.format(target_names[i], his_train[i],
                                              his_test[i]))

    if useRF:
        predictions = random_forest(X, Y, Y_weight, test_X, test_Y,
                                    test_Y_weight)
        with Timer('Save predictions ply'):
            write_ply(
                '../preds_RF.ply',
                [test_pt_cloud, predictions.astype(np.uint8)],
                ['x', 'y', 'z', 'labels'])

    if useMLP:
        predictions, confidence, mlp_history = mlp(useGPU, X, Y, Y_weight,
                                                   test_X, test_Y,
                                                   test_Y_weight)
        with Timer('Save predictions ply'):
            write_ply(
                '../preds_MLP.ply',
                [test_pt_cloud, predictions.astype(np.uint8)],
                ['x', 'y', 'z', 'labels'])
            predictions_90 = predictions * (confidence > 0.9)
            write_ply(
                '../preds_MLP_90.ply',
                [test_pt_cloud, predictions_90.astype(np.uint8)],
                ['x', 'y', 'z', 'labels'])
            predictions_95 = predictions * (confidence > 0.95)
            write_ply(
                '../preds_MLP_95.ply',
                [test_pt_cloud, predictions_95.astype(np.uint8)],
                ['x', 'y', 'z', 'labels'])
            predictions_99 = predictions * (confidence > 0.99)
            write_ply(
                '../preds_MLP_99.ply',
                [test_pt_cloud, predictions_99.astype(np.uint8)],
                ['x', 'y', 'z', 'labels'])

    if useMLP_Conv:
        predictions, confidence, mlp_conv_history = mlp_conv(
            useGPU, X, Y, Y_weight, test_X, test_Y, test_Y_weight)
        with Timer('Save predictions ply'):
            write_ply(
                '../preds_MLPconv.ply',
                [test_pt_cloud, predictions.astype(np.uint8)],
                ['x', 'y', 'z', 'labels'])
            predictions_90 = predictions * (confidence > 0.9)
            write_ply(
                '../preds_MLPconv_90.ply',
                [test_pt_cloud, predictions_90.astype(np.uint8)],
                ['x', 'y', 'z', 'labels'])
            predictions_95 = predictions * (confidence > 0.95)
            write_ply(
                '../preds_MLPconv_95.ply',
                [test_pt_cloud, predictions_95.astype(np.uint8)],
                ['x', 'y', 'z', 'labels'])
            predictions_99 = predictions * (confidence > 0.99)
            write_ply(
                '../preds_MLPconv_99.ply',
                [test_pt_cloud, predictions_99.astype(np.uint8)],
                ['x', 'y', 'z', 'labels'])
    if mlp_conv_history is not None:
        plt.plot(mlp_conv_history.history['loss'], label='conv loss')
        plt.plot(mlp_conv_history.history['val_loss'], label='conv test loss')
    if mlp_history is not None:
        plt.plot(mlp_history.history['loss'], label='mlp loss')
        plt.plot(mlp_history.history['val_loss'], label='mlp test loss')
    plt.legend()
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.show()
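random_forest, mlp and mlp_conv are project-specific helpers not shown in this excerpt. As an indication only, a minimal random_forest with the same call signature as above could be sketched with scikit-learn (hypothetical implementation, the confidence outputs of the MLP variants are not covered here):

from sklearn.ensemble import RandomForestClassifier

def random_forest(X, Y, Y_weight, test_X, test_Y, test_Y_weight,
                  n_estimators=100):
    # Train a forest with per-sample weights and return hard predictions on the test features
    clf = RandomForestClassifier(n_estimators=n_estimators, n_jobs=-1)
    clf.fit(X, Y, sample_weight=Y_weight)
    predictions = clf.predict(test_X)
    return predictions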
Example #22
        bunny_o_cloud = np.asarray(
            [np.asarray(list(x)) for x in bunny_o_cloud]).T
        bunny_r_cloud = np.asarray(
            [np.asarray(list(x)) for x in bunny_r_cloud]).T

        # Find the best transformation
        R, T = best_rigid_transform(bunny_r_cloud, bunny_o_cloud)

        # Apply the transformation
        bunny_r_cloud = R @ bunny_r_cloud + T

        # Save cloud
        bunny_o_cloud = np.asarray(
            [np.asarray(list(x)) for x in bunny_o_cloud]).T
        bunny_r_cloud = np.asarray(
            [np.asarray(list(x)) for x in bunny_r_cloud]).T
        write_ply('../results/bunny_returned_bestRT.ply', [bunny_r_cloud],
                  ['x', 'y', 'z'])

        # Compute RMS
        rms = RMS(bunny_o_cloud, bunny_r_cloud)

        # Print RMS
        print(rms)

    # Test ICP and visualize
    # **********************
    #

    # If statement to skip this part if wanted
    if False:

        # Cloud paths
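best_rigid_transform and RMS are defined elsewhere in the project. A standard SVD-based (Kabsch) sketch, under the assumption that both clouds are (3, N) arrays with points as columns as in the R @ cloud + T call above, could be:

import numpy as np

def best_rigid_transform(data, ref):
    # Rotation R and translation T minimizing ||R @ data + T - ref|| (Kabsch method)
    data_mean = data.mean(axis=1, keepdims=True)
    ref_mean = ref.mean(axis=1, keepdims=True)
    H = (data - data_mean) @ (ref - ref_mean).T
    U, S, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T
    if np.linalg.det(R) < 0:   # avoid an improper rotation (reflection)
        Vt[-1, :] *= -1
        R = Vt.T @ U.T
    T = ref_mean - R @ data_mean
    return R, T

def RMS(cloud_a, cloud_b):
    # Root-mean-square point-to-point distance between matched (3, N) clouds
    return np.sqrt(np.mean(np.sum((cloud_a - cloud_b) ** 2, axis=0)))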
Example #23
    def test_multi_segmentation(self,
                                model,
                                dataset,
                                num_votes=100,
                                num_saves=10):

        ##################
        # Pre-computations
        ##################

        print('Preparing test structures')
        t1 = time.time()

        # Collect original test file names
        original_path = join(dataset.path, 'test_ply')
        test_names = [
            f[:-4] for f in listdir(original_path) if f[-4:] == '.ply'
        ]
        test_names = np.sort(test_names)

        original_labels = []
        original_points = []
        projection_inds = []
        for i, cloud_name in enumerate(test_names):

            # Read data in ply file
            data = read_ply(join(original_path, cloud_name + '.ply'))
            points = np.vstack((data['x'], -data['z'], data['y'])).T
            original_labels += [data['label'] - 1]
            original_points += [points]

            # Create tree structure to compute neighbors
            tree = KDTree(dataset.input_points['test'][i])
            projection_inds += [
                np.squeeze(tree.query(points, return_distance=False))
            ]

        t2 = time.time()
        print('Done in {:.1f} s\n'.format(t2 - t1))

        ##########
        # Initiate
        ##########

        # Test saving path
        if model.config.saving:
            test_path = join('test', model.saving_path.split('/')[-1])
            if not exists(test_path):
                makedirs(test_path)
        else:
            test_path = None

        # Initialise iterator with test data
        self.sess.run(dataset.test_init_op)

        # Initiate result containers
        average_predictions = [
            np.zeros((1, 1), dtype=np.float32) for _ in test_names
        ]

        #####################
        # Network predictions
        #####################

        mean_dt = np.zeros(2)
        last_display = time.time()
        for v in range(num_votes):

            # Run model on all test examples
            # ******************************

            # Initiate result containers
            all_predictions = []
            all_obj_inds = []

            while True:
                try:

                    # Run one step of the model
                    t = [time.time()]
                    ops = (self.prob_logits, model.labels,
                           model.inputs['super_labels'],
                           model.inputs['object_inds'],
                           model.inputs['in_batches'])
                    preds, labels, obj_labels, o_inds, batches = self.sess.run(
                        ops, {model.dropout_prob: 1.0})
                    t += [time.time()]

                    # Stack all predictions for each class separately
                    max_ind = np.max(batches)
                    for b_i, b in enumerate(batches):

                        # Eliminate shadow indices
                        b = b[b < max_ind - 0.5]

                        # Get prediction (only for the concerned parts)
                        obj = obj_labels[b[0]]
                        predictions = preds[b][:, :model.config.
                                               num_classes[obj]]

                        # Stack all results
                        all_predictions += [predictions]
                        all_obj_inds += [o_inds[b_i]]

                    # Average timing
                    t += [time.time()]
                    mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) -
                                                       np.array(t[:-1]))

                    # Display
                    if (t[-1] - last_display) > 1.0:
                        last_display = t[-1]
                        message = 'Vote {:d} : {:.1f}% (timings : {:4.2f} {:4.2f})'
                        print(
                            message.format(
                                v,
                                100 * len(all_predictions) / dataset.num_test,
                                1000 * (mean_dt[0]), 1000 * (mean_dt[1])))

                except tf.errors.OutOfRangeError:
                    break

            # Project predictions on original point clouds
            # ********************************************

            print('\nGetting test confusions')
            t1 = time.time()

            for i, probs in enumerate(all_predictions):

                # Interpolate prediction from current positions to original points
                obj_i = all_obj_inds[i]
                proj_predictions = probs[projection_inds[obj_i]]

                # Average prediction across votes
                average_predictions[obj_i] = average_predictions[obj_i] + \
                                             (proj_predictions - average_predictions[obj_i]) / (v + 1)

            Confs = []
            for obj_i, avg_probs in enumerate(average_predictions):

                # Compute confusion matrices
                parts = [j for j in range(avg_probs.shape[1])]
                Confs += [
                    confusion_matrix(original_labels[obj_i],
                                     np.argmax(avg_probs, axis=1), parts)
                ]

            t2 = time.time()
            print('Done in {:.1f} s\n'.format(t2 - t1))

            # Save the best/worst segmentations per class
            # *******************************************

            print('Saving test examples')
            t1 = time.time()

            # Regroup confusions per object class
            Confs = np.array(Confs)
            obj_mIoUs = []
            for l in dataset.label_values:

                # Get confusions for this object
                obj_inds = np.where(dataset.input_labels['test'] == l)[0]
                obj_confs = np.stack(Confs[obj_inds])

                # Get IoU
                obj_IoUs = IoU_from_confusions(obj_confs)
                obj_mIoUs += [np.mean(obj_IoUs, axis=-1)]

                # Get X best and worst prediction
                order = np.argsort(obj_mIoUs[-1])
                worst_inds = obj_inds[order[:num_saves]]
                best_inds = obj_inds[order[:-num_saves - 1:-1]]
                worst_IoUs = obj_IoUs[order[:num_saves]]
                best_IoUs = obj_IoUs[order[:-num_saves - 1:-1]]

                # Save the names in a file
                obj_path = join(test_path, dataset.label_to_names[l])
                if not exists(obj_path):
                    makedirs(obj_path)
                worst_file = join(obj_path, 'worst_inds.txt')
                best_file = join(obj_path, 'best_inds.txt')
                with open(worst_file, "w") as text_file:
                    for w_i, w_IoUs in zip(worst_inds, worst_IoUs):
                        text_file.write('{:d} {:s} :'.format(
                            w_i, test_names[w_i]))
                        for IoU in w_IoUs:
                            text_file.write(' {:.1f}'.format(100 * IoU))
                        text_file.write('\n')

                with open(best_file, "w") as text_file:
                    for b_i, b_IoUs in zip(best_inds, best_IoUs):
                        text_file.write('{:d} {:s} :'.format(
                            b_i, test_names[b_i]))
                        for IoU in b_IoUs:
                            text_file.write(' {:.1f}'.format(100 * IoU))
                        text_file.write('\n')

                # Save the clouds
                for i, w_i in enumerate(worst_inds):
                    filename = join(obj_path, 'worst_{:02d}.ply'.format(i + 1))
                    preds = np.argmax(average_predictions[w_i],
                                      axis=1).astype(np.int32)
                    write_ply(
                        filename,
                        [original_points[w_i], original_labels[w_i], preds],
                        ['x', 'y', 'z', 'gt', 'pre'])

                for i, b_i in enumerate(best_inds):
                    filename = join(obj_path, 'best_{:02d}.ply'.format(i + 1))
                    preds = np.argmax(average_predictions[b_i],
                                      axis=1).astype(np.int32)
                    write_ply(
                        filename,
                        [original_points[b_i], original_labels[b_i], preds],
                        ['x', 'y', 'z', 'gt', 'pre'])

            t2 = time.time()
            print('Done in {:.1f} s\n'.format(t2 - t1))

            # Display results
            # ***************

            objs_average = [np.mean(mIoUs) for mIoUs in obj_mIoUs]
            instance_average = np.mean(np.hstack(obj_mIoUs))
            class_average = np.mean(objs_average)

            print(
                'Objs | Inst | Air  Bag  Cap  Car  Cha  Ear  Gui  Kni  Lam  Lap  Mot  Mug  Pis  Roc  Ska  Tab'
            )
            print(
                '-----|------|--------------------------------------------------------------------------------'
            )

            s = '{:4.1f} | {:4.1f} | '.format(100 * class_average,
                                              100 * instance_average)
            for AmIoU in objs_average:
                s += '{:4.1f} '.format(100 * AmIoU)
            print(s + '\n')

            # Initialise iterator with test data
            self.sess.run(dataset.test_init_op)

        return
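The projection_inds trick used above (and in the other test scripts) maps each original point to its nearest subsampled point, so per-subcloud probabilities can be reprojected onto the full cloud. In isolation, a minimal sketch of that step (hypothetical helper name):

import numpy as np
from sklearn.neighbors import KDTree

def reproject_probs(sub_points, sub_probs, full_points):
    # Nearest subsampled point index for every original point
    proj_inds = np.squeeze(KDTree(sub_points).query(full_points,
                                                    return_distance=False))
    return sub_probs[proj_inds]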
Example #24
    def cloud_segmentation_predict(self, net, val_loader, config, debug=False):
        """
        Validation method for cloud segmentation models
        """

        ############
        # Initialize
        ############

        t0 = time.time()
        # Choose validation smoothing parameter (0 for no smoothing, 0.99 for strong smoothing)
        val_smooth = 0.95
        softmax = torch.nn.Softmax(1)

        # Do not validate if dataset has no validation cloud
        # if val_loader.dataset.validation_split not in val_loader.dataset.all_splits:
        # return

        # Number of classes including ignored labels
        nc_tot = val_loader.dataset.num_classes

        # Number of classes predicted by the model
        nc_model = config.num_classes

        #print(nc_tot)
        #print(nc_model)

        # Initiate global prediction over validation clouds
        if not hasattr(self, 'validation_probs'):
            self.validation_probs = [
                np.zeros((l.shape[0], nc_model))
                for l in val_loader.dataset.input_labels
            ]
            self.val_proportions = np.zeros(nc_model, dtype=np.float32)
            i = 0
            for label_value in val_loader.dataset.label_values:
                if label_value not in val_loader.dataset.ignored_labels:
                    self.val_proportions[i] = np.sum([
                        np.sum(labels == label_value)
                        for labels in val_loader.dataset.validation_labels
                    ])
                    i += 1

        #####################
        # Network predictions
        #####################

        predictions = []
        targets = []

        t = [time.time()]
        last_display = time.time()
        mean_dt = np.zeros(1)

        t1 = time.time()
        # Start prediction loop
        net.eval()
        for i, batch in enumerate(val_loader):

            # New time
            t = t[-1:]
            t += [time.time()]

            if 'cuda' in self.device.type:
                batch.to(self.device)

            # Forward pass
            outputs = net(batch, config)

            # Get probs and labels
            stacked_probs = softmax(outputs).cpu().detach().numpy()
            labels = batch.labels.cpu().numpy()
            lengths = batch.lengths[0].cpu().numpy()
            in_inds = batch.input_inds.cpu().numpy()
            cloud_inds = batch.cloud_inds.cpu().numpy()
            torch.cuda.synchronize(self.device)

            # Get predictions and labels per instance
            # ***************************************

            i0 = 0
            for b_i, length in enumerate(lengths):

                # Get prediction
                target = labels[i0:i0 + length]
                probs = stacked_probs[i0:i0 + length]
                inds = in_inds[i0:i0 + length]
                c_i = cloud_inds[b_i]

                # Update current probs in whole cloud
                self.validation_probs[c_i][inds] = val_smooth * self.validation_probs[c_i][inds] \
                                                   + (1 - val_smooth) * probs

                # Stack all prediction for this epoch
                predictions.append(probs)
                targets.append(target.astype(np.int64))
                i0 += length

            # Average timing
            t += [time.time()]
            mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) -
                                               np.array(t[:-1]))

            # Display
            if (t[-1] - last_display) > 1.0:
                last_display = t[-1]
                message = 'Validation : {:.1f}% (timings : {:4.2f} {:4.2f})'
                print(
                    message.format(100 * i / config.validation_size,
                                   1000 * (mean_dt[0]), 1000 * (mean_dt[1])))

        t2 = time.time()

        # Confusions for our subparts of validation set
        Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32)
        for i, (probs, truth) in enumerate(zip(predictions, targets)):

            # Insert false columns for ignored labels
            for l_ind, label_value in enumerate(
                    val_loader.dataset.label_values):
                if label_value in val_loader.dataset.ignored_labels:
                    probs = np.insert(probs, l_ind, 0, axis=1)

            # Predicted labels
            preds = val_loader.dataset.label_values[np.argmax(probs, axis=1)]

            # Confusions
            #Here
            #print("Truth = {}, Predictions = {}, Label Values = {}".format(truth.dtype, preds.dtype, val_loader.dataset.label_values))
            Confs[i, :, :] = fast_confusion(
                truth, preds, val_loader.dataset.label_values).astype(np.int32)

        t3 = time.time()

        # Sum all confusions
        C = np.sum(Confs, axis=0).astype(np.float32)

        # Remove ignored labels from confusions
        for l_ind, label_value in reversed(
                list(enumerate(val_loader.dataset.label_values))):
            if label_value in val_loader.dataset.ignored_labels:
                C = np.delete(C, l_ind, axis=0)
                C = np.delete(C, l_ind, axis=1)

        # Balance with real validation proportions
        C *= np.expand_dims(self.val_proportions / (np.sum(C, axis=1) + 1e-6),
                            1)

        t4 = time.time()

        # Objects IoU
        IoUs = IoU_from_confusions(C)

        t5 = time.time()

        # Saving (optional)
        if config.saving:

            # Name of saving file
            test_file = join(config.saving_path, 'test_IoUs.txt')
            print(test_file)

            # Line to write:
            line = ''
            for IoU in IoUs:
                line += '{:.3f} '.format(IoU)
            line = line + '\n'

            # Write in file
            if exists(test_file):
                with open(test_file, "a") as text_file:
                    text_file.write(line)
            else:
                with open(test_file, "w") as text_file:
                    text_file.write(line)

            # Save potentials
            pot_path = join(config.saving_path, 'potentials')
            if not exists(pot_path):
                makedirs(pot_path)
            files = val_loader.dataset.files
            for i, file_path in enumerate(files):
                pot_points = np.array(val_loader.dataset.pot_trees[i].data,
                                      copy=False)
                cloud_name = file_path.split('/')[-1]
                pot_name = join(pot_path, cloud_name)
                pots = val_loader.dataset.potentials[i].numpy().astype(
                    np.float32)
                write_ply(pot_name, [pot_points.astype(np.float32), pots],
                          ['x', 'y', 'z', 'pots'])

        t6 = time.time()

        # Print instance mean
        mIoU = 100 * np.mean(IoUs)
        print('{:s} mean IoU = {:.1f}%'.format(config.dataset, mIoU))

        # Save predicted cloud occasionally
        if config.saving:
            val_path = join(config.saving_path,
                            'val_preds_{:d}'.format(self.epoch + 1))
            if not exists(val_path):
                makedirs(val_path)
            files = val_loader.dataset.files
            for i, file_path in enumerate(files):

                # Get points
                points = val_loader.dataset.load_evaluation_points(file_path)

                # Get probs on our own ply points
                sub_probs = self.validation_probs[i]

                # Insert false columns for ignored labels
                for l_ind, label_value in enumerate(
                        val_loader.dataset.label_values):
                    if label_value in val_loader.dataset.ignored_labels:
                        sub_probs = np.insert(sub_probs, l_ind, 0, axis=1)

                # Get the predicted labels
                sub_preds = val_loader.dataset.label_values[np.argmax(
                    sub_probs, axis=1).astype(np.int32)]

                # Reproject preds on the evaluations points
                preds = (sub_preds[val_loader.dataset.test_proj[i]]).astype(
                    np.int32)

                # Path of saved validation file
                cloud_name = file_path.split('/')[-1]
                val_name = join(val_path, cloud_name)

                # Save file
                labels = val_loader.dataset.validation_labels[i].astype(
                    np.int32)
                write_ply(val_name, [points, preds, labels],
                          ['x', 'y', 'z', 'preds', 'class'])

        # Display timings
        t7 = time.time()
        net.train()
        if debug:
            print('\n************************\n')
            print('Validation timings:')
            print('Init ...... {:.1f}s'.format(t1 - t0))
            print('Loop ...... {:.1f}s'.format(t2 - t1))
            print('Confs ..... {:.1f}s'.format(t3 - t2))
            print('Confs bis . {:.1f}s'.format(t4 - t3))
            print('IoU ....... {:.1f}s'.format(t5 - t4))
            print('Save1 ..... {:.1f}s'.format(t6 - t5))
            print('Save2 ..... {:.1f}s'.format(t7 - t6))
            print('\n************************\n')

        return
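The val_smooth / test_smooth updates used throughout are an exponential moving average of per-point class probabilities across passes over the data. The core update, on its own (hypothetical helper name):

import numpy as np

def smooth_probs(stored_probs, new_probs, inds, smooth=0.95):
    # Exponential moving average: keep a fraction `smooth` of the old value per point
    stored_probs[inds] = smooth * stored_probs[inds] + (1 - smooth) * new_probs
    return stored_probs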
Example #25
    def test_cloud_segmentation_on_val(self, model, dataset, num_votes=100):

        ##########
        # Initiate
        ##########

        # Smoothing parameter for votes
        test_smooth = 0.95

        # Initialise iterator with validation data
        self.sess.run(dataset.val_init_op)

        # Initiate global prediction over test clouds
        nc_model = model.config.num_classes
        self.test_probs = [
            np.zeros((l.shape[0], nc_model), dtype=np.float32)
            for l in dataset.input_labels['validation']
        ]

        # Number of points per class in validation set
        val_proportions = np.zeros(nc_model, dtype=np.float32)
        i = 0
        for label_value in dataset.label_values:
            if label_value not in dataset.ignored_labels:
                val_proportions[i] = np.sum([
                    np.sum(labels == label_value)
                    for labels in dataset.validation_labels
                ])
                i += 1

        # Test saving path
        if model.config.saving:
            test_path = join('test', model.saving_path.split('/')[-1])
            if not exists(test_path):
                makedirs(test_path)
            if not exists(join(test_path, 'val_predictions')):
                makedirs(join(test_path, 'val_predictions'))
            if not exists(join(test_path, 'val_probs')):
                makedirs(join(test_path, 'val_probs'))
        else:
            test_path = None

        #####################
        # Network predictions
        #####################

        i0 = 0
        epoch_ind = 0
        last_min = -0.5
        mean_dt = np.zeros(2)
        last_display = time.time()
        while last_min < num_votes:

            try:
                # Run one step of the model.
                t = [time.time()]
                ops = (self.prob_logits, model.labels,
                       model.inputs['in_batches'], model.inputs['point_inds'],
                       model.inputs['cloud_inds'])
                stacked_probs, labels, batches, point_inds, cloud_inds = self.sess.run(
                    ops, {model.dropout_prob: 1.0})
                t += [time.time()]

                # Get predictions and labels per instance
                # ***************************************

                # Stack all validation predictions for each class separately
                max_ind = np.max(batches)
                for b_i, b in enumerate(batches):
                    # Eliminate shadow indices
                    b = b[b < max_ind - 0.5]

                    # Get prediction (only for the concerned parts)
                    probs = stacked_probs[b]
                    inds = point_inds[b]
                    c_i = cloud_inds[b_i]

                    # Update current probs in whole cloud
                    self.test_probs[c_i][inds] = test_smooth * self.test_probs[
                        c_i][inds] + (1 - test_smooth) * probs

                # Average timing
                t += [time.time()]
                mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) -
                                                   np.array(t[:-1]))

                # Display
                if (t[-1] - last_display) > 1.0:
                    last_display = t[-1]
                    message = 'Epoch {:3d}, step {:3d} (timings : {:4.2f} {:4.2f}). min potential = {:.1f}'
                    print(
                        message.format(
                            epoch_ind, i0, 1000 * (mean_dt[0]),
                            1000 * (mean_dt[1]),
                            np.min(dataset.min_potentials['validation'])))

                i0 += 1

            except tf.errors.OutOfRangeError:

                # Save predicted cloud
                new_min = np.min(dataset.min_potentials['validation'])
                print('Epoch {:3d}, end. Min potential = {:.1f}'.format(
                    epoch_ind, new_min))

                if last_min + 1 < new_min:

                    # Update last_min
                    last_min += 1

                    # Show vote results (on subclouds, so these are not the final values)
                    print('\nConfusion on sub clouds')
                    Confs = []
                    for i_test in range(dataset.num_validation):

                        # Insert false columns for ignored labels
                        probs = self.test_probs[i_test]
                        for l_ind, label_value in enumerate(
                                dataset.label_values):
                            if label_value in dataset.ignored_labels:
                                probs = np.insert(probs, l_ind, 0, axis=1)

                        # Predicted labels
                        preds = dataset.label_values[np.argmax(
                            probs, axis=1)].astype(np.int32)

                        # Targets
                        targets = dataset.input_labels['validation'][i_test]

                        # Confs
                        Confs += [
                            confusion_matrix(targets, preds,
                                             labels=dataset.label_values)
                        ]

                    # Regroup confusions
                    C = np.sum(np.stack(Confs), axis=0).astype(np.float32)

                    # Remove ignored labels from confusions
                    for l_ind, label_value in reversed(
                            list(enumerate(dataset.label_values))):
                        if label_value in dataset.ignored_labels:
                            C = np.delete(C, l_ind, axis=0)
                            C = np.delete(C, l_ind, axis=1)

                    # Rescale with the right number of points per class
                    C *= np.expand_dims(
                        val_proportions / (np.sum(C, axis=1) + 1e-6), 1)

                    # Compute IoUs
                    IoUs = IoU_from_confusions(C)
                    mIoU = np.mean(IoUs)
                    s = '{:5.2f} | '.format(100 * mIoU)
                    for IoU in IoUs:
                        s += '{:5.2f} '.format(100 * IoU)
                    print(s + '\n')

                    if int(np.ceil(new_min)) % 4 == 0:

                        # Project predictions
                        print('\nReproject Vote #{:d}'.format(
                            int(np.floor(new_min))))
                        t1 = time.time()
                        files = dataset.train_files
                        i_val = 0
                        proj_probs = []
                        for i, file_path in enumerate(files):
                            if dataset.all_splits[
                                    i] == dataset.validation_split:

                                # Reproject probs on the evaluation points
                                probs = self.test_probs[i_val][
                                    dataset.validation_proj[i_val], :]
                                proj_probs += [probs]
                                i_val += 1

                        t2 = time.time()
                        print('Done in {:.1f} s\n'.format(t2 - t1))

                        # Show vote results
                        print('Confusion on full clouds')
                        t1 = time.time()
                        Confs = []
                        for i_test in range(dataset.num_validation):

                            # Insert false columns for ignored labels
                            for l_ind, label_value in enumerate(
                                    dataset.label_values):
                                if label_value in dataset.ignored_labels:
                                    proj_probs[i_test] = np.insert(
                                        proj_probs[i_test], l_ind, 0, axis=1)

                            # Get the predicted labels
                            preds = dataset.label_values[np.argmax(
                                proj_probs[i_test], axis=1)].astype(np.int32)

                            # Confusion
                            targets = dataset.validation_labels[i_test]
                            Confs += [
                                confusion_matrix(targets, preds,
                                                 labels=dataset.label_values)
                            ]

                        t2 = time.time()
                        print('Done in {:.1f} s\n'.format(t2 - t1))

                        # Regroup confusions
                        C = np.sum(np.stack(Confs), axis=0)

                        # Remove ignored labels from confusions
                        for l_ind, label_value in reversed(
                                list(enumerate(dataset.label_values))):
                            if label_value in dataset.ignored_labels:
                                C = np.delete(C, l_ind, axis=0)
                                C = np.delete(C, l_ind, axis=1)

                        IoUs = IoU_from_confusions(C)
                        mIoU = np.mean(IoUs)
                        s = '{:5.2f} | '.format(100 * mIoU)
                        for IoU in IoUs:
                            s += '{:5.2f} '.format(100 * IoU)
                        print('-' * len(s))
                        print(s)
                        print('-' * len(s) + '\n')

                        # Save predictions
                        print('Saving clouds')
                        t1 = time.time()
                        files = dataset.train_files
                        i_test = 0
                        for i, file_path in enumerate(files):
                            if dataset.all_splits[
                                    i] == dataset.validation_split:

                                # Get points
                                points = dataset.load_evaluation_points(
                                    file_path)

                                # Get the predicted labels
                                preds = dataset.label_values[np.argmax(
                                    proj_probs[i_test],
                                    axis=1)].astype(np.int32)

                                # Project potentials on original points
                                pots = dataset.potentials['validation'][
                                    i_test][dataset.validation_proj[i_test]]

                                # Save plys
                                cloud_name = file_path.split('/')[-1]
                                test_name = join(test_path, 'val_predictions',
                                                 cloud_name)
                                write_ply(test_name, [
                                    points, preds, pots,
                                    dataset.validation_labels[i_test]
                                ], ['x', 'y', 'z', 'preds', 'pots', 'gt'])
                                test_name2 = join(test_path, 'val_probs',
                                                  cloud_name)
                                prob_names = [
                                    '_'.join(
                                        dataset.label_to_names[label].split())
                                    for label in dataset.label_values
                                ]
                                write_ply(test_name2,
                                          [points, proj_probs[i_test]],
                                          ['x', 'y', 'z'] + prob_names)
                                i_test += 1
                        t2 = time.time()
                        print('Done in {:.1f} s\n'.format(t2 - t1))

                self.sess.run(dataset.val_init_op)
                epoch_ind += 1
                i0 = 0
                continue

        return
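
# --- Hedged sketch (not part of the original example) ----------------------
# The `test_smooth = 0.95` constant above implements an exponential moving
# average over successive voting passes: each new network prediction only
# nudges the stored per-point probabilities, which is why the loop keeps
# running until the minimum sampling potential exceeds `num_votes`. A
# standalone illustration of that update rule (the values are made up, only
# the formula matches the snippet):

import numpy as np

test_smooth = 0.95
stored = np.zeros(3, dtype=np.float32)                  # accumulated probabilities of one point
new_vote = np.array([0.1, 0.7, 0.2], dtype=np.float32)  # what the network keeps predicting

for vote in range(60):
    stored = test_smooth * stored + (1 - test_smooth) * new_vote
    if vote in (0, 9, 59):
        print(vote + 1, stored)   # converges toward new_vote as votes accumulate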
示例#26
0
def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):

    # Kernel directory
    kernel_dir = 'kernels/dispositions'
    if not exists(kernel_dir):
        makedirs(kernel_dir)

    # Too many points: switch to Lloyd's algorithm
    if num_kpoints > 30:
        lloyd = True

    # Kernel_file
    kernel_file = join(kernel_dir, 'k_{:03d}_{:s}_{:d}D.ply'.format(num_kpoints, fixed, dimension))

    # Check if already done
    if not exists(kernel_file):
        if lloyd:
            # Create kernels
            kernel_points = spherical_Lloyd(1.0,
                                            num_kpoints,
                                            dimension=dimension,
                                            fixed=fixed,
                                            verbose=0)

        else:
            # Create kernels
            kernel_points, grad_norms = kernel_point_optimization_debug(1.0,
                                                                        num_kpoints,
                                                                        num_kernels=100,
                                                                        dimension=dimension,
                                                                        fixed=fixed,
                                                                        verbose=0)

            # Find best candidate
            best_k = np.argmin(grad_norms[-1, :])

            # Save points
            kernel_points = kernel_points[best_k, :, :]

        write_ply(kernel_file, kernel_points, ['x', 'y', 'z'])

    else:
        data = read_ply(kernel_file)
        kernel_points = np.vstack((data['x'], data['y'], data['z'])).T

    # Random rotations for the kernel
    # N.B. 4D random rotations not supported yet
    R = np.eye(dimension)
    theta = np.random.rand() * 2 * np.pi
    if dimension == 2:
        if fixed != 'vertical':
            c, s = np.cos(theta), np.sin(theta)
            R = np.array([[c, -s], [s, c]], dtype=np.float32)

    elif dimension == 3:
        if fixed != 'vertical':
            c, s = np.cos(theta), np.sin(theta)
            R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)

        else:
            phi = (np.random.rand() - 0.5) * np.pi

            # Create the first vector in Cartesian coordinates
            u = np.array([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)])

            # Choose a random rotation angle
            alpha = np.random.rand() * 2 * np.pi

            # Create the rotation matrix with this vector and angle
            R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[0]

            R = R.astype(np.float32)

    # Add a small noise
    kernel_points = kernel_points + np.random.normal(scale=0.01, size=kernel_points.shape)

    # Scale kernels
    kernel_points = radius * kernel_points

    # Rotate kernels
    kernel_points = np.matmul(kernel_points, R)

    return kernel_points.astype(np.float32)
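
# --- Hedged usage sketch (parameter values are illustrative only) ----------
# This version of load_kernels caches the optimized disposition under
# kernels/dispositions and re-reads it on later calls; the returned array is
# rescaled by `radius`, randomly rotated and slightly jittered. A possible
# call, assuming the helper functions it relies on are importable:

K = load_kernels(radius=0.06, num_kpoints=15, dimension=3, fixed='center')
print(K.shape)   # expected: (15, 3) for a 3D kernel with 15 points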
示例#27
0
    def check_input_pipeline_colors(self, config):

        # Create a session for running Ops on the Graph.
        cProto = tf.ConfigProto()
        cProto.gpu_options.allow_growth = True
        self.sess = tf.Session(config=cProto)

        # Init variables
        self.sess.run(tf.global_variables_initializer())

        # Initialise iterator with train data
        self.sess.run(self.train_init_op)

        # Run some epochs
        t0 = time.time()
        mean_dt = np.zeros(2)
        epoch = 0
        training_step = 0
        while epoch < 100:

            try:
                # Run one step of the model.
                t = [time.time()]
                ops = self.flat_inputs

                # Get next inputs
                np_flat_inputs = self.sess.run(ops)
                t += [time.time()]

                # Restructure flattened inputs
                stacked_points = np_flat_inputs[:config.num_layers]
                stacked_colors = np_flat_inputs[-9]
                batches = np_flat_inputs[-7]
                stacked_labels = np_flat_inputs[-5]

                # Extract a point cloud and its color to save
                max_ind = np.max(batches)
                for b_i, b in enumerate(batches):

                    # Eliminate shadow indices
                    b = b[b < max_ind-0.5]

                    # Get points and colors (only for the concerned parts)
                    points = stacked_points[0][b]
                    colors = stacked_colors[b]
                    labels = stacked_labels[b]

                    write_ply('S3DIS_input_{:d}.ply'.format(b_i),
                              [points, colors[:, 1:4], labels],
                              ['x', 'y', 'z', 'red', 'green', 'blue', 'labels'])

                # Intentional crash: stop right after saving the first batch (debug check)
                a = 1 / 0

                t += [time.time()]

                # Average timing
                mean_dt = 0.01 * mean_dt + 0.99 * (np.array(t[1:]) - np.array(t[:-1]))

                training_step += 1

            except tf.errors.OutOfRangeError:
                print('End of train dataset')
                self.sess.run(self.train_init_op)
                epoch += 1

        return
示例#28
0
def load_kernels(radius, num_kpoints, num_kernels, dimension, fixed):

    # Number of tries in the optimization process, to ensure we get the most stable disposition
    num_tries = 100

    # Kernel directory
    kernel_dir = 'kernels/dispositions'
    if not exists(kernel_dir):
        makedirs(kernel_dir)

    # Kernel_file
    if dimension == 3:
        kernel_file = join(kernel_dir,
                           'k_{:03d}_{:s}.ply'.format(num_kpoints, fixed))
    elif dimension == 2:
        kernel_file = join(kernel_dir,
                           'k_{:03d}_{:s}_2D.ply'.format(num_kpoints, fixed))
    else:
        raise ValueError('Unsupported dimension of kernel: ' +
                         str(dimension))

    # Check if already done
    if not exists(kernel_file):

        # Create kernels
        kernel_points, grad_norms = kernel_point_optimization_debug(
            1.0,
            num_kpoints,
            num_kernels=num_tries,
            dimension=dimension,
            fixed=fixed,
            verbose=0)

        # Find best candidate
        best_k = np.argmin(grad_norms[-1, :])

        # Save points
        original_kernel = kernel_points[best_k, :, :]
        write_ply(kernel_file, original_kernel, ['x', 'y', 'z'])

    else:
        data = read_ply(kernel_file)
        original_kernel = np.vstack((data['x'], data['y'], data['z'])).T

    # N.B. random rotations are not applied to 2D kernels yet
    if dimension == 2:
        return original_kernel

    # Random rotations depending on the fixed points
    if fixed == 'verticals':

        # Create random rotations
        thetas = np.random.rand(num_kernels) * 2 * np.pi
        c, s = np.cos(thetas), np.sin(thetas)
        R = np.zeros((num_kernels, 3, 3), dtype=np.float32)
        R[:, 0, 0] = c
        R[:, 1, 1] = c
        R[:, 2, 2] = 1
        R[:, 0, 1] = s
        R[:, 1, 0] = -s

        # Scale kernels
        original_kernel = radius * np.expand_dims(original_kernel, 0)

        # Rotate kernels
        kernels = np.matmul(original_kernel, R)

    else:

        # Create random rotations
        u = np.ones((num_kernels, 3))
        v = np.ones((num_kernels, 3))
        wrongs = np.abs(np.sum(u * v, axis=1)) > 0.99
        while np.any(wrongs):
            new_u = np.random.rand(num_kernels, 3) * 2 - 1
            new_u = new_u / np.expand_dims(
                np.linalg.norm(new_u, axis=1) + 1e-9, -1)
            u[wrongs, :] = new_u[wrongs, :]
            new_v = np.random.rand(num_kernels, 3) * 2 - 1
            new_v = new_v / np.expand_dims(
                np.linalg.norm(new_v, axis=1) + 1e-9, -1)
            v[wrongs, :] = new_v[wrongs, :]
            wrongs = np.abs(np.sum(u * v, axis=1)) > 0.99

        # Make v perpendicular to u
        v -= np.expand_dims(np.sum(u * v, axis=1), -1) * u
        v = v / np.expand_dims(np.linalg.norm(v, axis=1) + 1e-9, -1)

        # Last rotation vector
        w = np.cross(u, v)
        R = np.stack((u, v, w), axis=-1)

        # Scale kernels
        original_kernel = radius * np.expand_dims(original_kernel, 0)

        # Rotate kernels
        kernels = np.matmul(original_kernel, R)

        # Add a small noise
        kernels = kernels + np.random.normal(scale=radius * 0.01,
                                             size=kernels.shape)

    return kernels
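
# --- Hedged sketch (not part of the original example) ----------------------
# The `else` branch above builds random rotations by Gram-Schmidt: draw two
# non-parallel unit vectors u and v, remove the component of v along u, and
# complete the basis with w = u x v, so R = [u v w] is a proper rotation.
# A standalone sanity check of that construction (the near-parallel redraw
# loop from the original code is omitted here):

import numpy as np

num_kernels = 4
rng = np.random.default_rng(0)

u = rng.uniform(-1.0, 1.0, (num_kernels, 3))
u /= np.linalg.norm(u, axis=1, keepdims=True)
v = rng.uniform(-1.0, 1.0, (num_kernels, 3))
v /= np.linalg.norm(v, axis=1, keepdims=True)

# Gram-Schmidt step: make v orthogonal to u, then complete the basis
v -= np.sum(u * v, axis=1, keepdims=True) * u
v /= np.linalg.norm(v, axis=1, keepdims=True)
w = np.cross(u, v)
R = np.stack((u, v, w), axis=-1)

# Each matrix should be orthonormal with determinant +1
print(np.allclose(np.matmul(R, np.transpose(R, (0, 2, 1))), np.eye(3), atol=1e-5))
print(np.round(np.linalg.det(R), 5))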
示例#29
0
    def load_subsampled_clouds(self, subsampling_parameter):
        """
        Presubsample point clouds and load them into memory (also load the KDTree for neighbor searches)
        """

        if 0 < subsampling_parameter <= 0.01:
            raise ValueError('subsampling_parameter too low (should be over 1 cm)')

        # Create path for files
        tree_path = join(self.path, 'input_{:.3f}'.format(subsampling_parameter))
        if not exists(tree_path):
            makedirs(tree_path)

        # All training and test files
        files = np.hstack((self.train_files, self.test_files))

        # Initiate containers
        self.input_trees = {'training': [], 'validation': [], 'test': []}
        self.input_colors = {'training': [], 'validation': [], 'test': []}
        self.input_labels = {'training': [], 'validation': []}

        # Advanced display
        N = len(files)
        progress_n = 30
        fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%'
        print('\nPreparing KDTree for all scenes, subsampled at {:.3f}'.format(subsampling_parameter))

        for i, file_path in enumerate(files):

            # Restart timer
            t0 = time.time()

            # get cloud name and split
            cloud_name = file_path.split('/')[-1][:-4]
            cloud_folder = file_path.split('/')[-2]
            if 'train' in cloud_folder:
                if self.all_splits[i] == self.validation_split:
                    cloud_split = 'validation'
                else:
                    cloud_split = 'training'
            else:
                cloud_split = 'test'

            # Name of the input files
            KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))
            sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name))

            # Check if inputs have already been computed
            if isfile(KDTree_file):

                # read ply with data
                data = read_ply(sub_ply_file)
                sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T
                if cloud_split == 'test':
                    sub_labels = None
                else:
                    sub_labels = data['class']

                # Read pkl with search tree
                with open(KDTree_file, 'rb') as f:
                    search_tree = pickle.load(f)

            else:

                # Read ply file
                data = read_ply(file_path)
                points = np.vstack((data['x'], data['y'], data['z'])).T
                colors = np.vstack((data['red'], data['green'], data['blue'])).T
                if cloud_split == 'test':
                    int_features = None
                else:
                    int_features = data['class']

                # Subsample cloud
                sub_data = grid_subsampling(points,
                                            features=colors,
                                            labels=int_features,
                                            sampleDl=subsampling_parameter)

                # Rescale float color and squeeze label
                sub_colors = sub_data[1] / 255

                # Build a KDTree on the subsampled points
                search_tree = KDTree(sub_data[0], leaf_size=50)

                # Save KDTree
                with open(KDTree_file, 'wb') as f:
                    pickle.dump(search_tree, f)

                # Save ply
                if cloud_split == 'test':
                    sub_labels = None
                    write_ply(sub_ply_file,
                              [sub_data[0], sub_colors],
                              ['x', 'y', 'z', 'red', 'green', 'blue'])
                else:
                    sub_labels = np.squeeze(sub_data[2])
                    write_ply(sub_ply_file,
                              [sub_data[0], sub_colors, sub_labels],
                              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

            # Fill data containers
            self.input_trees[cloud_split] += [search_tree]
            self.input_colors[cloud_split] += [sub_colors]
            if cloud_split in ['training', 'validation']:
                self.input_labels[cloud_split] += [sub_labels]

            print('', end='\r')
            print(fmt_str.format('#' * ((i * progress_n) // N), 100 * i / N), end='', flush=True)

        # Get number of clouds
        self.num_training = len(self.input_trees['training'])
        self.num_validation = len(self.input_trees['validation'])
        self.num_test = len(self.input_trees['test'])

        # Get validation and test reprojection indices
        self.validation_proj = []
        self.validation_labels = []
        self.test_proj = []
        self.test_labels = []
        i_val = 0
        i_test = 0

        # Advanced display
        N = self.num_validation + self.num_test
        print('', end='\r')
        print(fmt_str.format('#' * progress_n, 100), flush=True)
        print('\nPreparing reprojection indices for validation and test')

        for i, file_path in enumerate(files):

            # get cloud name and split
            cloud_name = file_path.split('/')[-1][:-4]
            cloud_folder = file_path.split('/')[-2]

            # Validation projection and labels
            if 'train' in cloud_folder and self.all_splits[i] == self.validation_split:
                proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                if isfile(proj_file):
                    with open(proj_file, 'rb') as f:
                        proj_inds, labels = pickle.load(f)
                else:

                    # Get original points
                    data = read_ply(file_path)
                    points = np.vstack((data['x'], data['y'], data['z'])).T
                    labels = data['class']

                    # Compute projection inds
                    proj_inds = np.squeeze(self.input_trees['validation'][i_val].query(points, return_distance=False))
                    proj_inds = proj_inds.astype(np.int32)

                    # Save
                    with open(proj_file, 'wb') as f:
                        pickle.dump([proj_inds, labels], f)

                self.validation_proj += [proj_inds]
                self.validation_labels += [labels]
                i_val += 1

            # Test projection
            if '-8' in cloud_folder:
                proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                if isfile(proj_file):
                    with open(proj_file, 'rb') as f:
                        proj_inds, labels = pickle.load(f)
                else:

                    # Get original points
                    full_ply_path = file_path.split('/')
                    full_ply_path[-3] = 'ply_full'
                    full_ply_path = '/'.join(full_ply_path)
                    data = read_ply(full_ply_path)
                    points = np.vstack((data['x'], data['y'], data['z'])).T
                    labels = np.zeros(points.shape[0], dtype=np.int32)

                    # Compute projection inds
                    proj_inds = np.squeeze(self.input_trees['test'][i_test].query(points, return_distance=False))
                    proj_inds = proj_inds.astype(np.int32)

                    # Save
                    with open(proj_file, 'wb') as f:
                        pickle.dump([proj_inds, labels], f)

                self.test_proj += [proj_inds]
                self.test_labels += [labels]
                i_test += 1

            print('', end='\r')
            print(fmt_str.format('#' * (((i_val + i_test) * progress_n) // N), 100 * (i_val + i_test) / N),
                  end='',
                  flush=True)

        print('\n')

        return
        print('Computing elevation images')
        
        
        kx = 0.1
        ky = 0.1
        
        t0 = time.time()
        # Get the elevation image as well as the tables allowing to reverse the projection
        # (for future reconstruction)
        elevation_image, reverse_projection = get_elevation(points, kx, ky) 
        t1 = time.time()    
        
        print('Elevation images computed in {:.3f} seconds'.format(t1 - t0))
        
        
        write_ply('elevation_image.ply', [elevation_image],
                  ['x', 'y', 'max_elevation', 'min_elevation', 'relative_elevation', 'accumulation'])
#%%        
    if True:
        '''Observation of the elevation images'''
        
        #Turn the elevation arrays into images
        im_max, elevation_mask = make_image(elevation_image, elevation_image, im_type = 0)
        im_min, msk = make_image(elevation_image, elevation_image, im_type = 1)
        im_range, msk = make_image(elevation_image, elevation_image, im_type = 2)
        im_accum, msk = make_image(elevation_image, elevation_image, im_type = 3)
        
        #Fill holes of the max elevation image
        im = np.copy(im_max)
        im[1:-1, 1:-1] = im_max.max()
        mask = im_max
        filled_max = skmorpho.reconstruction(im, mask, method='erosion')