Example #1
    def load_subsampled_clouds(self, subsampling_parameter):
        """
        Presubsample point clouds and load into memory (load KDTree for neighbor searches).
        """

        if 0 < subsampling_parameter <= 0.01:
            raise ValueError(
                'subsampling_parameter too low (should be over 1 cm)')

        # Create path for files
        tree_path = join(self.path,
                         'input_{:.3f}'.format(subsampling_parameter))
        if not exists(tree_path):
            makedirs(tree_path)

        # List of files to process
        ply_path = join(self.path, self.train_path)
        self.train_files = [
            join(ply_path, f + '.ply') for f in self.cloud_names
        ]

        # Initiate containers
        self.input_trees = {'training': [], 'validation': []}
        self.input_colors = {'training': [], 'validation': []}
        self.input_labels = {'training': [], 'validation': []}

        for i, file_path in enumerate(self.train_files):

            # Restart timer
            t0 = time.time()

            # get cloud name and split
            cloud_name = file_path.split('/')[-1][:-4]
            if self.all_splits[i] == self.validation_split:
                cloud_split = 'validation'
            else:
                cloud_split = 'training'

            # Name of the input files
            KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))
            sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name))

            # Check if inputs have already been computed
            if isfile(KDTree_file):
                print('\nFound KDTree for cloud {:s}, subsampled at {:.3f}'.
                      format(cloud_name, subsampling_parameter))

                # read ply with data
                data = read_ply(sub_ply_file)
                sub_colors = np.vstack(
                    (data['red'], data['green'], data['blue'])).T
                sub_labels = data['class']

                # Read pkl with search tree
                with open(KDTree_file, 'rb') as f:
                    search_tree = pickle.load(f)

            else:
                print(
                    '\nPreparing KDTree for cloud {:s}, subsampled at {:.3f}'.
                    format(cloud_name, subsampling_parameter))

                # Read ply file
                data = read_ply(file_path)
                points = np.vstack((data['x'], data['y'], data['z'])).T
                colors = np.vstack(
                    (data['red'], data['green'], data['blue'])).T
                labels = data['class']

                # Subsample cloud
                sub_points, sub_colors, sub_labels = grid_subsampling(
                    points,
                    features=colors,
                    labels=labels,
                    sampleDl=subsampling_parameter)

                # Rescale float color and squeeze label
                sub_colors = sub_colors / 255
                sub_labels = np.squeeze(sub_labels)

                # Get chosen neighborhoods
                search_tree = KDTree(sub_points, leaf_size=50)

                # Save KDTree
                with open(KDTree_file, 'wb') as f:
                    pickle.dump(search_tree, f)

                # Save ply
                write_ply(sub_ply_file, [sub_points, sub_colors, sub_labels],
                          ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

            # Fill data containers
            self.input_trees[cloud_split] += [search_tree]
            self.input_colors[cloud_split] += [sub_colors]
            self.input_labels[cloud_split] += [sub_labels]

            size = sub_colors.shape[0] * 4 * 7
            print('{:.1f} MB loaded in {:.1f}s'.format(size * 1e-6,
                                                       time.time() - t0))

        print('\nPreparing reprojection indices for testing')

        # Get number of clouds
        self.num_training = len(self.input_trees['training'])
        self.num_validation = len(self.input_trees['validation'])

        # Get validation and test reprojection indices
        self.validation_proj = []
        self.validation_labels = []
        i_val = 0
        for i, file_path in enumerate(self.train_files):

            # Restart timer
            t0 = time.time()

            # Get info on this cloud
            cloud_name = file_path.split('/')[-1][:-4]

            # Validation projection and labels
            if self.all_splits[i] == self.validation_split:
                proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                if isfile(proj_file):
                    with open(proj_file, 'rb') as f:
                        proj_inds, labels = pickle.load(f)
                else:
                    data = read_ply(file_path)
                    points = np.vstack((data['x'], data['y'], data['z'])).T
                    labels = data['class']

                    # Compute projection inds
                    proj_inds = np.squeeze(
                        self.input_trees['validation'][i_val].query(
                            points, return_distance=False))
                    proj_inds = proj_inds.astype(np.int32)

                    # Save
                    with open(proj_file, 'wb') as f:
                        pickle.dump([proj_inds, labels], f)

                self.validation_proj += [proj_inds]
                self.validation_labels += [labels]
                i_val += 1
                print('{:s} done in {:.1f}s'.format(cloud_name,
                                                    time.time() - t0))

        print()

        return
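The pickled object above is a sklearn.neighbors.KDTree, so a saved file can simply be reloaded and queried later. A minimal sketch (the file path below is only a hypothetical illustration of the naming scheme used above):

import pickle
import numpy as np

# Hypothetical file following the 'input_{subsampling}/{cloud_name}_KDTree.pkl' scheme above
with open('input_0.040/example_cloud_KDTree.pkl', 'rb') as f:
    search_tree = pickle.load(f)

# Indices of all subsampled points within 1 m of a query position
center = np.zeros((1, 3), dtype=np.float32)
neighbor_inds = search_tree.query_radius(center, r=1.0)[0]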
Example #2
    def load_sub_sampled_clouds(self, sub_grid_size):
        tree_path = os.path.join(self.path,
                                 'input_{:.3f}'.format(sub_grid_size))
        files = np.hstack((self.train_files, self.val_files, self.test_files))
        for i, file_path in enumerate(files):
            cloud_name = file_path.split('/')[-1][:-4]
            print('Load_pc_' + str(i) + ': ' + cloud_name)
            if file_path in self.val_files:
                cloud_split = 'validation'
            elif file_path in self.train_files:
                cloud_split = 'training'
            else:
                cloud_split = 'test'

            # Name of the input files
            kd_tree_file = os.path.join(tree_path,
                                        '{:s}_KDTree.pkl'.format(cloud_name))
            sub_ply_file = os.path.join(tree_path,
                                        '{:s}.ply'.format(cloud_name))

            # read ply with data
            data = read_ply(sub_ply_file)
            sub_colors = np.vstack(
                (data['red'], data['green'], data['blue'])).T
            if cloud_split == 'test':
                sub_labels = None
            else:
                sub_labels = data['class']

            # Read pkl with search tree
            with open(kd_tree_file, 'rb') as f:
                search_tree = pickle.load(f)

            self.input_trees[cloud_split] += [search_tree]
            self.input_colors[cloud_split] += [sub_colors]
            if cloud_split in ['training', 'validation']:
                self.input_labels[cloud_split] += [sub_labels]

        # Get validation and test re_projection indices
        print('\nPreparing reprojection indices for validation and test')

        for i, file_path in enumerate(files):
            # get cloud name and split
            cloud_name = file_path.split('/')[-1][:-4]

            # Validation projection and labels
            if file_path in self.val_files:
                proj_file = os.path.join(tree_path,
                                         '{:s}_proj.pkl'.format(cloud_name))
                with open(proj_file, 'rb') as f:
                    proj_idx, labels = pickle.load(f)
                self.val_proj += [proj_idx]
                self.val_labels += [labels]

            # Test projection
            if file_path in self.test_files:
                proj_file = os.path.join(tree_path,
                                         '{:s}_proj.pkl'.format(cloud_name))
                with open(proj_file, 'rb') as f:
                    proj_idx, labels = pickle.load(f)
                self.test_proj += [proj_idx]
                self.test_labels += [labels]
        print('finished')

        for i, tree in enumerate(self.input_trees[self.mode]):
            self.possibility[self.mode] += [
                np.random.rand(tree.data.shape[0]) * 1e-3
            ]
            self.min_possibility[self.mode] += [
                float(np.min(self.possibility[self.mode][-1]))
            ]

        if self.mode != 'test':
            _, num_class_total = np.unique(np.hstack(
                self.input_labels[self.mode]),
                                           return_counts=True)
            self.class_weight[self.mode] += [
                np.squeeze([num_class_total / np.sum(num_class_total)], axis=0)
            ]
Example #3
def create_kernel_points(scope, radius, num_kpoints, num_kernels, dimension,
                         fixed):
    # Number of tries in the optimization process, to ensure we get the most stable disposition
    num_tries = 100

    # Kernel directory
    kernel_dir = os.path.join(ROOT_DIR, 'kernels', 'dispositions')
    if not os.path.exists(kernel_dir):
        os.makedirs(kernel_dir)

    prefix_name = scope.name.split('Model/')[-1].replace('/', '_')

    if dimension == 3:
        specific_kernel_file = os.path.join(
            kernel_dir,
            'sk_{}_{:04f}_{:03d}_{:s}.npy'.format(prefix_name, radius,
                                                  num_kpoints, fixed))
    elif dimension == 2:
        specific_kernel_file = os.path.join(
            kernel_dir,
            'sk_{}_{:04f}_{:03d}_{:s}_2D.npy'.format(prefix_name, radius,
                                                     num_kpoints, fixed))
    else:
        raise ValueError('Unsupported dimension of kernel: ' +
                         str(dimension))

    if os.path.exists(specific_kernel_file):
        kernels = np.load(specific_kernel_file)
    else:
        # Kernel_file
        if dimension == 3:
            kernel_file = os.path.join(
                kernel_dir, 'k_{:03d}_{:s}.ply'.format(num_kpoints, fixed))
        elif dimension == 2:
            kernel_file = os.path.join(
                kernel_dir, 'k_{:03d}_{:s}_2D.ply'.format(num_kpoints, fixed))
        else:
            raise ValueError('Unsupported dimension of kernel: ' +
                             str(dimension))

        # Check if already done
        if not os.path.exists(kernel_file):

            # Create kernels
            kernel_points, grad_norms = kernel_point_optimization_debug(
                1.0,
                num_kpoints,
                num_kernels=num_tries,
                dimension=dimension,
                fixed=fixed,
                verbose=0)

            # Find best candidate
            best_k = np.argmin(grad_norms[-1, :])

            # Save points
            original_kernel = kernel_points[best_k, :, :]
            write_ply(kernel_file, original_kernel, ['x', 'y', 'z'])

        else:
            data = read_ply(kernel_file)
            original_kernel = np.vstack((data['x'], data['y'], data['z'])).T

        # N.B. 2D kernels are not supported yet
        if dimension == 2:
            return original_kernel

        # Random rotations depending on the fixed points
        if fixed == 'verticals':

            # Create random rotations
            thetas = np.random.rand(num_kernels) * 2 * np.pi
            c, s = np.cos(thetas), np.sin(thetas)
            R = np.zeros((num_kernels, 3, 3), dtype=np.float32)
            R[:, 0, 0] = c
            R[:, 1, 1] = c
            R[:, 2, 2] = 1
            R[:, 0, 1] = s
            R[:, 1, 0] = -s

            # Scale kernels
            original_kernel = radius * np.expand_dims(original_kernel, 0)

            # Rotate kernels
            kernels = np.matmul(original_kernel, R)

        else:

            # Create random rotations
            u = np.ones((num_kernels, 3))
            v = np.ones((num_kernels, 3))
            wrongs = np.abs(np.sum(u * v, axis=1)) > 0.99
            while np.any(wrongs):
                new_u = np.random.rand(num_kernels, 3) * 2 - 1
                new_u = new_u / np.expand_dims(
                    np.linalg.norm(new_u, axis=1) + 1e-9, -1)
                u[wrongs, :] = new_u[wrongs, :]
                new_v = np.random.rand(num_kernels, 3) * 2 - 1
                new_v = new_v / np.expand_dims(
                    np.linalg.norm(new_v, axis=1) + 1e-9, -1)
                v[wrongs, :] = new_v[wrongs, :]
                wrongs = np.abs(np.sum(u * v, axis=1)) > 0.99

            # Make v perpendicular to u
            v -= np.expand_dims(np.sum(u * v, axis=1), -1) * u
            v = v / np.expand_dims(np.linalg.norm(v, axis=1) + 1e-9, -1)

            # Last rotation vector
            w = np.cross(u, v)
            R = np.stack((u, v, w), axis=-1)

            # Scale kernels
            original_kernel = radius * np.expand_dims(original_kernel, 0)

            # Rotate kernels
            kernels = np.matmul(original_kernel, R)

            # Add a small noise
            kernels = kernels + np.random.normal(scale=radius * 0.01,
                                                 size=kernels.shape)

        np.save(specific_kernel_file, kernels)

    return kernels
Example #4
if __name__ == '__main__':

    # Transformation estimation
    # *************************
    #

    # If statement to skip this part if wanted
    if False:

        # Cloud paths
        bunny_o_path = '../data/bunny_original.ply'
        bunny_r_path = '../data/bunny_returned.ply'

        # Load clouds
        bunny_original_file = read_ply(bunny_o_path)
        bunny_returned_file = read_ply(bunny_r_path)

        bunny_original_pts = np.vstack(
            (bunny_original_file['x'], bunny_original_file['y'],
             bunny_original_file['z']))
        bunny_returned_pts = np.vstack(
            (bunny_returned_file['x'], bunny_returned_file['y'],
             bunny_returned_file['z']))

        # Find the best transformation
        R, T = best_rigid_transform(bunny_original_pts, bunny_returned_pts)

        # Apply the transformation
        bunny_transformed_pts = R.dot(bunny_original_pts) + T
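best_rigid_transform is called in several of these examples but never shown. A minimal SVD-based sketch (the Kabsch algorithm), assuming both clouds are (3, N) arrays of already-matched points as in the snippet above:

import numpy as np

def best_rigid_transform(data, ref):
    # data, ref: (3, N) arrays of corresponding points.
    # Returns R (3x3) and T (3x1) such that R.dot(data) + T is close to ref.
    data_mean = data.mean(axis=1, keepdims=True)
    ref_mean = ref.mean(axis=1, keepdims=True)

    # Cross-covariance of the centered clouds
    H = (data - data_mean).dot((ref - ref_mean).T)
    U, S, Vt = np.linalg.svd(H)
    R = Vt.T.dot(U.T)

    # Avoid reflections: enforce det(R) = +1
    if np.linalg.det(R) < 0:
        Vt[-1, :] *= -1
        R = Vt.T.dot(U.T)

    T = ref_mean - R.dot(data_mean)
    return R, T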
Example #5
    def load_subsampled_clouds(self, subsampling_parameter):
        """
        Presubsample point clouds and load into memory (load KDTree for neighbor searches).
        """

        if 0 < subsampling_parameter <= 0.01:
            raise ValueError('subsampling_parameter too low (should be over 1 cm)')

        # Create path for files
        tree_path = join(self.path, 'input_{:.3f}'.format(subsampling_parameter))
        if not exists(tree_path):
            makedirs(tree_path)

        # All training and test files
        files = np.hstack((self.train_files, self.test_files))

        # Initiate containers
        self.input_trees = {'training': [], 'validation': [], 'test': []}
        self.input_colors = {'training': [], 'validation': [], 'test': []}
        self.input_labels = {'training': [], 'validation': []}

        # Advanced display
        N = len(files)
        progress_n = 30
        fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%'
        print('\nPreparing KDTree for all scenes, subsampled at {:.3f}'.format(subsampling_parameter))

        for i, file_path in enumerate(files):

            # Restart timer
            t0 = time.time()

            # get cloud name and split
            cloud_name = file_path.split('/')[-1][:-4]
            cloud_folder = file_path.split('/')[-2]
            if 'train' in cloud_folder:
                if self.all_splits[i] == self.validation_split:
                    cloud_split = 'validation'
                else:
                    cloud_split = 'training'
            else:
                cloud_split = 'test'

            if (cloud_split != 'test' and self.load_test) or (cloud_split == 'test' and not self.load_test):
                continue

            # Name of the input files
            KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))
            sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name))

            # Check if inputs have already been computed
            if isfile(KDTree_file):

                # read ply with data
                data = read_ply(sub_ply_file)
                sub_reflectance = np.expand_dims(data['reflectance'], 1)
                if cloud_split == 'test':
                    sub_labels = None
                else:
                    sub_labels = data['class']

                # Read pkl with search tree
                with open(KDTree_file, 'rb') as f:
                    search_tree = pickle.load(f)

            else:

                # Read ply file
                data = read_ply(file_path)
                points = np.vstack((data['x'], data['y'], data['z'])).astype(np.float32).T
                reflectance = np.expand_dims(data['reflectance'], 1).astype(np.float32)
                if cloud_split == 'test':
                    int_features = None
                else:
                    int_features = data['class']

                # Saturate reflectance
                reflectance = np.minimum(reflectance, 50.0)

                # Subsample cloud
                sub_data = grid_subsampling(points,
                                            features=reflectance,
                                            labels=int_features,
                                            sampleDl=subsampling_parameter)

                # Rescale and saturate float reflectance
                sub_reflectance = sub_data[1] / 50.0

                # Get chosen neighborhoods
                search_tree = KDTree(sub_data[0], leaf_size=50)

                # Save KDTree
                with open(KDTree_file, 'wb') as f:
                    pickle.dump(search_tree, f)

                # Save ply
                if cloud_split == 'test':
                    sub_labels = None
                    write_ply(sub_ply_file,
                              [sub_data[0], sub_reflectance],
                              ['x', 'y', 'z', 'reflectance'])
                else:
                    sub_labels = np.squeeze(sub_data[2])
                    write_ply(sub_ply_file,
                              [sub_data[0], sub_reflectance, sub_labels],
                              ['x', 'y', 'z', 'reflectance', 'class'])

            # Fill data containers
            self.input_trees[cloud_split] += [search_tree]
            self.input_colors[cloud_split] += [sub_reflectance]
            if cloud_split in ['training', 'validation']:
                self.input_labels[cloud_split] += [sub_labels]

            print('', end='\r')
            print(fmt_str.format('#' * (((i+1) * progress_n) // N), 100 * (i+1) / N), end='', flush=True)

        # Get number of clouds
        self.num_training = len(self.input_trees['training'])
        self.num_validation = len(self.input_trees['validation'])
        self.num_test = len(self.input_trees['test'])

        # Get validation and test reprojection indices
        self.validation_proj = []
        self.validation_labels = []
        self.test_proj = []
        self.test_labels = []
        i_val = 0
        i_test = 0

        # Advanced display
        N = max(self.num_validation + self.num_test, 1)
        print('', end='\r')
        print(fmt_str.format('#' * progress_n, 100), flush=True)
        print('\nPreparing reprojection indices for validation and test')

        for i, file_path in enumerate(files):

            # get cloud name and split
            cloud_name = file_path.split('/')[-1][:-4]
            cloud_folder = file_path.split('/')[-2]

            # Validation projection and labels
            if (not self.load_test) and 'train' in cloud_folder and self.all_splits[i] == self.validation_split:
                proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                if isfile(proj_file):
                    with open(proj_file, 'rb') as f:
                        proj_inds, labels = pickle.load(f)
                else:

                    # Get original points
                    data = read_ply(file_path)
                    points = np.vstack((data['x'], data['y'], data['z'])).T
                    labels = data['class']

                    # Compute projection inds
                    proj_inds = np.squeeze(self.input_trees['validation'][i_val].query(points, return_distance=False))
                    proj_inds = proj_inds.astype(np.int32)

                    # Save
                    with open(proj_file, 'wb') as f:
                        pickle.dump([proj_inds, labels], f)

                self.validation_proj += [proj_inds]
                self.validation_labels += [labels]
                i_val += 1

            # Test projection
            if self.load_test and 'test' in cloud_folder:
                proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                if isfile(proj_file):
                    with open(proj_file, 'rb') as f:
                        proj_inds = pickle.load(f)
                else:

                    # Get original points
                    data = read_ply(file_path)
                    points = np.vstack((data['x'], data['y'], data['z'])).T

                    # Compute projection inds
                    proj_inds = np.squeeze(self.input_trees['test'][i_test].query(points, return_distance=False))
                    proj_inds = proj_inds.astype(np.int32)

                    # Save
                    with open(proj_file, 'wb') as f:
                        pickle.dump(proj_inds, f)

                self.test_proj += [proj_inds]
                self.test_labels += [np.zeros(0, dtype=np.int32)]
                i_test += 1

            print('', end='\r')
            print(fmt_str.format('#' * (((i_val + i_test) * progress_n) // N), 100 * (i_val + i_test) / N),
                  end='',
                  flush=True)

        print('\n')

        return
Example #6
label_names = {
    0: 'Unclassified',
    1: 'Ground',
    2: 'Building',
    3: 'Poles',
    4: 'Pedestrians',
    5: 'Cars',
    6: 'Vegetation'
}

#%% Build training data

training_features = np.empty((0, 4))
training_labels = np.empty((0, ))

cloud_ply = read_ply(training_path)
points = np.vstack((cloud_ply['x'], cloud_ply['y'], cloud_ply['z'])).T
labels = cloud_ply['class']

# Initiate training indices array
training_inds = np.empty(0, dtype=np.int32)

for label, name in label_names.items():
    if label == 0 or label == 1:
        continue
    label_inds = np.where(labels == label)[0]
    if len(label_inds) <= num_per_class:
        print('Data harvesting on class: {}'.format(name))
        training_inds = np.hstack((training_inds, label_inds))
    else:
        random_choice = np.random.choice(len(label_inds),
Example #7
    def load_subsampled_clouds(self, subsampling_parameter):
        """
        Presubsample point clouds and load into memory
        """

        if 0 < subsampling_parameter <= 0.01:
            raise ValueError(
                'subsampling_parameter too low (should be over 1 cm)')

        # MY LABELS ARE THE COMPLETE GT POINT CLOUDS
        # Initiate containers
        self.partial_points = {'train': [], 'valid': [], 'test': []}
        self.complete_points = {'train': [], 'valid': [], 'test': []}
        self.categories = {'train': [], 'valid': [], 'test': []}

        for split_type in ['train', 'valid', 'test']:

            # Restart timer
            t0 = time.time()

            # Load wanted points if possible
            print('\nLoading %s points' % split_type)
            filename = join(
                self.data_path,
                '{0:s}_{1:.3f}_record.pkl'.format(split_type,
                                                  subsampling_parameter))

            if exists(filename):
                with open(filename, 'rb') as file:
                    self.partial_points[split_type], \
                    self.complete_points[split_type], \
                    self.categories[split_type] = pickle.load(file)

            # Else compute them from original points
            else:
                # Collect complete & partial file data
                with open(join(self.data_path,
                               '%s.list' % split_type)) as file:
                    model_list = file.read().splitlines()
                    file.close()

                for i, cat_model_id in enumerate(model_list):
                    cat_id, model_id = cat_model_id.split('/')

                    # Read complete ply data, if subsample param exists, save subsampled complete pc, else save original
                    complete_data = read_ply(
                        join(self.data_path, split_type, 'complete', cat_id,
                             "%s.ply" % model_id))
                    complete_points = np.vstack(
                        (complete_data['x'], complete_data['y'],
                         complete_data['z'])).astype(np.float32).T

                    # The complete ground truth should probably not be grid-subsampled, so this call stays commented out:
                    # if subsampling_parameter > 0:
                    #     sub_complete_points = grid_subsampling(complete_points, sampleDl=subsampling_parameter)

                    # For each scan, read the partial ply data; if a subsampling parameter is set, store the subsampled partial cloud
                    for s in range(self.num_scans):
                        partial_data = read_ply(
                            join(self.data_path, split_type, 'partial', cat_id,
                                 model_id, "%s.ply" % s))
                        partial_points = np.vstack(
                            (partial_data['x'], partial_data['y'],
                             partial_data['z'])).astype(np.float32).T

                        if subsampling_parameter > 0:
                            sub_partial_points = grid_subsampling(
                                partial_points, sampleDl=subsampling_parameter)
                            self.partial_points[split_type] += [
                                sub_partial_points
                            ]
                            # complete points & synsets will be duplicated/matched for each scan
                            self.complete_points[split_type] += [
                                complete_points
                            ]
                            self.categories[split_type] += [cat_id]
                        else:
                            self.partial_points[split_type] += [partial_points]
                            self.complete_points[split_type] += [
                                complete_points
                            ]
                            self.categories[split_type] += [cat_id]

                # Save split pickle for later use
                with open(filename, 'wb') as file:
                    pickle.dump((self.partial_points[split_type],
                                 self.complete_points[split_type],
                                 self.categories[split_type]), file)

            lengths = [p.shape[0] for p in self.partial_points[split_type]]
            lengths.extend(
                [p.shape[0] for p in self.complete_points[split_type]])
            sizes = [l * 4 * 3 for l in lengths]
            print('{:.1f} MB loaded in {:.1f}s'.format(
                np.sum(sizes) * 1e-6,
                time.time() - t0))

        self.num_train = len(self.categories['train'])
        self.num_valid = len(self.categories['valid'])
        self.num_test = len(self.categories['test'])
Example #8
    def load_subsampled_clouds(self, subsampling_parameter):
        """
        Presubsample point clouds and load into memory
        """

        if 0 < subsampling_parameter <= 0.01:
            raise ValueError(
                'subsampling_parameter too low (should be over 1 cm)')

        # Initiate containers
        self.input_points = {'training': [], 'validation': [], 'test': []}
        self.input_labels = {'training': [], 'validation': [], 'test': []}
        self.input_point_labels = {
            'training': [],
            'validation': [],
            'test': []
        }

        ################
        # Training files
        ################

        # Restart timer
        t0 = time.time()

        # Load wanted points if possible
        print('\nLoading training points')
        filename = join(
            self.path, 'train_{:.3f}_record.pkl'.format(subsampling_parameter))

        if exists(filename):
            with open(filename, 'rb') as file:
                self.input_labels['training'], \
                self.input_points['training'], \
                self.input_point_labels['training'] = pickle.load(file)

        # Else compute them from original points
        else:

            # Collect training file names
            split_path = join(self.path, '{:s}_ply'.format('train'))
            names = [f[:-4] for f in listdir(split_path) if f[-4:] == '.ply']
            names = np.sort(names)

            # Collect point clouds
            for i, cloud_name in enumerate(names):
                data = read_ply(join(split_path, cloud_name + '.ply'))
                points = np.vstack((data['x'], data['y'], data['z'])).T
                point_labels = data['label']
                if subsampling_parameter > 0:
                    sub_points, sub_labels = grid_subsampling(
                        points,
                        labels=point_labels,
                        sampleDl=subsampling_parameter)
                    self.input_points['training'] += [sub_points]
                    self.input_point_labels['training'] += [sub_labels]
                else:
                    self.input_points['training'] += [points]
                    self.input_point_labels['training'] += [point_labels]

            # Get labels
            label_names = ['_'.join(n.split('_')[:-1]) for n in names]
            self.input_labels['training'] = np.array(
                [self.name_to_label[name] for name in label_names])

            # Collect Validation file names
            split_path = join(self.path, '{:s}_ply'.format('val'))
            names = [f[:-4] for f in listdir(split_path) if f[-4:] == '.ply']
            names = np.sort(names)

            # Collect point clouds
            for i, cloud_name in enumerate(names):
                data = read_ply(join(split_path, cloud_name + '.ply'))
                points = np.vstack((data['x'], data['y'], data['z'])).T
                point_labels = data['label']
                if subsampling_parameter > 0:
                    sub_points, sub_labels = grid_subsampling(
                        points,
                        labels=point_labels,
                        sampleDl=subsampling_parameter)
                    self.input_points['training'] += [sub_points]
                    self.input_point_labels['training'] += [sub_labels]
                else:
                    self.input_points['training'] += [points]
                    self.input_point_labels['training'] += [point_labels]

            # Get labels
            label_names = ['_'.join(n.split('_')[:-1]) for n in names]
            self.input_labels['training'] = np.hstack(
                (self.input_labels['training'],
                 np.array([self.name_to_label[name] for name in label_names])))

            # Save for later use
            with open(filename, 'wb') as file:
                pickle.dump((self.input_labels['training'],
                             self.input_points['training'],
                             self.input_point_labels['training']), file)

        lengths = [p.shape[0] for p in self.input_points['training']]
        sizes = [l * 4 * 3 for l in lengths]
        print('{:.1f} MB loaded in {:.1f}s'.format(
            np.sum(sizes) * 1e-6,
            time.time() - t0))

        ############
        # Test files
        ############

        # Restart timer
        t0 = time.time()

        # Load wanted points if possible
        print('\nLoading test points')
        filename = join(self.path,
                        'test_{:.3f}_record.pkl'.format(subsampling_parameter))
        if exists(filename):
            with open(filename, 'rb') as file:
                self.input_labels['test'], \
                self.input_points['test'], \
                self.input_point_labels['test'] = pickle.load(file)

        # Else compute them from original points
        else:

            # Collect test file names
            split_path = join(self.path, '{:s}_ply'.format('test'))
            names = [f[:-4] for f in listdir(split_path) if f[-4:] == '.ply']
            names = np.sort(names)

            # Collect point clouds
            for i, cloud_name in enumerate(names):
                data = read_ply(join(split_path, cloud_name + '.ply'))
                points = np.vstack((data['x'], data['y'], data['z'])).T
                point_labels = data['label']
                if subsampling_parameter > 0:
                    sub_points, sub_labels = grid_subsampling(
                        points,
                        labels=point_labels,
                        sampleDl=subsampling_parameter)
                    self.input_points['test'] += [sub_points]
                    self.input_point_labels['test'] += [sub_labels]
                else:
                    self.input_points['test'] += [points]
                    self.input_point_labels['test'] += [point_labels]

            # Get labels
            label_names = ['_'.join(n.split('_')[:-1]) for n in names]
            self.input_labels['test'] = np.array(
                [self.name_to_label[name] for name in label_names])

            # Save for later use
            with open(filename, 'wb') as file:
                pickle.dump(
                    (self.input_labels['test'], self.input_points['test'],
                     self.input_point_labels['test']), file)

        lengths = [p.shape[0] for p in self.input_points['test']]
        sizes = [l * 4 * 3 for l in lengths]
        print('{:.1f} MB loaded in {:.1f}s\n'.format(
            np.sum(sizes) * 1e-6,
            time.time() - t0))

        #######################################
        # Eliminate unconsidered object classes
        #######################################

        # Eliminate unconsidered classes
        if self.ShapeNetPartType in self.label_names:
            # Index of the wanted label
            wanted_label = self.name_to_label[self.ShapeNetPartType]

            # Manage training points
            boolean_mask = self.input_labels['training'] == wanted_label
            self.input_labels['training'] = self.input_labels['training'][
                boolean_mask]
            self.input_points['training'] = np.array(
                self.input_points['training'])[boolean_mask]
            self.input_point_labels['training'] = np.array(
                self.input_point_labels['training'])[boolean_mask]
            self.num_train = len(self.input_labels['training'])

            # Manage test points
            boolean_mask = self.input_labels['test'] == wanted_label
            self.input_labels['test'] = self.input_labels['test'][boolean_mask]
            self.input_points['test'] = np.array(
                self.input_points['test'])[boolean_mask]
            self.input_point_labels['test'] = np.array(
                self.input_point_labels['test'])[boolean_mask]
            self.num_test = len(self.input_labels['test'])

        # Change to 0-based labels
        self.input_point_labels['training'] = [
            p_l - 1 for p_l in self.input_point_labels['training']
        ]
        self.input_point_labels['test'] = [
            p_l - 1 for p_l in self.input_point_labels['test']
        ]

        # Test = validation
        self.input_labels['validation'] = self.input_labels['test']
        self.input_points['validation'] = self.input_points['test']
        self.input_point_labels['validation'] = self.input_point_labels['test']

        return
Example #9
if __name__ == '__main__':

    # Transformation estimation
    # *************************
    #

    # If statement to skip this part if wanted
    if True:

        # Cloud paths
        bunny_o_path = '../data/bunny_original.ply'
        bunny_r_path = '../data/bunny_returned.ply'

        # Load clouds
        data_o = read_ply(bunny_o_path)
        ref_o = read_ply(bunny_r_path)
        data = np.vstack((data_o['x'], data_o['y'], data_o['z'])).T
        ref = np.vstack((ref_o['x'], ref_o['y'], ref_o['z'])).T

        # Find the best transformation
        R, T = best_rigid_transform(data, ref)
        # Apply the transformation
        data_out = np.dot(R, data.T).T + T
        # Save cloud
        write_ply('../bunny_best_rigid_transformation.ply', [data_out],
                  ['x', 'y', 'z'])
        # Compute RMS
        N_temp = np.shape(data)[0]
        RMS = np.sqrt(1. / N_temp * np.sum((data - ref)**2))
        RMS_out = np.sqrt(1. / N_temp * np.sum((data_out - ref)**2))
Example #10
def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):

    # Kernel directory
    kernel_dir = 'kernels/dispositions'
    if not exists(kernel_dir):
        makedirs(kernel_dir)

    # Too many points: switch to Lloyd's algorithm
    if num_kpoints > 30:
        lloyd = True

    # Kernel_file
    kernel_file = join(kernel_dir, 'k_{:03d}_{:s}_{:d}D.ply'.format(num_kpoints, fixed, dimension))

    # Check if already done
    if not exists(kernel_file):
        if lloyd:
            # Create kernels
            kernel_points = spherical_Lloyd(1.0,
                                            num_kpoints,
                                            dimension=dimension,
                                            fixed=fixed,
                                            verbose=0)

        else:
            # Create kernels
            kernel_points, grad_norms = kernel_point_optimization_debug(1.0,
                                                                        num_kpoints,
                                                                        num_kernels=100,
                                                                        dimension=dimension,
                                                                        fixed=fixed,
                                                                        verbose=0)

            # Find best candidate
            best_k = np.argmin(grad_norms[-1, :])

            # Save points
            kernel_points = kernel_points[best_k, :, :]

        write_ply(kernel_file, kernel_points, ['x', 'y', 'z'])

    else:
        data = read_ply(kernel_file)
        kernel_points = np.vstack((data['x'], data['y'], data['z'])).T

    # Random rotations for the kernel
    # N.B. 4D random rotations not supported yet
    R = np.eye(dimension)
    theta = np.random.rand() * 2 * np.pi
    if dimension == 2:
        if fixed != 'vertical':
            c, s = np.cos(theta), np.sin(theta)
            R = np.array([[c, -s], [s, c]], dtype=np.float32)

    elif dimension == 3:
        if fixed != 'vertical':
            c, s = np.cos(theta), np.sin(theta)
            R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)

        else:
            phi = (np.random.rand() - 0.5) * np.pi

            # Create the first vector in Cartesian coordinates
            u = np.array([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)])

            # Choose a random rotation angle
            alpha = np.random.rand() * 2 * np.pi

            # Create the rotation matrix with this vector and angle
            R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[0]

            R = R.astype(np.float32)

    # Add a small noise
    kernel_points = kernel_points + np.random.normal(scale=0.01, size=kernel_points.shape)

    # Scale kernels
    kernel_points = radius * kernel_points

    # Rotate kernels
    kernel_points = np.matmul(kernel_points, R)

    return kernel_points.astype(np.float32)
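create_3D_rotations above converts an axis/angle pair into a rotation matrix. A minimal Rodrigues-formula sketch of one standard convention (the helper in the original repository may differ in details), assuming unit axes of shape (N, 3) and angles of shape (N,) or (N, 1):

import numpy as np

def create_3D_rotations(axes, angles):
    # axes: (N, 3) unit vectors, angles: (N,) or (N, 1) rotation angles.
    # Returns (N, 3, 3) rotation matrices (Rodrigues formula).
    angles = np.reshape(angles, (-1,))
    c = np.cos(angles)[:, None, None]
    s = np.sin(angles)[:, None, None]

    # Skew-symmetric cross-product matrices of the axes
    K = np.zeros((axes.shape[0], 3, 3))
    K[:, 0, 1], K[:, 0, 2] = -axes[:, 2], axes[:, 1]
    K[:, 1, 0], K[:, 1, 2] = axes[:, 2], -axes[:, 0]
    K[:, 2, 0], K[:, 2, 1] = -axes[:, 1], axes[:, 0]

    I = np.eye(3)[None, :, :]
    return I + s * K + (1 - c) * np.matmul(K, K)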
Example #11
    def load_evaluation_points(self, file_path):

        data = read_ply(file_path)
        return np.vstack((data['x'], data['y'], data['z'])).T
Example #12
    N_files = 50

    # Initialization of the data set
    data = list(range(N_files))
    #data = np.astype('string')
    points = list(range(N_files))

    # Load Point Cloud
    print('Load Point Cloud begin: ')
    for i in range(N_files):
        file_path = '../data/frames/frame_%06d.ply' % i
        data[i] = read_ply(file_path)
        points[i] = np.vstack((data[i]['x'], data[i]['y'], data[i]['z'])).T
    print('Load Point Cloud end. ')

    # Sub Sampling
    print('Sub-Sampling begin: ')
    """
    Decimation_factor = 50
    for i in range(N_files):
        points[i] = cloud_decimation(points[i],Decimation_factor) 
    """
    voxel_size = 0.2
    for i in range(N_files):
        points[i] = grid_subsampling(points[i], voxel_size)
    print('Sub-Sampling end. ')
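grid_subsampling in these snippets is the compiled helper from the KPConv code base. A simplified pure-NumPy sketch of the same idea (one barycenter per occupied voxel), ignoring the features/labels arguments used in other examples:

import numpy as np

def grid_subsampling_numpy(points, voxel_size):
    # points: (N, 3) float array; returns one averaged point per occupied voxel.
    voxel_idx = np.floor(points / voxel_size).astype(np.int64)

    # Group points by voxel and accumulate their coordinates
    _, inverse, counts = np.unique(voxel_idx, axis=0,
                                   return_inverse=True, return_counts=True)
    sums = np.zeros((counts.shape[0], 3), dtype=np.float64)
    np.add.at(sums, inverse, points)

    return (sums / counts[:, None]).astype(np.float32)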
Example #13
if __name__ == '__main__':

    # Transformation estimation
    # *************************
    #

    # If statement to skip this part if wanted
    if False:

        # Cloud paths
        bunny_o_path = '../data/bunny_original.ply'
        bunny_r_path = '../data/bunny_returned.ply'

        # Load clouds
        bunny_o = read_ply(bunny_o_path)
        bunny_o = np.vstack((bunny_o['x'], bunny_o['y'], bunny_o['z']))
        bunny_r = read_ply(bunny_r_path)
        bunny_r = np.vstack((bunny_r['x'], bunny_r['y'], bunny_r['z']))
        # Find the best transformation
        R, T = best_rigid_transform(bunny_r, bunny_o)
        # Apply the transformation
        bunny_t = R.dot(bunny_r) + T

        # Save cloud
        write_ply('../bunny_transformed.ply', [bunny_t.T], ['x', 'y', 'z'])

        # Compute RMS
        diff = bunny_t - bunny_o
        rms = RMS(bunny_t, bunny_o)
        # Print RMS
Example #14
label_names = {
    0: 'Unclassified',
    1: 'Ground',
    2: 'Building',
    3: 'Poles',
    4: 'Pedestrians',
    5: 'Cars',
    6: 'Vegetation'
}

training_features = np.empty((0, 4))
training_labels = np.empty((0, ))

training_path = './data_wo_ground/training'
file = os.listdir(training_path)[0]

cloud_ply = read_ply(join(training_path, file))
points = np.vstack((cloud_ply['x'], cloud_ply['y'], cloud_ply['z'])).T
labels = cloud_ply['class']

training_inds = np.empty(0, dtype=np.int32)

for label, name in label_names.items():
    if label == 0 or label == 1:
        continue
    label_inds = np.where(labels == label)[0]
    if len(label_inds) <= num_per_class:
        training_inds = np.hstack((training_inds, label_inds))
    else:
        random_choice = np.random.choice(len(label_inds),
                                         num_per_class,
                                         replace=False)
Example #15
import time
import numpy as np
import os
from utils.ply import read_ply
import sys

for path1 in sys.argv[1:]:
    path2 = path1[:-3] + 'npy'
    if os.path.exists(path2):
        continue
    print(path1)
    t0 = time.time()
    arr = read_ply(path1)
    pts = np.vstack((arr['x'], arr['y'], arr['z'], arr['intensity'], arr['red'], arr['green'], arr['blue'], arr['class'])).T
    t1 = time.time()
    print(pts.shape, pts.dtype, t1-t0)
    np.save(path2, pts.astype(np.float32), allow_pickle=False)

    t0 = time.time()
    arr = np.load(path2, mmap_mode='r')
    t1 = time.time()
    print(arr.shape, arr.dtype, t1-t0)
Example #16
if __name__ == '__main__':

    # Transformation estimation
    # *************************
    #

    # If statement to skip this part if wanted
    if False:

        # Cloud paths
        bunny_o_path = '../data/bunny_original.ply'
        bunny_r_path = '../data/bunny_returned.ply'

        # Load clouds
        bunny_o_file = read_ply(bunny_o_path)
        bunny_r_file = read_ply(bunny_r_path)

        bunny_o_pts = np.vstack(
            (bunny_o_file['x'], bunny_o_file['y'], bunny_o_file['z']))  # data
        bunny_r_pts = np.vstack(
            (bunny_r_file['x'], bunny_r_file['y'], bunny_r_file['z']))  # ref

        # Find the best transformation
        R, T = best_rigid_transform(bunny_o_pts, bunny_r_pts)

        # Apply the transformation
        bunny_m_pts = R.dot(bunny_o_pts) + T

        # Save cloud
        write_ply('../bunny_transform.ply', [bunny_m_pts.T], ['x', 'y', 'z'])
Example #17
                    default="checkpoints/full_cloud.txt",
                    help='File containing the predictions.')
parser.add_argument('--cloud_file',
                    type=str,
                    default="data/MiniChallenge/test/Lille2_al.ply",
                    metavar='C',
                    help='Labelled ground truth cloud.')
parser.add_argument('--latex',
                    type=bool,
                    default=False,
                    help="IoU printing format")
args = parser.parse_args()

pred = np.loadtxt(args.pred_file, dtype=int)

cloud = read_ply(args.cloud_file)

gt10c = cloud['scalar_class']

gt = to6classes(gt10c)

cm = confusionMatrix(pred, gt)

print(cm)

# plt.matshow(cm)
# plt.show()

iouc = iou(cm)

if args.latex:
Example #18
    return normal.astype(np.float32)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Point Cloud Recognition')
    parser.add_argument('--path',
                        type=str,
                        default='data/MiniChallenge/test',
                        metavar='N',
                        help='Folder containing the point clouds')
    parser.add_argument('--labels', type=bool, default=False)
    args = parser.parse_args()

    for fname in glob.glob(args.path + "/*.ply"):
        print(fname)
        cloud = read_ply(fname)
        points = np.vstack((cloud['x'], cloud['y'], cloud['z'])).T

        points = alignCloud(points).astype(np.float32)

        if args.labels:
            labels = cloud['class']
            data = [points, labels]
            colname = ['x', 'y', 'z', 'class']
        else:
            data = [points]
            colname = [
                'x',
                'y',
                'z',
            ]
Example #19
#
#   Here you can define the instructions that are called when you execute this file
#

if __name__ == '__main__':

    # Load point cloud
    # ****************
    #
    #   Load the file '../data/indoor_scan.ply'
    #   (See read_ply function)
    #
    planarities_path = '../data/planarities.ply'
    if os.path.exists(planarities_path):
        # Load point cloud
        data = read_ply(planarities_path)

        # Concatenate data
        points = np.vstack((data['x'], data['y'], data['z'])).T
        normals = np.vstack((data['nx'], data['ny'], data['nz'])).T
        colors = np.vstack((data['red'], data['green'], data['blue'])).T

        planarities = data['planarities']

    else:
        # Path of the file
        file_path = '../data/indoor_scan.ply'

        # Load point cloud
        data = read_ply(file_path)
Example #20
# %% Relabel clouds (merge 2 terrain classes)

# Parameters
original_clouds_folder = Path("..") / "data" / "original_clouds"
relabeled_clouds_folder = Path("..") / "data" / "relabeled_clouds"
overwrite = False

if not relabeled_clouds_folder.exists():
    relabeled_clouds_folder.mkdir()

for ply_file in original_clouds_folder.glob("*.ply"):
    relabeled_ply_file = relabeled_clouds_folder / ply_file.name
    if overwrite or not relabeled_ply_file.exists():
        print(f"Relabeling: {ply_file}")
        data = read_ply(str(ply_file))
        cloud = np.vstack((data['x'], data['y'], data['z'])).T
        rgb_colors = np.vstack((data['red'], data['green'], data['blue'])).T
        dlaser = data['reflectance']
        if "label" not in data.dtype.names:
            print("Cancelling relabeling because no label field was given")
            continue
        label = data['label']
        label[label == 1] = 2
        write_ply(str(relabeled_ply_file), [
            cloud, dlaser,
            rgb_colors.astype(np.int32),
            label.astype(np.int32)
        ], ['x', 'y', 'z', 'reflectance', 'red', 'green', 'blue', 'label'])
        print(f"Done relabeling: {ply_file}")
Example #21
    # Load point clouds
    # *****************
    #

    # Transformation estimation
    # *************************
    #

    # If statement to skip this part if wanted
    if True:

        # Load clouds
        bunny_o_path = '../data/bunny_original.ply'
        bunny_r_path = '../data/bunny_returned.ply'
        bunny_o_ply = read_ply(bunny_o_path)
        bunny_r_ply = read_ply(bunny_r_path)
        bunny_o = np.vstack(
            (bunny_o_ply['x'], bunny_o_ply['y'], bunny_o_ply['z']))
        bunny_r = np.vstack(
            (bunny_r_ply['x'], bunny_r_ply['y'], bunny_r_ply['z']))

        # Find the best transformation
        R, T = best_rigid_transform(bunny_r, bunny_o)

        # Apply the transformation
        bunny_r_opt = R.dot(bunny_r) + T

        # Save cloud
        write_ply('../bunny_r_opt', [bunny_r_opt.T], ['x', 'y', 'z'])
Example #22
    def show_activation(path, relu_idx=0, save_video=False):
        """
        This function shows the saved input point clouds that maximize the activations. You can also directly load
        the files in visualization software like CloudCompare.
        In the case of relu_idx = 0 with gaussian mode, the associated filter is also shown. This function can only
        show the filters for the last saved epoch.
        """

        ################
        # Find the files
        ################

        # Check visu folder
        visu_path = join(drive_results,
                         'visu',
                         'visu_' + path.split('/')[-1],
                         'top_activations',
                         'Relu{:02d}'.format(relu_idx))
        if not exists(visu_path):
            message = 'Relu {:d} activations of the model {:s} not found.'
            raise ValueError(message.format(relu_idx, path.split('/')[-1]))

        # Get the list of files
        feature_files = np.sort([f for f in listdir(visu_path) if f.endswith('.ply')])
        if len(feature_files) == 0:
            message = 'Relu {:d} activations of the model {:s} not found.'
            raise ValueError(message.format(relu_idx, path.split('/')[-1]))

        # Load mode
        config = Config()
        config.load(path)
        mode = config.convolution_mode

        #################
        # Get activations
        #################

        all_points = []
        all_responses = []

        for file in feature_files:
            # Load points
            data = read_ply(join(visu_path, file))
            all_points += [np.vstack((data['x'], data['y'], data['z'])).T]
            all_responses += [data['responses']]

        ###########################
        # Interactive visualization
        ###########################

        # Create figure for features
        fig1 = mlab.figure('Features', bgcolor=(0.5, 0.5, 0.5), size=(640, 480))
        fig1.scene.parallel_projection = False

        # Indices
        global file_i
        file_i = 0

        def update_scene():

            #  clear figure
            mlab.clf(fig1)

            # Plot new data feature
            points = all_points[file_i]
            responses = all_responses[file_i]
            min_response, max_response = np.min(responses), np.max(responses)
            responses = (responses - min_response) / (max_response - min_response)

            # Rescale points for visu
            points = (points * 1.5 / config.in_radius + np.array([1.0, 1.0, 1.0])) * 50.0

            # Show point clouds colorized with activations
            activations = mlab.points3d(points[:, 0],
                                        points[:, 1],
                                        points[:, 2],
                                        responses,
                                        scale_factor=3.0,
                                        scale_mode='none',
                                        vmin=0.1,
                                        vmax=0.9,
                                        figure=fig1)

            # New title
            mlab.title(feature_files[file_i], color=(0, 0, 0), size=0.3, height=0.01)
            text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
            mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
            mlab.orientation_axes()

            return

        def keyboard_callback(vtk_obj, event):
            global file_i

            if vtk_obj.GetKeyCode() in ['g', 'G']:

                file_i = (file_i - 1) % len(all_responses)
                update_scene()

            elif vtk_obj.GetKeyCode() in ['h', 'H']:

                file_i = (file_i + 1) % len(all_responses)
                update_scene()

            return

        # Draw a first plot
        update_scene()
        fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
        mlab.show()

        return
Example #23
#
#
#   Here you can define the instructions that are called when you execute this file
#

if __name__ == '__main__':

    # PCA verification
    # ****************
    #

    if False:

        # Load cloud as a [N x 3] matrix
        cloud_path = '../data/Lille_street_small.ply'
        cloud_ply = read_ply(cloud_path)
        cloud = np.vstack((cloud_ply['x'], cloud_ply['y'], cloud_ply['z'])).T

        # Compute PCA on the whole cloud
        eigenvalues, eigenvectors = local_PCA(cloud)

        # Print your result
        print(eigenvalues)

        # Expected values :
        #
        #   [lambda_3; lambda_2; lambda_1] = [ 5.25050177 21.7893201  89.58924003]
        #
        #   (the convention is always lambda_1 >= lambda_2 >= lambda_3)
        #
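local_PCA is called above but not defined in this excerpt. A minimal sketch consistent with the expected output (eigenvalues of the cloud's covariance matrix, in ascending order):

import numpy as np

def local_PCA(points):
    # points: (N, 3) array; returns eigenvalues (ascending) and eigenvectors
    centered = points - points.mean(axis=0)
    cov = centered.T.dot(centered) / points.shape[0]
    eigenvalues, eigenvectors = np.linalg.eigh(cov)
    return eigenvalues, eigenvectors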
Example #24
if __name__ == '__main__':

    # Transformation estimation
    # *************************
    #

    # If statement to skip this part if wanted
    if False:

        # Cloud paths
        bunny_o_path = '../data/bunny_original.ply'
        bunny_r_path = '../data/bunny_returned.ply'

        # Load point cloud
        data_o = read_ply(bunny_o_path)
        points_o = np.vstack((data_o['x'], data_o['y'], data_o['z']))
        data_r = read_ply(bunny_r_path)
        points_r = np.vstack((data_r['x'], data_r['y'], data_r['z']))

        # Find the best transformation
        R, T = best_rigid_transform(points_r, points_o)

        # Apply the transformation
        transformed_points = R @ points_r + T

        # Save cloud
        write_ply('../bunny_recaled.ply', [transformed_points.T],
                  ['x', 'y', 'z'])

        # Compute RMS
Example #25
    hoppe = np.sum(closest_normals * (voxels[:, np.newaxis] - closest_points),
                   axis=-1)

    # finally compute f(x)
    volume = np.sum(hoppe * theta, axis=-1) / np.sum(theta, axis=-1)

    return volume.reshape(*d * [number_cells + 1])


if __name__ == '__main__':

    # Path of the file
    file_path = '../data/bunny_normals.ply'

    # Load point cloud
    data = read_ply(file_path)

    # Concatenate data
    points = np.vstack((data['x'], data['y'], data['z'])).T
    normals = np.vstack((data['nx'], data['ny'], data['nz'])).T

    # Compute the min and max of the data points
    min_grid = np.copy(points[0, :])
    max_grid = np.copy(points[0, :])
    for i in range(1, points.shape[0]):
        for j in range(0, 3):
            if (points[i, j] < min_grid[j]):
                min_grid[j] = points[i, j]
            if (points[i, j] > max_grid[j]):
                max_grid[j] = points[i, j]
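As a side note, the double loop above just computes the axis-aligned bounding box of the cloud; an equivalent vectorized form, assuming points is the (N, 3) array loaded above:

# Per-axis minimum and maximum of the cloud
min_grid = points.min(axis=0)
max_grid = points.max(axis=0)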
Example #26
    def test_multi_segmentation(self,
                                model,
                                dataset,
                                num_votes=30,
                                num_saves=10):

        ##################
        # Pre-computations
        ##################

        print('Preparing test structures')
        t1 = time.time()

        # Collect original test file names
        original_path = join(dataset.path, 'test_ply')
        test_names = [
            f[:-4] for f in listdir(original_path) if f[-4:] == '.ply'
        ]
        test_names = np.sort(test_names)

        original_labels = []
        original_points = []
        projection_inds = []
        for i, cloud_name in enumerate(test_names):

            # Read data in ply file
            data = read_ply(join(original_path, cloud_name + '.ply'))
            points = np.vstack((data['x'], -data['z'], data['y'])).T
            original_labels += [data['label'] - 1]
            original_points += [points]

            # Create tree structure to compute neighbors
            tree = KDTree(dataset.input_points['test'][i])
            projection_inds += [
                np.squeeze(tree.query(points, return_distance=False))
            ]
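            # projection_inds[i][j] is the index of the subsampled input point
            # closest to original point j; it is used further down to project
            # the averaged predictions back onto the full-resolution cloud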

        t2 = time.time()
        print('Done in {:.1f} s\n'.format(t2 - t1))

        ##########
        # Initiate
        ##########

        # Test saving path
        if model.config.saving:
            test_path = join('test', model.saving_path.split('/')[-1])
            if not exists(test_path):
                makedirs(test_path)
        else:
            test_path = None

        # Initialise iterator with test data
        self.sess.run(dataset.test_init_op)

        # Initiate result containers
        average_predictions = [
            np.zeros((1, 1), dtype=np.float32) for _ in test_names
        ]
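        # (1, 1) zero placeholders: on the first vote the incremental average
        # below broadcasts them to the full (n_points, n_parts) shape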

        #####################
        # Network predictions
        #####################

        mean_dt = np.zeros(2)
        last_display = time.time()
        for v in range(num_votes):

            # Run model on all test examples
            # ******************************

            # Initiate result containers
            all_predictions = []
            all_obj_inds = []

            while True:
                try:

                    # Run one step of the model
                    t = [time.time()]
                    ops = (self.prob_logits, model.labels,
                           model.inputs['super_labels'],
                           model.inputs['object_inds'],
                           model.inputs['in_batches'])
                    preds, labels, obj_labels, o_inds, batches = self.sess.run(
                        ops, {model.dropout_prob: 1.0})
                    t += [time.time()]

                    # Stack all predictions for each class separately
                    max_ind = np.max(batches)
                    for b_i, b in enumerate(batches):

                        # Eliminate shadow indices
                        b = b[b < max_ind - 0.5]

                        # Get prediction (only for the concerned parts)
                        obj = obj_labels[b[0]]
                        predictions = preds[b][:, :model.config.num_classes[obj]]

                        # Stack all results
                        all_predictions += [predictions]
                        all_obj_inds += [o_inds[b_i]]

                    # Average timing
                    t += [time.time()]
                    mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) -
                                                       np.array(t[:-1]))

                    # Display
                    if (t[-1] - last_display) > 1.0:
                        last_display = t[-1]
                        message = 'Vote {:d} : {:.1f}% (timings : {:4.2f} {:4.2f})'
                        print(
                            message.format(
                                v,
                                100 * len(all_predictions) / dataset.num_test,
                                1000 * (mean_dt[0]), 1000 * (mean_dt[1])))

                except tf.errors.OutOfRangeError:
                    break

            # Project predictions on original point clouds
            # ********************************************

            print('\nGetting test confusions')
            t1 = time.time()

            for i, probs in enumerate(all_predictions):

                # Interpolate prediction from current positions to original points
                obj_i = all_obj_inds[i]
                proj_predictions = probs[projection_inds[obj_i]]

                # Average prediction across votes
                average_predictions[obj_i] = average_predictions[obj_i] + \
                                             (proj_predictions - average_predictions[obj_i]) / (v + 1)

            Confs = []
            for obj_i, avg_probs in enumerate(average_predictions):

                # Compute confusion matrices
                parts = [j for j in range(avg_probs.shape[1])]
                Confs += [
                    confusion_matrix(original_labels[obj_i],
                                     np.argmax(avg_probs, axis=1), labels=parts)
                ]

            t2 = time.time()
            print('Done in {:.1f} s\n'.format(t2 - t1))

            # Save the best/worst segmentations per class
            # *******************************************

            print('Saving test examples')
            t1 = time.time()

            # Regroup confusions per object class
            Confs = np.array(Confs)
            obj_mIoUs = []
            for l in dataset.label_values:

                # Get confusions for this object
                obj_inds = np.where(dataset.input_labels['test'] == l)[0]
                obj_confs = np.stack(Confs[obj_inds])

                # Get IoU
                obj_IoUs = IoU_from_confusions(obj_confs)
                obj_mIoUs += [np.mean(obj_IoUs, axis=-1)]

                # Get X best and worst prediction
                order = np.argsort(obj_mIoUs[-1])
                worst_inds = obj_inds[order[:num_saves]]
                best_inds = obj_inds[order[:-num_saves - 1:-1]]
                worst_IoUs = obj_IoUs[order[:num_saves]]
                best_IoUs = obj_IoUs[order[:-num_saves - 1:-1]]
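                # (order sorts mIoUs in ascending order, so the reversed tail
                #  slice picks the num_saves best clouds, best first)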

                # Save the names in a file
                obj_path = join(test_path, dataset.label_to_names[l])
                if not exists(obj_path):
                    makedirs(obj_path)
                worst_file = join(obj_path, 'worst_inds.txt')
                best_file = join(obj_path, 'best_inds.txt')
                with open(worst_file, "w") as text_file:
                    for w_i, w_IoUs in zip(worst_inds, worst_IoUs):
                        text_file.write('{:d} {:s} :'.format(
                            w_i, test_names[w_i]))
                        for IoU in w_IoUs:
                            text_file.write(' {:.1f}'.format(100 * IoU))
                        text_file.write('\n')

                with open(best_file, "w") as text_file:
                    for b_i, b_IoUs in zip(best_inds, best_IoUs):
                        text_file.write('{:d} {:s} :'.format(
                            b_i, test_names[b_i]))
                        for IoU in b_IoUs:
                            text_file.write(' {:.1f}'.format(100 * IoU))
                        text_file.write('\n')

                # Save the clouds
                for i, w_i in enumerate(worst_inds):
                    filename = join(obj_path, 'worst_{:02d}.ply'.format(i + 1))
                    preds = np.argmax(average_predictions[w_i],
                                      axis=1).astype(np.int32)
                    write_ply(
                        filename,
                        [original_points[w_i], original_labels[w_i], preds],
                        ['x', 'y', 'z', 'gt', 'pre'])

                for i, b_i in enumerate(best_inds):
                    filename = join(obj_path, 'best_{:02d}.ply'.format(i + 1))
                    preds = np.argmax(average_predictions[b_i],
                                      axis=1).astype(np.int32)
                    write_ply(
                        filename,
                        [original_points[b_i], original_labels[b_i], preds],
                        ['x', 'y', 'z', 'gt', 'pre'])

            t2 = time.time()
            print('Done in {:.1f} s\n'.format(t2 - t1))

            # Display results
            # ***************

            objs_average = [np.mean(mIoUs) for mIoUs in obj_mIoUs]
            instance_average = np.mean(np.hstack(obj_mIoUs))
            class_average = np.mean(objs_average)

            print(
                'Objs | Inst | Air  Bag  Cap  Car  Cha  Ear  Gui  Kni  Lam  Lap  Mot  Mug  Pis  Roc  Ska  Tab'
            )
            print(
                '-----|------|--------------------------------------------------------------------------------'
            )

            s = '{:4.1f} | {:4.1f} | '.format(100 * class_average,
                                              100 * instance_average)
            for AmIoU in objs_average:
                s += '{:4.1f} '.format(100 * AmIoU)
            print(s + '\n')

            # Initialise iterator with test data
            self.sess.run(dataset.test_init_op)

        return
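
# IoU_from_confusions is used above but not defined in this snippet. A
# minimal sketch, assuming it takes a stack of confusion matrices with
# ground truth on the rows and predictions on the columns (consistent with
# how Confs is built above) and returns the per-class
# IoU = TP / (TP + FP + FN):

import numpy as np

def IoU_from_confusions(confusions):
    # confusions: (..., n_classes, n_classes) integer confusion matrices
    TP = np.diagonal(confusions, axis1=-2, axis2=-1)
    TP_plus_FN = np.sum(confusions, axis=-1)
    TP_plus_FP = np.sum(confusions, axis=-2)
    union = TP_plus_FN + TP_plus_FP - TP
    # Avoid division by zero for classes absent from both gt and predictions
    return TP / np.maximum(union, 1)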