Example #1
    def test_scale_vertices(self):
        # Run and check results visually.
        # - Simple mesh.
        mesh = pymesh.load_mesh(
            os.path.join(current_dir, '../common_data/simple_mesh.ply'))
        mesh = augmentation(mesh=mesh,
                            vertices_scale_mean=1.0,
                            vertices_scale_var=0.1)
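        # (The two parameters above presumably control a random rescaling of
        # the vertex coordinates around the given mean and variance; this is
        # inferred from the parameter names.)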
        if (not os.path.exists(os.path.join(current_dir, '../output_data/'))):
            os.mkdir(os.path.join(current_dir, '../output_data'))
        pymesh.save_mesh(mesh=mesh,
                         filename=os.path.join(
                             current_dir,
                             '../output_data/simple_mesh_scale_vertices.ply'),
                         ascii=True)
        # - Modified non-flat version of simple mesh.
        mesh = pymesh.load_mesh(
            os.path.join(current_dir,
                         '../common_data/simple_mesh_nonflat.ply'))
        mesh = augmentation(mesh=mesh,
                            vertices_scale_mean=1.0,
                            vertices_scale_var=0.1)
        pymesh.save_mesh(
            mesh=mesh,
            filename=os.path.join(
                current_dir,
                '../output_data/simple_mesh_nonflat_scale_vertices.ply'),
            ascii=True)

    def process(self):
        # Verify that no previous data is being inadvertently erased.
        processed_data_folder = osp.join(self.processed_dir, self.__split)
        if (osp.exists(processed_data_folder)):
            if (os.listdir(processed_data_folder)):
                raise IOError(
                    "The folder containing the processed data, "
                    f"'{processed_data_folder}', is not empty. Most likely the "
                    "root folder you have set for the current dataset "
                    "coincides with that of a previously-generated dataset, "
                    "and the current dataset has parameters not fully "
                    "compatible with those used to generate the data already "
                    "in the folder. Please double-check the dataset parameters "
                    "or delete the content of the folder/specify a different "
                    "root folder of the dataset.")
        # Each category is assigned an index, to be used as target in
        # classification.
        category_indices = [
            self.valid_categories.index(category)
            for category in self.__categories
        ]
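        # (For instance, if valid_categories were ['A', 'B', 'C'] and
        # self.__categories were ['C'], category_indices would be [2]; the
        # actual category names depend on the dataset definition.)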
        for category, category_index in zip(self.__categories,
                                            category_indices):
            process_subfolder = osp.join(processed_data_folder, category)
            if (not osp.exists(process_subfolder)):
                os.makedirs(process_subfolder)

            paths = glob.glob(
                osp.join(self.raw_dir, f'coseg_{category}', self.__split,
                         '*.ply'))

            for path in paths:
                # Mesh name without extension.
                mesh_name = path.rpartition('/')[2].split('.')[0]
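                # (E.g., a path ending in 'coseg_vases/train/3.ply' would give
                # the mesh name '3'; the category and file name here are only
                # illustrative.)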
                # Load mesh.
                mesh = pymesh.load_mesh(path)
                # Load per-face class label.
                face_label_file = osp.join(self.raw_dir, f'coseg_{category}',
                                           'seg', f"{mesh_name}.eseg")
                with open(face_label_file, 'r') as f:
                    y = np.loadtxt(f, dtype='long')
                # Preprocess mesh.
                mesh = preprocess_mesh(
                    input_mesh=mesh,
                    prevent_nonmanifold_edges=self.__prevent_nonmanifold_edges)
                # Perform data augmentation and post-augmentation.
                for augmentation_idx in range(self.__num_augmentations):
                    augmented_mesh = augmentation(
                        mesh=mesh,
                        vertices_scale_mean=self.__vertices_scale_mean,
                        vertices_scale_var=self.__vertices_scale_var,
                        edges_flip_fraction=self.__edges_flip_fraction)
                    postaugmented_mesh = post_augmentation(
                        mesh=augmented_mesh,
                        slide_vertices_fraction=self.__slide_vertices_fraction)
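                    # (edges_flip_fraction and slide_vertices_fraction
                    # presumably control the fraction of edges that are
                    # randomly flipped and of vertices that are randomly slid,
                    # respectively; this is inferred from the parameter names.)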
                    # Convert the mesh to dual-primal graphs.
                    graph_creator = GraphCreator(
                        mesh=postaugmented_mesh,
                        single_dual_nodes=self.__single_dual_nodes,
                        undirected_dual_edges=self.__undirected_dual_edges,
                        primal_features_from_dual_features=(
                            self.__primal_features_from_dual_features),
                        prevent_nonmanifold_edges=(
                            self.__prevent_nonmanifold_edges))
                    primal_graph, dual_graph = graph_creator.create_graphs()
                    primal_edge_to_dual_node_idx = (
                        graph_creator.primal_edge_to_dual_node_idx)
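                    # (primal_edge_to_dual_node_idx presumably maps each edge
                    # of the primal graph to the index of the corresponding
                    # node in the dual graph; it is saved below together with
                    # the two graphs.)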
                    # Add the ground-truth per-face class label to the primal
                    # graph.
                    assert (len(y) == primal_graph.num_nodes)
                    primal_graph.y = torch.from_numpy(y).long()
                    # Save the graphs and the dictionary.
                    torch.save(
                        primal_graph,
                        osp.join(
                            process_subfolder,
                            f"{mesh_name}_aug_{augmentation_idx}_primal.pt"))
                    torch.save(
                        dual_graph,
                        osp.join(
                            process_subfolder,
                            f"{mesh_name}_aug_{augmentation_idx}_dual.pt"))
                    petdni_filename = osp.join(
                        process_subfolder,
                        f"{mesh_name}_aug_{augmentation_idx}_petdni.pkl")
                    pymesh.save_mesh(
                        osp.join(process_subfolder,
                                 f"{mesh_name}_aug_{augmentation_idx}.obj"),
                        postaugmented_mesh)
                    try:
                        with open(petdni_filename, 'wb') as f:
                            pkl.dump(primal_edge_to_dual_node_idx, f)
                    except IOError:
                        raise IOError("Error while writing file "
                                      f"'{petdni_filename}'. Exiting.")

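        # (pre_filter and pre_transform are presumably the standard
        # torch_geometric dataset hooks; they are not supported by this
        # dataset.)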
        if (self.pre_filter is not None):
            raise NotImplementedError

        if (self.pre_transform is not None):
            raise NotImplementedError

        # Save the input parameters of the dataset, so that when using it
        # without re-preprocessing the data, one can make sure that the input
        # parameters match those with which the preprocessed data saved to disk
        # was generated.
        dataset_parameters_filename = osp.join(
            self.processed_dir, f'processed_data_params_{self.__split}.pkl')
        try:
            with open(dataset_parameters_filename, 'wb') as f:
                pkl.dump(self.input_parameters, f)

        except IOError:
            raise IOError("Error while writing file dataset parameter file "
                          f"'{dataset_parameters_filename}'. Exiting.")