Example #1
    def query(self,
              model_path,
              n_samples_query,
              n_results,
              custom=False,
              weights=False):
        vertices, element_dict, info = read_model(model_path)
        shape = Shape(vertices, element_dict, info)
        shape = process(shape, n_vertices_target=self.n_vertices_target)
        feature_dict = extract_features(shape,
                                        self.n_bins,
                                        n_samples=n_samples_query)
        feature_df = data_dict_parser(feature_dict)
        feature_df, _ = sample_normalizer(
            feature_df,
            *self.sample_normalization_parameters,
            divide_distributions=self.divide_distributions)
        feature_df_numeric = feature_df.select_dtypes(np.number)
        # Make sure the query columns match the database columns and are in the same order
        assert list(feature_df_numeric.columns) == list(
            self.df_numeric.columns), "Column mismatch!"
        query_vector = feature_df_numeric.iloc[0, :].values.astype(np.float32)

        if not custom:
            distances, indices = self.faiss_knn.query(query_vector, n_results)
        else:
            distances, indices = self.custom_knn.query(query_vector,
                                                       n_results,
                                                       weights=weights)

        distances = distances.flatten().tolist()  # Flatten batch dimension
        indices = indices.flatten().tolist()
        df_slice = self.df[self.df.index.isin(indices)].copy()  # copy to avoid SettingWithCopyWarning
        df_slice['distance'] = df_slice.index.map(
            lambda x: distances[indices.index(x)])

        #Add missing data to query df
        feature_df['file_name'] = str(model_path)
        feature_df['classification'] = 'query_input'
        feature_df['distance'] = 0
        # Put it at top of slice
        df_slice = pd.concat([df_slice, feature_df])
        df_slice = df_slice.sort_values('distance')

        return distances, indices, df_slice
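
A hedged usage sketch for the query method above; `engine` stands for an instance of the enclosing retrieval class (not shown here), and the parameter values are illustrative rather than taken from this code.

# Hypothetical call; `engine` is assumed to already hold the fitted FAISS index
# and the normalized feature database used by query().
from pathlib import Path

distances, indices, df_slice = engine.query(
    Path('data/benchmark/db/0/m0/m0.ply'),
    n_samples_query=10000,
    n_results=10)
print(df_slice[['file_name', 'classification', 'distance']])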
    def process_subset(self, file_list, apply_processing, n_vertices_target,
                       n_bins, process_index):
        print(f' {process_index} : Starting subset processor!')
        data_subset = {k: [] for k in self.columns + self.col_array}
        for index, file in enumerate(file_list):

            if index % 50 == 0:
                print(f' {process_index} : Is at {index}/{len(file_list)}!')

            vertices, element_dict, info = read_model(Path(file))
            shape = Shape(vertices, element_dict, info)

            if apply_processing:
                shape = process(shape, n_vertices_target=n_vertices_target)
            else:
                shape.make_pyvista_mesh()

            # Model id is the numeric part of the file name, e.g. "m42.off" -> "42"
            model_id = os.path.basename(file).split(".")[0].replace("m", "")
            if model_id in self.classification_dict:
                classification = self.classification_dict[model_id]
            else:
                classification = None

            data_subset["classification"].append(classification)
            data_subset["file_name"].append(file)

            #Get features
            feature_dict = extract_features(shape,
                                            n_bins=n_bins,
                                            n_samples=self.n_samples)

            # Add them to the accumulated data
            for key, val in feature_dict.items():
                data_subset[key].append(val)
        print(f'{process_index} : Finished!')
        return data_subset
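
The file_list / process_index signature suggests process_subset is meant to be fanned out over several worker processes; the driver below is a sketch of that pattern, not code from this project, and the chunk count, n_vertices_target and n_bins values are assumptions.

# Hypothetical driver; `extractor` stands for an instance of the class above.
import numpy as np
from multiprocessing import Pool
from utils import get_all_file_paths

file_paths = get_all_file_paths('data/benchmark', '.off')
chunks = np.array_split(file_paths, 4)                      # 4 worker processes (assumed)
args = [(list(chunk), True, 1500, 15, i) for i, chunk in enumerate(chunks)]
with Pool(4) as pool:
    subsets = pool.starmap(extractor.process_subset, args)  # one data_subset dict per process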
Example #3
def extract_features(shape, n_bins, n_samples):
    '''Extract relevant features; input must be a Shape or a path'''

    if isinstance(shape, Shape):
        pass
    elif isinstance(shape, (Path, str)):
        shape = Shape(*read_model(Path(shape)))
    else:
        raise TypeError("Input must be Shape or path")

    #Make pyvista mesh if not already made
    if not shape.pyvista_mesh:
        shape.make_pyvista_mesh()
    feature_dict = {}

    feature_dict["n_vertices"] = shape.n_vertices
    feature_dict["n_triangles"] = shape.n_triangles
    feature_dict["n_quads"] = shape.n_quads
    shape.bounding_rect()
    feature_dict["bounding_box"] = shape.bounding_rect_vertices

    feature_dict["volume"] = np.maximum(
        volume(shape.vertices, shape.element_dict["triangles"]),
        0.01)  #clamp to avoid 0 volume for 2d models

    feature_dict["surface_area"] = shape.pyvista_mesh.area
    corners = shape.bounding_rect_vertices.reshape((-1, 3))
    bounding_box_sides = corners.max(axis=0) - corners.min(axis=0)
    bounding_box_sides = np.maximum(
        bounding_box_sides,
        0.01)  # clamp from below so there is no division by zero for essentially 2d models
    feature_dict["bounding_box_ratio"] = np.max(bounding_box_sides) / np.min(
        bounding_box_sides)
    feature_dict["compactness"] = np.power(feature_dict["surface_area"], 3) / (
        36 * np.pi * np.power(feature_dict["volume"], 2))
    feature_dict["bounding_box_volume"] = np.prod(bounding_box_sides)
    feature_dict["diameter"] = calculate_diameter(shape.vertices)

    *_, eigenvalues = align(shape.vertices)

    feature_dict["eccentricity"] = np.max(eigenvalues) / np.maximum(
        np.min(eigenvalues), 0.01)  #also clamp
    #Histograms
    feature_dict["angle_three_vertices"] = angle_three_random_vertices(
        shape.vertices, n_bins=n_bins, n_samples=n_samples)
    feature_dict["barycenter_vertice"] = barycenter_vertice(
        shape.vertices,
        np.zeros(3, dtype=np.float32),
        n_bins=n_bins,
        n_samples=n_samples)
    feature_dict["two_vertices"] = two_vertices(shape.vertices,
                                                n_bins=n_bins,
                                                n_samples=n_samples)
    feature_dict["square_area_triangle"] = square_area_triangle(
        shape.vertices, n_bins=n_bins, n_samples=n_samples)
    feature_dict["cube_volume_tetrahedron"] = cube_volume_tetrahedron(
        shape.vertices, n_bins=n_bins, n_samples=n_samples)

    return feature_dict
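
A minimal usage sketch of the path-handling branch of extract_features; the n_bins / n_samples values are illustrative, only the model path appears elsewhere in this codebase.

# Illustrative call; parameter values are assumptions.
features = extract_features('data/benchmark/db/0/m0/m0.ply', n_bins=10, n_samples=10000)
print(features['volume'], features['compactness'], features['eccentricity'])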
Example #4
    p3 = vertices_used[2::3]

    # Signed volume: sum of scalar triple products, one tetrahedron (origin, p1, p2, p3) per triangle
    return (p1 * np.cross(p2, p3)).sum() / 6


def volume(vertices, triangles):
    try:
        hull = ConvexHull(vertices)
    except Exception:
        print("Could not calculate convex hull for volume, returning fallback value")
        return 0.1
    return hull.volume
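
A quick illustration of the fallback branch: an essentially 2D (coplanar) point set has no 3D convex hull, so ConvexHull raises and the 0.1 clamp value is returned instead of a zero volume.

# Coplanar vertices (a flat quad): ConvexHull raises, so the fallback value is returned.
flat_vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]], dtype=np.float32)
flat_triangles = np.array([[0, 1, 2], [1, 3, 2]])
print(volume(flat_vertices, flat_triangles))  # -> 0.1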


if __name__ == "__main__":

    # coarse_1 = get_princeton_classifications(r"data\benchmark\classification\v1\coarse1\coarse1Train.cla")
    base = get_princeton_classifications(
        r"data\benchmark\classification\v1\base\train.cla")

    path = Path(r"data/cube2.off")
    path = Path('data/benchmark/db/0/m0/m0.ply')
    vertices, element_dict, info = read_model(path)
    print(volume(vertices, element_dict["triangles"]))
    #angle_three_random_vertices(vertices,n_samples=1e+6)
    #barycenter_vertice(vertices,np.zeros(3),n_samples=1000)
    #two_vertices(vertices,n_samples=1000)
    #square_area_triangle(vertices,n_samples=1000)
    #cube_volume_tetrahedron(vertices,n_samples=1000)
Example #5
    def process(self, path=None, vertices=None, indices=None, info=None):

        if path:
            vertices, element_dict, info = read_model(path)
            indices = element_dict["triangles"]
            print(f"Reading {path}")
        else:
            assert isinstance(vertices, np.ndarray) and isinstance(indices, np.ndarray), \
                "Define path or both vertices and indices"

        # Number of indices before the bounding box is appended; used for the draw calls below
        pre_box = indices.size

        bounding_rect_vertices, bounding_rect_indices = bounding_box(vertices, indices)

        vertices = np.append(vertices, bounding_rect_vertices)
        indices = np.append(indices, bounding_rect_indices)

        vertex_normals = pyrr.vector3.generate_vertex_normals(
            vertices.reshape((-1, 3)),
            indices.reshape((-1, 3)),
            normalize_result=True).flatten()

        # Buffer layout: all positions first, then all vertex normals
        vertices_final = np.append(vertices, vertex_normals)

        # initializing glfw library
        if not glfw.init():
            raise Exception("glfw can not be initialized!")

        # creating the window
        window = glfw.create_window(1280, 720, "My OpenGL window", None, None)

        input_handler = InputHandler(window)
        # check if window was created
        if not window:
            glfw.terminate()
            raise Exception("glfw window can not be created!")

        # set window's position
        glfw.set_window_pos(window, 400, 200)

        # set the callback function for window resize

        glfw.set_window_size_callback(window, self.window_resize)
        # make the context current

        glfw.make_context_current(window)

        as_points = vertices.reshape(-1, 3)
        barycenter = as_points.mean(axis=0)

        max_x, max_y, max_z = as_points.max(axis=0)
        min_x, min_y, min_z = as_points.min(axis=0)

        middle_point = np.array(
            [min_x + (max_x - min_x) / 2, min_y + (max_y - min_y) / 2, min_z + (max_z - min_z) / 2])

        shader = shader_loader.compile_shader("src/shaders/vert.vs", "src/shaders/frag.fs")

        # Vertex Buffer Object
        VBO = glGenBuffers(1)
        VAO = glGenVertexArrays(1)
        glBindBuffer(GL_ARRAY_BUFFER, VBO)
        glBufferData(GL_ARRAY_BUFFER, vertices_final.nbytes, vertices_final, GL_STATIC_DRAW)

        # positions
        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * 4, ctypes.c_void_p(0))
        glEnableVertexAttribArray(0)
        # normals (stored after all positions, hence the byte offset of 4 * len(vertices))
        glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 3 * 4, ctypes.c_void_p(4 * len(vertices)))
        glEnableVertexAttribArray(1)

        # Element Buffer Object
        EBO = glGenBuffers(1)
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)

        glUseProgram(shader)
        glClearColor(0, 0.1, 0.1, 1)

        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glEnable(GL_DEPTH_TEST)

        # Shader uniform locations
        model_loc = glGetUniformLocation(shader, "model")
        self.proj_loc = glGetUniformLocation(shader, "projection")
        view_loc = glGetUniformLocation(shader, "view")
        color_loc = glGetUniformLocation(shader, "color")
        transform_loc = glGetUniformLocation(shader, "transform")
        light_loc = glGetUniformLocation(shader, "light")

        window_height = glfw.get_window_size(window)[1]
        window_width = glfw.get_window_size(window)[0]
        projection = pyrr.matrix44.create_perspective_projection_matrix(
            fovy=45, aspect=window_width/window_height, near=0.1, far=100)
        #projection = pyrr.matrix44.create_orthogonal_projection_matrix(0,1280,0,720,-1000,1000)

        scale = pyrr.matrix44.create_from_scale(pyrr.Vector3([1] * 3))

        # eye pos, target, up
        view = pyrr.matrix44.create_look_at(pyrr.Vector3(
            [0, 0, 3]), pyrr.Vector3([0, 0, 0]), pyrr.Vector3([0, 1, 0]))
        proj_matrix = glGetUniformLocation(shader, "projection")

        # Translate so the model's bounding-box centre sits at the origin
        initial_offset = middle_point
        translation = pyrr.matrix44.create_from_translation(
            pyrr.Vector3(-initial_offset))

        ## Input
        rotation = pyrr.matrix44.create_from_axis_rotation(np.array([0, 1, 0]), 0)

        glfw.set_key_callback(window, input_handler.keyboard_handler)
        glfw.set_scroll_callback(window, input_handler.scroll_handler)
        glfw.set_mouse_button_callback(window, input_handler.mouse_handler)


        previous_displacement = np.zeros(2)

        rot_y = pyrr.Matrix44.from_y_rotation(0.8 * glfw.get_time())
        glUniformMatrix4fv(transform_loc, 1, GL_FALSE, rot_y)

        glEnable(GL_LIGHTING)
        glEnable(GL_COLOR_MATERIAL)
        while not glfw.window_should_close(window):
            glfw.poll_events()

            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

            if input_handler.right_key_pressed:
                current_cursor_position = glfw.get_cursor_pos(window)
                cursor_displacement = np.array(current_cursor_position) - np.array(
                    input_handler.start_cursor_position) - previous_displacement
                input_handler.rotation_list[0] += input_handler.rotations_per_screen_hor * \
                    cursor_displacement[0]/window_width
                input_handler.rotation_list[1] += input_handler.rotations_per_screen_vert * \
                    cursor_displacement[1]/window_height
                previous_displacement = cursor_displacement

            rot_x = pyrr.Matrix44.from_x_rotation(input_handler.rotation_list[1])

            rot_y = pyrr.Matrix44.from_y_rotation(input_handler.rotation_list[0])

            rotation = pyrr.matrix44.multiply(rot_x, rot_y)

            view = pyrr.matrix44.create_look_at(pyrr.Vector3(input_handler.eye), pyrr.Vector3(
                input_handler.target), pyrr.Vector3(input_handler.up))

            light = pyrr.matrix44.create_identity()
            glUniformMatrix4fv(light_loc, 1, GL_FALSE, light)
       
            model = pyrr.matrix44.multiply(scale, translation)
            model = pyrr.matrix44.multiply(model, rotation)

            glUniformMatrix4fv(model_loc, 1, GL_FALSE, model)
            glUniformMatrix4fv(view_loc, 1, GL_FALSE, view)
            glUniformMatrix4fv(proj_matrix, 1, GL_FALSE, projection)

            default_RGB = np.ones(3, dtype=np.float32)  # white
            color = pyrr.Vector3(default_RGB)
            glUniform3fv(color_loc, 1, color)

            if input_handler.mode == "default":
    
                glDrawElements(GL_TRIANGLES, pre_box, GL_UNSIGNED_INT, None)
            elif input_handler.mode == "point_cloud":
                
                glDrawElements(GL_POINTS, pre_box, GL_UNSIGNED_INT, None)
                
            elif input_handler.mode=="wireframe":
                glEnable(GL_POLYGON_OFFSET_FILL)
                glPolygonOffset(1.0, 2)
                glDrawElements(GL_TRIANGLES, pre_box, GL_UNSIGNED_INT, None)
                RGB = np.zeros(shape=(3,),dtype=np.float32) 
                color = pyrr.Vector3(RGB)
                glUniform3fv(color_loc,1,RGB)
                glDrawElements(GL_LINES, pre_box, GL_UNSIGNED_INT, None)
            elif input_handler.mode == "bounding_box":
                glDrawElements(GL_LINES, len(indices), GL_UNSIGNED_INT, None)
            else:
                raise Exception("Invalid Mode!")

   
          

            

            glfw.swap_buffers(window)

        # terminate glfw, free up allocated resources
        glfw.terminate()

from file_reader import read_model, write_model_as_ply
from utils import get_all_file_paths
from tqdm import tqdm

# Script that makes a .ply copy of each model in the dataset, for three.js viewing
dataset_path = 'data/benchmark'

off_paths = get_all_file_paths(dataset_path, '.off')

for path in tqdm(off_paths):
    verts, faces, _ = read_model(path)
    new_path = path.replace('.off', '.ply')
    write_model_as_ply(verts, faces['triangles'], new_path)