Example #1
import glob
import os

import pyvista as pv


def plot_prediction(index, prediction, ground_truth, labels, path, title=""):
    """Plot one figure per class, showing each test mesh with its predicted label: green if correct, red if wrong.

    :param index: array[n_labels]
        An array containing the index of the test meshes for each class
    :param prediction: array[n_prediction]
        The result of the classification
    :param ground_truth: array[n_prediction]
        The ground truth labels, in the same order as prediction
    :param labels: list
        A list containing the classes
    :param path: string or os.path
        The path where to save the plots
    :param title: string
        Optional suffix appended to each output file name
    :return:
    """
    os.makedirs(os.path.join(path, "Qualitative"), exist_ok=True)
    i_prediction = 0
    err = prediction == ground_truth

    for inds, l in zip(index, labels):
        plotter = pv.Plotter(shape=(1, inds.shape[0]),  # one subplot per test mesh of this class
                             off_screen=False,
                             window_size=[1024, 1024 // 2])
        plotter.set_background("white")
        j = 0
        offs = glob.glob(os.path.join('.', 'Dataset', l, "*.off"))
        for i in inds:

            mesh = pv.read(offs[i])
            plotter.subplot(0, j)
            plotter.add_text(labels[prediction[i_prediction]],
                             color="green" if err[i_prediction] else "red",
                             font_size=10)
            plotter.add_mesh(mesh, smooth_shading=True, color="grey")
            j += 1
            i_prediction += 1

        plotter.save_graphic(
            os.path.join(path, "Qualitative", l + title + '.pdf'))
    pv.close_all()
    return
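The green/red choice above reduces to an element-wise comparison between predictions and labels. A minimal, self-contained check of that logic (made-up arrays, no mesh files needed):

import numpy as np

prediction = np.array([0, 2, 1])
ground_truth = np.array([0, 1, 1])
err = prediction == ground_truth                   # [True, False, True]
colors = ["green" if ok else "red" for ok in err]
print(colors)                                      # ['green', 'red', 'green']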
Example #2
def get_mesh_pv(self, indices=None):
    """Return the pyvista mesh object (or submesh).

    Parameters
    ----------
    self : MeshVTK
        a MeshVTK object
    indices : list
        list of the points to extract (optional)

    Returns
    -------
    mesh : pyvista.core.pointset.UnstructuredGrid
        a pyvista UnstructuredGrid object
    """

    # Already available => Return
    if self.mesh is not None:
        mesh = self.mesh
        # Extract submesh
        if indices is not None:
            mesh = mesh.extract_points(indices)
        return mesh

    # Read mesh file
    else:
        if self.format != "vtk":
            # Write vtk files with meshio
            mesh = read(self.path + "/" + self.name + "." + self.format)
            mesh.write(self.path + "/" + self.name + ".vtk")

        # Read .vtk file with pyvista
        mesh = pv.read(self.path + "/" + self.name + ".vtk")

        # Extract submesh
        if indices is not None:
            mesh = mesh.extract_points(indices)

        if self.is_pyvista_mesh:
            self.mesh = mesh

        return mesh
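For reference, extract_points yields an UnstructuredGrid whatever dataset type goes in, which is why the docstring can promise that return type. A quick self-contained sketch on a built-in sphere (no MeshVTK object involved):

import numpy as np
import pyvista as pv

mesh = pv.Sphere()
sub = mesh.extract_points(np.arange(100))  # cells touching the first 100 points
print(type(sub).__name__)                  # UnstructuredGrid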
Example #3
    def __init__(self, *args):
        """ initializes MeshFix using a mesh """
        def parse_mesh(mesh):
            self.v = mesh.points
            faces = mesh.faces
            if faces.size % 4:
                raise Exception(
                    'Invalid mesh.  Must be an all triangular mesh.')
            self.f = np.ascontiguousarray(faces.reshape(-1, 4)[:, 1:])

        if not args:
            raise invalid_input  # exception instance defined elsewhere in the module (not shown in this excerpt)
        elif isinstance(args[0], pv.PolyData):
            parse_mesh(args[0])
        elif isinstance(args[0], np.ndarray):
            self._load_arrays(args[0], args[1])
        elif isinstance(args[0], str):
            mesh = pv.read(args[0])
            parse_mesh(mesh)
        else:
            raise invalid_input
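The reshape(-1, 4)[:, 1:] in parse_mesh relies on PyVista's flat face encoding, where each triangle is stored as [3, i0, i1, i2]. A small sketch of what gets extracted:

import numpy as np
import pyvista as pv

tri_mesh = pv.Sphere()  # an all-triangle PolyData
v = tri_mesh.points
f = np.ascontiguousarray(tri_mesh.faces.reshape(-1, 4)[:, 1:])
print(v.shape, f.shape)  # (n_points, 3) (n_faces, 3)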
Example #4
def get_mesh_pv(self, path=RESULT_DIR + "/temp.vtk", indices=None):
    """Return the pyvista mesh object (or submesh).

    Parameters
    ----------
    self : MeshMat
        a MeshMat object
    indices : list
        list of the nodes to extract (optional)

    Returns
    -------
    mesh : pyvista.core.pointset.UnstructuredGrid
        a pyvista UnstructuredGrid object
    """

    nodes = self.get_node()
    cells, _, _ = self.get_cell()

    cells_meshio = [(key, cells[key]) for key in cells]

    # Make sure that the output directory exists
    if not isdir(dirname(path)):
        makedirs(dirname(path))

    # Write .vtk file using meshio
    meshio.write_points_cells(filename=path, points=nodes, cells=cells_meshio)

    # Read .vtk file with pyvista
    mesh = pv.read(path)

    # Extract submesh
    if indices is not None:
        mesh = mesh.extract_points(indices)

    remove(path)

    return mesh
Example #5
    def open_mesh(self):
        """ add a mesh to the pyqt frame """
        global mesh

        # open file
        file_info = QtWidgets.QFileDialog.getOpenFileName()
        file_dir = file_info[0]
        
        # determine file type and if conversion needed
        head, tail = os.path.split(file_dir)
        root, ext = os.path.splitext(tail)

        # convert mesh file type
        #if ext != ".vtk" or ext != ".VTK":
        #    mesh = meshio.read(file_dir)
        #    meshio.write(root + ".vtk", mesh)
        #    mesh = pv.read(head + "/" + root + ".vtk")
            # need to store elsewhere or delete .vtk file in the future
        #else:
        #    mesh = pv.read(file_dir)

        # read mesh & transform according to principal axes
        pre = trimesh.load_mesh(file_dir)
        orient = pre.principal_inertia_transform
        pre = pre.apply_transform(orient)
        pre.export('data/'+ root + '_oriented.STL')
        mesh = pv.read('data/'+ root + '_oriented.STL')

        # show transformed mesh
        self.plotter.add_mesh(mesh, show_edges=True, color="w", opacity=0.6)

        # reset plotter
        self.reset_plotter()

        # find the mesh centroid and translate the mesh so the centroid is at the origin
        self.centroid()

        # show bounding box
        self.plotter.add_bounding_box(opacity=0.5, color="y")
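The orientation step is plain trimesh and can be tried on a built-in primitive, no file dialog involved; a minimal sketch:

import trimesh

m = trimesh.creation.box()
m.apply_transform(m.principal_inertia_transform)  # align the principal inertia axes with x/y/z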
Example #6
def unwrap_cylindrical_vtk_using_centerline(pv_mesh,
                                            inlet_origin=None,
                                            clip=3,
                                            points_per_contour=256,
                                            number_centerline_points=384):

    # Note: despite its name, pv_mesh is expected here as a file path (str or pathlib.Path);
    # it is read into both a SurfaceMesh and a PyVista dataset below.
    assert isinstance(pv_mesh, (pt.Path, str))

    pv_mesh = pt.Path(pv_mesh)

    surface_mesh = CM.SurfaceMesh(pv_mesh.as_posix())
    pv_mesh = pv.read(pv_mesh.as_posix())

    if inlet_origin is not None:
        centerline = get_centerline_from_cylindrical_mesh(
            surface_mesh, inlet_origin=inlet_origin)
    else:
        centerline = get_centerline_from_cylindrical_mesh(surface_mesh)

    centerline = centerline(np.linspace(0, 1, number_centerline_points))

    return unwrap_cylinder_vtk_from_centerline(centerline, pv_mesh)
Example #7
    def __init__(self, parent, image_path, scale=1, view_port=None):
        """Initialize BackgroundRenderer with an image."""
        # read the image first as we don't need to create a render if
        # the image path is invalid
        image_data = pyvista.read(image_path)

        super(BackgroundRenderer, self).__init__(parent, border=False)
        self.SetLayer(0)
        self.InteractiveOff()
        self.SetBackground(self.parent.renderer.GetBackground())
        self._scale = scale
        self._modified_observer = None
        self._prior_window_size = None
        if view_port is not None:
            self.SetViewport(view_port)

        # create image actor
        image_actor = vtk.vtkImageActor()
        image_actor.SetInputData(image_data)
        self.add_actor(image_actor, name='background')
        self.camera.ParallelProjectionOn()
        self.resize()
Example #8
from os import path

import pyvista as pv


def show_obj(file_path, turning=False, output_path=''):
    print(f'Reading {file_path} file...', end=' ', flush=True)
    obj = pv.read(file_path)
    print('Done')

    plotter = pv.Plotter()
    plotter.add_mesh(obj)

    if not turning:
        plotter.show_grid()
        plotter.show()
        plotter.close()
    else:
        _, ext = path.splitext(output_path)
        if ext != '.gif':
            raise ValueError(f'{output_path} does not have a .gif extension')

        plotter.show(auto_close=False)
        orbit_path = plotter.generate_orbital_path(n_points=36, shift=obj.length)
        plotter.open_gif(output_path)
        plotter.orbit_on_path(orbit_path, write_frames=True)
        plotter.close()
Example #9
def get_mesh_pv(self, path="temp.vtk", indices=None):
    """Return the pyvista mesh object (or submesh).

    Parameters
    ----------
    self : MeshMat
        a MeshMat object
    indices : list
        list of the points to extract (optional)

    Returns
    -------
    mesh : pyvista.core.pointset.UnstructuredGrid
        a pyvista UnstructuredGrid object
    """

    points = self.get_point()
    cells, nb_cell, indice_dict = self.get_cell()

    for key in cells:
        cells_meshio = [(key, cells[key])]  # TODO : Generalize to any cell type

        # Write .vtk file using meshio
        meshio.write_points_cells(filename=path,
                                  points=points,
                                  cells=cells_meshio)

    # Read .vtk file with pyvista
    mesh = pv.read(path)

    # Extract submesh
    if indices is not None:
        mesh = mesh.extract_points(indices)

    os.remove(path)

    return mesh
Example #10
    def process_file_target(self, file_to_load, age, save_path, filename=None):
        """
        Process the mesh, (vtk PolyData) in the file_to_load (.vtk) and convert it to a graph before pickling (graph, target)
        :param file_to_load: mesh in the file_to_load (.vtk) and convert it to a graph
        :param age: tensor float
        :param save_path: directory for the processed sample to be saved
        :return: 1 meaning success
        """
        if filename is None:
            fp_save_path = os.path.join(
                save_path,
                os.path.basename(file_to_load).replace(".vtk", ".pickle"))
        else:
            fp_save_path = os.path.join(save_path, filename + ".pickle")

        if os.path.exists(fp_save_path):
            # Response of 2 means this file already exists
            return 2

        # Load mesh
        mesh = pv.read(file_to_load)
        # Get the edge sources and destinations
        src, dst = zip(*self.convert_face_array_to_edge_array(
            self.build_face_array(list(mesh.faces))))
        src = np.array(src)
        dst = np.array(dst)
        # Edges are directional in DGL; Make them bi-directional.
        g = dgl.DGLGraph((torch.from_numpy(np.concatenate([src, dst])),
                          torch.from_numpy(np.concatenate([dst, src]))))

        g.ndata['features'], g.ndata['segmentation'] = self.get_node_features(
            mesh)
        # Add self-loops (a required trick; see the DGL discussion forums)
        g.add_edges(g.nodes(), g.nodes())
        g.edata['features'] = self.get_edge_data(mesh, src, dst, g)

        self._save_data_with_pickle(fp_save_path, (g, age))

        return 1
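build_face_array and convert_face_array_to_edge_array are not shown in this excerpt; one plausible reading of that step, sketched on a tiny synthetic face array (hypothetical helper logic, not the author's exact code):

import numpy as np

faces = np.array([3, 0, 1, 2, 3, 1, 3, 2])  # PyVista flat layout: [3, i0, i1, i2, ...]
tri = faces.reshape(-1, 4)[:, 1:]
edges = sorted({tuple(sorted((t[i], t[(i + 1) % 3]))) for t in tri for i in range(3)})
src, dst = zip(*edges)  # unique undirected edges, later mirrored to make them bi-directional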
Example #11
    def get_indices(file_name):
        """Map each mesh point to its (i, j, k) index on the structured grid implied by the unique coordinates."""

        mesh_pv = pv.read(file_name)

        pts = mesh_pv.points
        pts_x = np.unique(pts[:, 0])
        pts_y = np.unique(pts[:, 1])
        pts_z = np.unique(pts[:, 2])
        indices = []

        for i in range(pts.shape[0]):
            x = pts[i, 0]
            y = pts[i, 1]
            z = pts[i, 2]

            x_index = np.where(pts_x == x)[0][0]
            y_index = np.where(pts_y == y)[0][0]
            z_index = np.where(pts_z == z)[0][0]

            indices.append([x_index, y_index, z_index])

        return indices, pts_z
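Because np.unique returns sorted values, the per-point np.where lookups can be replaced with np.searchsorted; an equivalent vectorised form, sketched with synthetic points:

import numpy as np

pts = np.random.rand(10, 3).round(1)  # made-up point cloud
axes = [np.unique(pts[:, k]) for k in range(3)]
indices = np.column_stack([np.searchsorted(axes[k], pts[:, k]) for k in range(3)])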
Example #12
def test_ensight_multi_block_io(extension, binary, tmpdir, ant, sphere,
                                uniform, airplane, globe):
    filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
    # multi = ex.load_bfs()  # .case file
    multi = ex.download_backward_facing_step()  # .case file
    # Now check everything
    assert multi.n_blocks == 4
    array_names = [
        'v2', 'nut', 'k', 'nuTilda', 'p', 'omega', 'f', 'epsilon', 'U'
    ]
    for block in multi:
        assert block.array_names == array_names
    # Save it out
    multi.save(filename, binary)
    foo = MultiBlock(filename)
    assert foo.n_blocks == multi.n_blocks
    for block in foo:
        assert block.array_names == array_names
    foo = pyvista.read(filename)
    assert foo.n_blocks == multi.n_blocks
    for block in foo:
        assert block.array_names == array_names
Example #13
    def array_reshape(meshfile, data_shape, channel_first=True):

        mesh_pv = pv.read(meshfile)

        disps = mesh_pv.point_arrays['computedDispl']
        forces = mesh_pv.point_arrays['externalForce']
        pts = mesh_pv.points

        pts_x = np.unique(pts[:, 0])
        pts_y = np.unique(pts[:, 1])
        pts_z = np.unique(pts[:, 2])

        forces_reshape = np.zeros(data_shape)
        disps_reshape = np.zeros(data_shape)

        for i in range(forces.shape[0]):
            x = pts[i, 0]
            y = pts[i, 1]
            z = pts[i, 2]

            x_index = np.where(pts_x == x)[0][0]
            y_index = np.where(pts_y == y)[0][0]
            z_index = np.where(pts_z == z)[0][0]

            if channel_first:
                forces_reshape[:, x_index, y_index, z_index] = forces[i, :]
                disps_reshape[:, x_index, y_index, z_index] = disps[i, :]

            else:
                forces_reshape[x_index, y_index, z_index, :] = forces[i, :]
                disps_reshape[x_index, y_index, z_index, :] = disps[i, :]

        force_mean = np.mean(np.abs(forces), axis=0)
        disp_mean = np.mean(np.abs(disps), axis=0)
        force_std = np.std(np.abs(forces), axis=0)
        disp_std = np.std(np.abs(disps), axis=0)

        return forces_reshape, disps_reshape, force_mean, disp_mean, force_std, disp_std
Example #14
    def load(config):
        '''
        input: config with the data path to the .vtk simulation files and normalisation statistics
        return: a list of torch_geometric Data objects, one per .vtk file
        '''
        FORCE_MEAN, FORCE_STD = DatasetLoader.dic_to_np(config.json_force)
        DISP_MEAN, DISP_STD = DatasetLoader.dic_to_np(config.json_disp)

        print("FORCE_MEAN", FORCE_STD)
        print("DISP_STD", DISP_STD)

        dataset = []
        knn = T.KNNGraph(k=6)
        print('[INFO] Loading dataset ...')
        for file_path in sorted(os.listdir(config.data_path)):

            # print('[INFO] Reading Folder named: {}'.format(folder))
            if file_path.split('.')[-1] == 'vtk':

                full_path = os.path.join(config.data_path, file_path)

                mesh_pv = pv.read(full_path)
                force = mesh_pv.point_arrays['externalForce']
                disp = mesh_pv.point_arrays['computedDispl']

                force_norm = (force - FORCE_MEAN) / FORCE_STD
                disp_norm = (disp - DISP_MEAN) / DISP_STD

                point_torch = torch.from_numpy(mesh_pv.points)
                disp_torch = torch.from_numpy(disp_norm)  #labels
                force_torch = torch.from_numpy(force_norm)  #node features

                data = Data(x=force_torch, y=disp_torch, pos=point_torch)

                data = knn(data)
                dataset.append(data)

        return dataset
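If torch_geometric (and its torch_cluster dependency) is installed, the Data-plus-KNNGraph pattern can be exercised on random tensors without any .vtk input; a rough sketch:

import torch
import torch_geometric.transforms as T
from torch_geometric.data import Data

pos = torch.rand(50, 3)
data = Data(x=torch.rand(50, 3), y=torch.rand(50, 3), pos=pos)
data = T.KNNGraph(k=6)(data)  # builds edge_index from the 6 nearest neighbours in pos
print(data.edge_index.shape)  # torch.Size([2, num_edges])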
Example #15
def import_vtk_data(path: str = '') -> "tuple[pd.DataFrame, pv.DataSet]":
    '''
    Create a pandas dataframe from the path to a vtk data file.
    Also return the pyvista mesh object.
    '''
    if not path:
        path = input('Enter the path of your vtk data file: ')

    mesh = pv.read(path)

    vector_names = []

    # Detect which variables are vectors
    for var_name in mesh.array_names:
        if np.size(mesh.get_array(var_name)) != mesh.n_points:
            vector_names.append(var_name)

    # Make a dataframe from only scalar mesh arrays (i.e. exclude vectors)
    var_names = [name for name in mesh.array_names if name not in vector_names]
    var_arrays = np.transpose(
        [mesh.get_array(var_name) for var_name in var_names])
    df = pd.DataFrame(var_arrays, columns=var_names)

    # Add the vectors back with one row per component
    for vector_name in vector_names:
        # Get dimension of data e.g., 1D or 2D
        data_dim = mesh.get_array(vector_name).ndim

        if data_dim > 1:
            # Get the number of components (columns) of a typical vector
            dim = mesh.get_array(vector_name).shape[1]
            # split data using dim instead of hard coding
            df[[vector_name + ':' + str(i)
                for i in range(dim)]] = mesh.get_array(vector_name)

    return df, mesh
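A usage sketch, assuming the function above is in scope and writing a throwaway demo.vtk: scalar arrays become single columns, vector arrays are split into name:0 ... name:N-1 columns.

import numpy as np
import pyvista as pv

m = pv.Sphere()
m["temp"] = np.random.rand(m.n_points)    # scalar -> column 'temp'
m["vel"] = np.random.rand(m.n_points, 3)  # vector -> columns 'vel:0'..'vel:2'
m.save("demo.vtk")                        # hypothetical temporary file
df, mesh = import_vtk_data("demo.vtk")
print(df.columns.tolist())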
Example #16
def convert_to_structured(data):
    sys.path.append('fluidity-master')

    fileName = defaultFilePath + '/small3DLSBU/LSBU_0.vtu'
    mesh = pv.read(fileName)

    size = 64
    x = np.linspace(-359.69, 359.69, size)
    y = np.linspace(-338.13, 338.13, size)
    z = np.linspace(0.2, 250, size)
    x, y, z = np.meshgrid(x, y, z)

    grid = pv.StructuredGrid(x, y, z)
    result = grid.interpolate(mesh, radius=20.)
    result.point_arrays['Velocity'] = data   

    foo = mesh.copy()
    foo.clear_arrays()
    result2 = foo.sample(result)

    p = result2.point_arrays['Velocity']

    return p
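The interpolate step can be tried end-to-end on a self-contained example (built-in sphere, made-up velocity field, assumed grid extents):

import numpy as np
import pyvista as pv

src = pv.Sphere()
src["Velocity"] = np.random.rand(src.n_points, 3)
x, y, z = np.meshgrid(*[np.linspace(-0.6, 0.6, 8)] * 3)
grid = pv.StructuredGrid(x, y, z)
result = grid.interpolate(src, radius=0.5)  # Gaussian-kernel interpolation onto the grid
print(result["Velocity"].shape)             # (512, 3)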
Example #17
    def view_obj(self):
        if sys.platform == "win32":
            homepath = os.environ["HOMEPATH"]
        if sys.platform == "linux":
            homepath = os.environ["HOME"]

        fname = QFileDialog.getOpenFileName(self, "Open file", homepath, "OBJ file (*.obj)")

        if self.ui.checkBox_wireframe.isChecked():
            edges = True
            opacity = 0.5
            text = os.path.basename(fname[0]) + " wireframe"
        else:
            edges = False
            opacity = 1
            text = os.path.basename(fname[0])

        if fname[0]:
            mesh = pyvista.read(fname[0])
            plotter = pyvistaqt.BackgroundPlotter()
            plotter.add_mesh(mesh, show_edges=edges, opacity=opacity, color='blue')
            plotter.add_text(text)
            plotter.show_axes()
Example #18
def generate_transmissivity():
    """Generate a file with a transmissivity field from the HERTEN data."""
    import pyvista as pv
    import shutil

    print("Loading Herten data with pyvista")
    mesh = pv.read(VTK_PATH)
    herten = mesh.point_arrays["facies"].reshape(mesh.dimensions, order="F")
    # conductivity values per facies from the supplementary data
    cond = 1e-4 * np.array(
        [2.5, 2.3, 0.61, 260, 1300, 950, 0.43, 0.006, 23, 1.4])
    # assign the conductivities to the facies
    herten_cond = cond[herten]
    # Next, we are going to calculate the transmissivity,
    # by integrating over the vertical axis
    herten_trans = np.sum(herten_cond, axis=2) * mesh.spacing[2]
    # save some grid information
    grid = [mesh.dimensions[:2], mesh.origin[:2], mesh.spacing[:2]]
    print("Saving the transmissivity field and grid information")
    np.savetxt("herten_transmissivity.gz", herten_trans)
    np.savetxt("grid_dim_origin_spacing.txt", grid)
    # Some cleanup. You can comment out these lines to keep the downloaded data
    os.remove("data.zip")
    shutil.rmtree("Herten-analog")
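The fancy-indexing plus vertical-sum step is easy to verify with synthetic data (made-up facies ids, conductivities, and spacing):

import numpy as np

cond = 1e-4 * np.array([2.5, 2.3, 0.61])          # conductivity per facies id
facies = np.random.randint(0, 3, size=(4, 5, 6))  # synthetic facies grid
herten_cond = cond[facies]                        # map ids -> conductivities, same shape
trans = herten_cond.sum(axis=2) * 0.5             # integrate along z with dz = 0.5
print(trans.shape)                                # (4, 5)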
Example #19
def get_velocity_field_structured(fileNumber):
    """
    Used to get the Velocity Field as a numpy array corresponding to a vtu file from the Fluids dataset.
    Note this normalises the returned array. 
    :param fileNumber: int or string
        Used to identify which vtu file to return
        Values are between 0 and 988
    :return: numpy array
        Tracers are returned as numpy array
    """
    folderPath = defaultFilePath + '/small3DLSBU'
    filePath = folderPath + '/LSBU_' + str(fileNumber) + '.vtu'
    sys.path.append('fluidity-master')    
    mesh = pv.read(filePath)
    p = mesh.point_arrays['Velocity']
    

    # Normalise p (normalise, x_min and x_max are defined elsewhere in the module)
    p = normalise(p, x_min, x_max)
    # Convert p into 3 x N array
    p = np.array(p)
    p = p.transpose()
    #p = np.array([p[:]])
    return p
Example #20
    def process_set(self):
        '''Reads and processes the data. Collates the processed data which is later saved.'''
        # 0. Get meta data
        meta_data = read_meta()

        # Get the mapping for the entire dataset, in order to normalise DRAWEM labels for segmentation
        label_mapping = self.get_all_unique_labels(meta_data)

        # 1. Initialise the variables
        data_list = []

        if self.task == 'classification' and self.meta_column_idx == 3:
            categories = {'preterm', 'not_preterm'}
        elif self.task == 'segmentation':
            pass
        else:
            categories = set(meta_data[:, self.meta_column_idx])  # Set of categories {male, female}

        if self.task != 'segmentation':
            # 2. Create category dictionary (mapping: category --> class), e.g. 'male' --> 0, 'female' --> 1
            for class_num, category in enumerate(categories):
                self.classes[category] = class_num

        # 3. These lists will collect all the information for each patient in order
        lens = []
        xs = []
        poss = []
        ys = []
        faces_list = []

        # 4. Iterate through all patient ids
        # for idx, patient_id in enumerate(meta_data[:, 0]):
        print('Processing patient data for the split...')
        for patient_idx in tqdm(self.indices_):

            patient_id, session_id = patient_idx.split('_')

            # Get file path to .vtk/.vtp for one patient
            file_path = self.get_file_path(patient_id, session_id)

            # If file exists
            if os.path.isfile(file_path):

                mesh = pv.read(file_path)

                # Get points
                points = torch.tensor(mesh.points)

                if self.add_faces:
                    # Get faces
                    n_faces = mesh.n_cells
                    faces = mesh.faces.reshape((n_faces, -1))
                    faces = torch.tensor(faces[:, 1:].transpose())

                # Features
                x = self.get_features(self.local_features, mesh)

                if self.task != 'segmentation':
                    # Global features
                    global_x = self.get_global_features(
                        self.global_feature, meta_data, patient_idx)

                # Generating label based on the task. By default regression.
                if self.task == 'classification':
                    patient_data = meta_data[(meta_data[:, 0] == patient_id)
                                             & (meta_data[:, 1] == session_id)][0]
                    if self.meta_column_idx == 3:
                        # Binarise gestational age at 38 weeks for the preterm task
                        y = torch.tensor(
                            [[int(float(patient_data[self.meta_column_idx]) <= 38)] + global_x])
                    else:
                        y = torch.tensor(
                            [[self.classes[patient_data[self.meta_column_idx]]] + global_x])
                    # y = torch.tensor([[self.classes[meta_data[idx, self.meta_column_idx]]] + global_x]) #TODO

                elif self.task == 'segmentation':
                    y = torch.tensor(mesh.get_array('segmentation'))

                    # Retrieve the lengths of each label tensor (i.e. number of labelled points)
                    lens.append(y.size(0))

                # Else, regression
                else:
                    patient_data = meta_data[(meta_data[:, 0] == patient_id)
                                             & (meta_data[:, 1] == session_id)][0]
                    y = torch.tensor(
                        [[float(patient_data[self.meta_column_idx])] + global_x])
                    # y = torch.tensor([[float(meta_data[idx, self.meta_column_idx])] + global_x]) #TODO

                # Add the data to the lists
                xs.append(x)
                poss.append(points)
                ys.append(y)

                if self.add_faces:
                    faces_list.append(faces)

        # Now process the uniqueness of ys
        if self.task == 'segmentation':
            ys_normalised = self.normalise_labels(torch.cat(ys), label_mapping)
            ys = ys_normalised.split(lens)

        if self.add_faces:
            # Now add all patient data to the list
            for x, points, y, faces in zip(xs, poss, ys, faces_list):
                # Create a data object and add to data_list
                data = Data(x=x, pos=points, y=y, face=faces)

                data_list.append(data)

        else:
            # Now add all patient data to the list
            for x, points, y in zip(xs, poss, ys):
                # Create a data object and add to data_list
                data = Data(x=x, pos=points, y=y)

                data_list.append(data)

        # Do any pre-processing that is required
        if self.pre_filter is not None:
            data_list = [d for d in data_list if self.pre_filter(d)]

        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]

        # # Keeping information to look up later.
        # if self.task == 'segmentation':
        #
        #     y = torch.cat(ys).unique(return_inverse=True)[1]
        #
        #     # Get a set of unique labels (already standardized)
        #     self.unique_labels = torch.cat(self.unique_labels).unique()
        #
        #     # Get the number of unique labels
        #     self.num_labels = len(self.unique_labels)

        return self.collate(data_list)
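The segmentation branch concatenates every label tensor, remaps the values to a dense 0..K-1 range, then splits back by the recorded lengths. One way the normalise_labels step could work in isolation (a sketch, not the class's actual implementation):

import torch

ys = [torch.tensor([10, 12]), torch.tensor([12, 10, 42])]
lens = [y.size(0) for y in ys]
dense = torch.cat(ys).unique(return_inverse=True)[1]  # 10 -> 0, 12 -> 1, 42 -> 2
ys = dense.split(lens)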
Example #21
def load_channels():
    """Load a uniform grid of fluvial channels in the subsurface."""
    return pyvista.read(channelsfile)
Example #22
subsurface to create a data mask on a modeling grid. This is a particularly
useful exercise for scenarios where you may want to perform some sort of
modeling in a different manner due to geological differences on the two sides
of the fault - but still have a single modeling grid.

Let's get to it!
"""
import numpy as np

# sphinx_gallery_thumbnail_number = 4
import pyvista as pv
from pyvista import examples

###############################################################################
path, _ = examples.downloads._download_file("opal_mound_fault.vtk")
fault = pv.read(path)
fault

###############################################################################
# Create the modelling grid if you don't already have one
grid = pv.UniformGrid()
# Bottom south-west corner
grid.origin = (329700, 4252600, -2700)
# Cell sizes
grid.spacing = (500, 500, 500)
# Number of cells in each direction
grid.dimensions = (30, 35, 10)
grid

###############################################################################
# Take a quick preview to see where the fault is inside of the grid
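The preview code itself was cut off in this excerpt; a minimal sketch of what it could look like (fault surface plus the grid outline; the plotting choices are assumptions):

p = pv.Plotter()
p.add_mesh(fault, color="red")
p.add_mesh(grid.outline(), color="black")
p.show()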
Example #23
    # Set boundary markers of the mesh to 23
    for faceID in mesh.faceIDs:
        faceID.data().marker = 23

    # Get the root metadata
    gInfo = mesh.getRoot()
    gInfo.ishole = True  # Don't mesh the inside of
    gInfo.marker = -1

    path = Path(args.molecule)
    obj_name = f'{path.stem}.obj'
    pygamer.writeOBJ(obj_name, mesh)

    # Pyvista
    mesh = pv.read(obj_name)
    shell = mesh.decimate(0.97, volume_preservation=True).extract_surface()
    print(f'Decimation: {len(mesh.points)} -> {len(shell.points)}')

    # warp each point by the normal vectors
    for i in range(1, int(args.distance) + 1):
        print(f'Expanding: {i}')
        shell = shell.compute_normals()
        warp = vtk.vtkWarpVector()
        warp.SetInputData(shell)
        warp.SetInputArrayToProcess(0, 0, 0,
                                    vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS,
                                    vtk.vtkDataSetAttributes.NORMALS)
        warp.SetScaleFactor(2)
        warp.Update()
        shell = pv.wrap(warp.GetOutput())
Example #24
class Cloudsampler(QtCore.QObject):
    signalStatus = QtCore.pyqtSignal(str)
    signaloutput = QtCore.pyqtSignal(pv.PolyData)
    signal_full_pc_out = QtCore.pyqtSignal(pv.PolyData)

    plotfile = 'test_objects/stl/chair.stl'
    mesh = pv.read(plotfile)
    mesh_points = mesh.points
    point_cloud = pv.PolyData(mesh_points)
    sampsize = 1000

    sampcloud = []
    sampcl = pv.PolyData()

    def __init__(self, parent=None):
        super(self.__class__, self).__init__(parent)

    @QtCore.pyqtSlot()
    def startWork(self):
        ''' Test if GUI free while in permanent loop
        while True:
            print('stuck in loop')
        '''
        i = 0
        indices = []
        self.sampcloud = []
        sampcl = pv.PolyData()

        while i < self.sampsize:
            rand_pt_idx = random.randint(0, len(self.mesh_points) - 1)
            if rand_pt_idx not in indices:
                indices.append(rand_pt_idx)
                i += 1

        for each in indices:
            self.sampcloud.append(self.mesh_points[each])
        print(self.sampcloud)
        self.sampcl = pv.PolyData(self.sampcloud)
        self.signaloutput.emit(self.sampcl)
        self.signal_full_pc_out.emit(self.point_cloud)
        self.signalStatus.emit('Done Sampling')

    def savetotensfile(self):
        listtenstosave = []
        for each in self.sampcloud:
            vertex = []
            vertex.append(float(each[0]))
            vertex.append(float(each[1]))
            vertex.append(float(each[2]))
            listtenstosave.append(vertex)
        nodepostens = torch.tensor(listtenstosave)
        #print(nodepostens)
        print(len(nodepostens))
        knn_g_edges = dgl.knn_graph(nodepostens, k=20)
        print(knn_g_edges)
        torch.save(nodepostens.clone(), 'nodepostens.pt')
        torch.save(knn_g_edges.clone(), 'knn_g_edges.pt')
        self.signalStatus.emit('Tensor files saved')

    def fixfile(self):
        self.mesh = pv.read(self.plotfile)
        self.mesh_points = self.mesh.points
        self.point_cloud = pv.PolyData(self.mesh_points)
Example #25
def test_read_rectilinear_grid_from_file():
    grid = pyvista.read(examples.rectfile)
    assert grid.n_cells == 16146
    assert grid.n_points == 18144
    assert grid.bounds == [-350.0, 1350.0, -400.0, 1350.0, -850.0, 0.0]
    assert grid.n_arrays == 1
Example #27
import pyvista as pv
import pandas as pd
import numpy as np
import time

# Read & Load VTM (Multiblock VTK)
# load all read files into lst_pv list

t0 = time.time()
lst = ['fastpoly_10.vtm', 'fastpoly_20.vtm']
lst_pv = []
for i in lst:
    tpv = pv.read(i)
    lst_pv.append(tpv)
t1 = time.time()
tt = t1 - t0
print(f'load time: {tt}')
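The .vtm inputs themselves aren't included here, but the same read path can be exercised with a self-contained MultiBlock round trip (hypothetical demo.vtm file name):

import pyvista as pv

mb = pv.MultiBlock([pv.Sphere(), pv.Cube()])
mb.save('demo.vtm')         # hypothetical output file
back = pv.read('demo.vtm')  # pv.read dispatches on the .vtm extension
print(back.n_blocks)        # 2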
Example #28
import pyvista as pv


def plot(mesh):
    plotter = pv.Plotter()
    plotter.add_mesh(mesh)
    plotter.camera.up = (0, 0, -1)
    plotter.camera.Azimuth(90)
    plotter.camera.Elevation(60)
    plotter.show_axes()
    plotter.show_grid()
    plotter.show()


talon = pv.read("talon-original.stl")
talon.rotate_z(180)
talon.rotate_x(180)
talon.translate([0, 0, 27])
talon.points /= -talon.bounds[0]
talon.save("talon.stl")
plot(talon)

yf = pv.read("yf23-original.stl")
yf.rotate_z(-90)
yf.rotate_x(180)
yf.translate([yf.bounds[0], 0, 0])
yf.points /= -yf.bounds[0]
yf.save("yf23.stl")
plot(yf)
Example #29
import json
import os
import sys

import numpy as np
import pyvista as pv

basedir = os.getcwd()

# Read json file
inputfile = sys.argv[1]
with open(inputfile) as json_file:
    json_dat = json.load(json_file)

# Parse json options
casename = json_dat['casename']
datadir = json_dat['datadir']
if datadir == '.':
    datadir = basedir

# Read in base vtk file
dataloc = os.path.join(datadir, 'CFD_DATA', casename)
grid = pv.read(os.path.join(dataloc, 'basegrid.vtk'))
coords = grid.points[:, :2]

# Read in corr matrix
dataloc = os.path.join(datadir, 'PROCESSED_DATA', casename)
print('Reading in correlation matrices...')
# 'vars' and 'points' are defined earlier in the full script (not shown in this excerpt)
corr = []
for var in vars:
    print(var)
    corr.append(np.load(os.path.join(dataloc, 'corr_%d.npy' % var)))

###########################################################
# Save correlations for given points (and vars) to vtk file
###########################################################
for p, pt in enumerate(points):
    # Find nearest point
Example #30
mic_xyz = pd.read_csv(
    'data/DLTdv7_data_2018-08-17_P02_micpointsxyzpts.csv').dropna()
mic_xyz = [mic_xyz.loc[each, :].to_numpy() for each in mic_xyz.index]
mic_xyzh = [np.append(each, 1) for each in mic_xyz]

# Now move the mics from camera calibration space to LiDAR space
# (the transform matrices A and B are defined earlier in the full script).
mic_lidar = [np.matmul(A, each) for each in mic_xyzh]

# and include round 2 transformation
mic_lidarv2 = [np.matmul(B, np.append(each, 1))[:-1] for each in mic_lidar]

#%%
# Now load the triangulated mesh which represents a sub-section of the cave - as
# cut out by Julian

mesh = pv.read('data/lidar_roi.ply')

#%%
plotter = pv.Plotter()
plotter.add_mesh(mesh, show_edges=True, color=True)

#%% And also add the microphones as small spheres
# in the LiDAR scan. If everything is correct -- we should see
# the wall mics ON/CLOSE to the cave wall, and the tristar sticking out with
# the obvious shape on the right side of the cave.

mics = [pv.Sphere(radius=0.05, center=each) for each in mic_lidarv2]
for mic in mics:
    plotter.add_mesh(mic)

# only required if you want to re-run the video making code - to change camera