def point_to_lines_dict(self, points_dict):
    """Build a dictionary of well-path lines from a dictionary of points.

    Args:
        points_dict (dict): mapping of bore ID to the point coordinates
            belonging to that well.

    Returns:
        dict: mapping of bore ID (stringified) to a PolyData object whose
        points have been connected into line cells.
    """
    lines_dict = {}
    for bore_id, pts in points_dict.items():
        cloud = PVGeo.points_to_poly_data(pts)
        # The connectivity must follow a nearest-neighbour search so the
        # segments trace the physical well path, not the input ordering.
        connector = PVGeo.filters.AddCellConnToPoints(nearest_nbr=True)
        lines_dict["{0}".format(bore_id)] = connector.apply(cloud)
    return lines_dict
################################################################################ # Let's go ahead and load a simple file that has XYZ coordinates and a boolean # array for fault presence. This point cloud makes some sort of regular grid, # but we have forgotten the deatials of the cell spacings and local coordinate # rotations. # # We will read in this data with ``pandas`` and send it to the # :func:`PVGeo.points_to_poly_data` helper to create a :class:`vista.PolyData` # object (essentially a point cloud). points = pd.read_csv(fault_file) print(points[0:2]) ################################################################################ vtkpoints = PVGeo.points_to_poly_data(points) print(vtkpoints) ################################################################################ # Note that we have a :class:`vista.PolyData` object now which allows us to do # all types of immediate plotting of our data. First, lets threshold our points # as the point cloud has a bunch of zeros and ones throughout the dataspace to # describe the presence of a fault. # # To threshold the points, we call the threshold filter directly on our data # object and pass the thresholding value. We can then plot the result by # calling the plot function. (Note: change the notebook parameter to # ``False`` for an interactive window) vtkpoints.plot(clim=[0, 1], point_size=1) ################################################################################
############################################################################### temp_grid = temperature_data['kriged_temperature_model'] temp_grid_cropped = temp_grid.clip_box(gdc19.get_roi_bounds(), invert=False) # Remove values above topography temp_grid_no_topo = PVGeo.grids.ExtractTopography( remove=True, # remove the inactive cells tolerance=10.0 # buffer around the topo surface ).apply(temp_grid_cropped, topo) temp_roi = temp_grid_no_topo.threshold([175., 225.]) ############################################################################### well_locs = pd.read_csv(gdc19.get_well_path('well_location_from_earth_model.csv')) well_locs = PVGeo.points_to_poly_data(well_locs[['x', 'y', 'z (land surface)']].values).clip_box( gdc19.get_roi_bounds(), invert=False) WELLS = gdc19.load_well_db() proposed = PVGeo.filters.AddCellConnToPoints().apply(WELLS.pop('well_new2'))#vtki.MultiBlock() well_5832 = PVGeo.filters.AddCellConnToPoints().apply(WELLS.pop('well_5832')) #well_5832.set_active_scalar('ECGR') well_Acord1 = PVGeo.filters.AddCellConnToPoints().apply(WELLS.pop('well_Acord1')) #well_Acord1 = WELLS.set_active_scalar('GR_SPLICE (GAPI)') ############################################################################### # load the gravity model gf = gdc19.get_gravity_path('forge_inverse_problem/RESULT_THRESHED.vtu') grav_model = vtki.read(gf) grav_model.active_scalar_name = 'Magnitude'
def read_surface_verts(filename, grid=False):
    """Load surface vertices from a CSV file.

    Args:
        filename (str): path to the CSV file of vertex coordinates.
        grid (bool): when True, return a gridded surface built by
            ``grid_surface``; otherwise return a raw point cloud.

    Returns:
        The gridded surface or a PolyData point cloud of the vertices.
    """
    verts = pd.read_csv(filename).values
    if grid:
        return grid_surface(verts)
    return PVGeo.points_to_poly_data(verts)
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
import PVGeo
from PVGeo.filters import CombineTables

################################################################################
# Create some input tables
t0 = vtk.vtkTable()
t1 = vtk.vtkTable()
# Populate the tables with random columns: two on t0, one on t1.
n = 100
titles = ('Array 0', 'Array 1', 'Array 2')
arr0 = np.random.random(n)  # Table 0
arr1 = np.random.random(n)  # Table 0
t0.AddColumn(PVGeo.convert_array(arr0, titles[0]))
t0.AddColumn(PVGeo.convert_array(arr1, titles[1]))
arr2 = np.random.random(n)  # Table 1
t1.AddColumn(PVGeo.convert_array(arr2, titles[2]))
arrs = [arr0, arr1, arr2]

################################################################################
# Now use the `CombineTables` filter:
output = CombineTables().apply(t0, t1)

# Here I verify the result by wrapping the output table and pulling each
# column back out by name.
wpdi = dsa.WrapDataObject(output)
for i in range(len(titles)):
    arr = wpdi.RowData[titles[i]]
from PVGeo.filters import ReshapeTable ################################################################################ # Create some input table t0 = vtk.vtkTable() # Populate the tables arrs = [None, None, None] n = 400 ncols = 2 nrows = int(n * len(arrs) / ncols) titles = ('Array 0', 'Array 1', 'Array 2') arrs[0] = np.random.random(n) arrs[1] = np.random.random(n) arrs[2] = np.random.random(n) t0.AddColumn(PVGeo.convert_array(arrs[0], titles[0])) t0.AddColumn(PVGeo.convert_array(arrs[1], titles[1])) t0.AddColumn(PVGeo.convert_array(arrs[2], titles[2])) ################################################################################ # Use the filter to reshape the table order = 'F' newtitles = ['Title %d' % i for i in range(ncols)] output = ReshapeTable(order=order, ncols=ncols, nrows=nrows, names=newtitles).apply(t0) ################################################################################ # Check the output against NumPy wpdi = dsa.WrapDataObject(output) tarr = np.zeros((nrows, ncols)) for i in range(ncols):
# Run PVGeo's built-in test suite to verify the installation works.
import PVGeo
PVGeo.test()