def interp(self, xt, yt, zorigt):
    x = xt[:, zorigt >= 0.0]
    y = yt[:, zorigt >= 0.0]
    z = zorigt[:, zorigt >= 0.0]
    cens, edg, tri, neig = D.delaunay(x, y)
    if 0:
        # plot triangulation
        for t in tri:
            # t[0], t[1], t[2] are the points indexes of the triangle
            t_i = [t[0], t[1], t[2], t[0]]
            pylab.plot(x[t_i], y[t_i])
        #pylab.plot(x,y,'o')
        #pylab.show()
    tri = D.Triangulation(x, y)
    interp = tri.nn_interpolator(z)
    xi = N.copy(x)
    yi = N.copy(y)
    xi = N.concatenate((xi, self.line_x))
    yi = N.concatenate((yi, self.line_y))
    zi = interp(xi, yi)
    mylocator = locator(xi, yi)
    inds = mylocator.inside(self.line_x, self.line_y)
    outxi = xi[inds.astype(int)]
    outyi = yi[inds.astype(int)]
    outzi = zi[inds.astype(int)]
    return outxi, outyi, outzi
def __init__(self, xg, yg, verts):
    self.xg = np.asarray(xg)
    self.yg = np.asarray(yg)
    self.verts = verts
    self._xv, self._yv = zip(*verts)
    self._tri = delaunay.Triangulation(self.xg.flat, self.yg.flat)
def test_delaunay():
    # No duplicate points.
    x = [0, 1, 1, 0]
    y = [0, 0, 1, 1]
    npoints = 4
    ntriangles = 2
    nedges = 5

    # Without duplicate points, mpl calls delaunay triangulation and
    # does not modify it.
    mpl_triang = mtri.Triangulation(x, y)
    del_triang = mdel.Triangulation(x, y)

    # Points - floating point.
    assert_array_almost_equal(mpl_triang.x, x)
    assert_array_almost_equal(mpl_triang.x, del_triang.x)
    assert_array_almost_equal(mpl_triang.y, y)
    assert_array_almost_equal(mpl_triang.y, del_triang.y)

    # Triangles - integers.
    assert_equal(len(mpl_triang.triangles), ntriangles)
    assert_equal(np.min(mpl_triang.triangles), 0)
    assert_equal(np.max(mpl_triang.triangles), npoints-1)
    assert_array_equal(mpl_triang.triangles, del_triang.triangle_nodes)

    # Edges - integers.
    assert_equal(len(mpl_triang.edges), nedges)
    assert_equal(np.min(mpl_triang.edges), 0)
    assert_equal(np.max(mpl_triang.edges), npoints-1)
    assert_array_equal(mpl_triang.edges, del_triang.edge_db)
def __init__(self, x, y, triangles=None, mask=None):
    """
    Create a Triangulation object.

    The first two arguments must be:

    *x*, *y*: arrays of shape (npoints).
      Point coordinates.

    Optional arguments (args or keyword args):

    *triangles*: integer array of shape (ntri,3).
      For each triangle, the indices of the three points that make up the
      triangle.  If the points are ordered in a clockwise manner, they are
      converted to anticlockwise.

      If not specified, matplotlib.delaunay is used to create a Delaunay
      triangulation of the points.

    *mask*: optional boolean array of shape (ntri).
      Which triangles are masked out.
    """
    self.x = np.asarray(x, dtype=np.float64)
    self.y = np.asarray(y, dtype=np.float64)
    if self.x.shape != self.y.shape or len(self.x.shape) != 1:
        raise ValueError("x and y must be equal-length 1-D arrays")

    self.mask = None
    self._edges = None
    self._neighbors = None

    if triangles is None:
        # No triangulation specified, so use matplotlib.delaunay.
        dt = delaunay.Triangulation(self.x, self.y)
        self.triangles = np.asarray(dt.triangle_nodes, dtype=np.int32)
        if mask is None:
            self._edges = np.asarray(dt.edge_db, dtype=np.int32)
            # Delaunay triangle_neighbors uses different edge indexing,
            # so convert.
            neighbors = np.asarray(dt.triangle_neighbors, dtype=np.int32)
            self._neighbors = np.roll(neighbors, 1, axis=1)
    else:
        # Triangulation specified.
        self.triangles = np.asarray(triangles, dtype=np.int32)
        if self.triangles.ndim != 2 or self.triangles.shape[1] != 3:
            raise ValueError('triangles must be a (?,3) array')
        if self.triangles.max() >= len(self.x):
            raise ValueError('triangles max element is out of bounds')
        if self.triangles.min() < 0:
            raise ValueError('triangles min element is out of bounds')

    if mask is not None:
        self.mask = np.asarray(mask, dtype=np.bool)
        if len(self.mask.shape) != 1 or \
                self.mask.shape[0] != self.triangles.shape[0]:
            raise ValueError('mask array must have same length as '
                             'triangles array')

    # Underlying C++ object is not created until first needed.
    self._cpp_triangulation = None
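# A minimal usage sketch for the Triangulation constructor above. Assumption:
# a modern matplotlib, where the deprecated matplotlib.delaunay backend has
# been removed and matplotlib.tri is the supported API; it exposes the same
# triangles / edges / neighbors attributes the wrapper computes here.
import numpy as np
import matplotlib.tri as mtri

x = np.array([0.0, 1.0, 1.0, 0.0])
y = np.array([0.0, 0.0, 1.0, 1.0])

# Let the library compute the Delaunay triangulation of the four corners.
triang = mtri.Triangulation(x, y)
print(triang.triangles)   # (ntri, 3) point indices: two triangles
print(triang.edges)       # (nedges, 2) point indices: five edges
print(triang.neighbors)   # (ntri, 3) neighbouring triangle indices, -1 = none

# Equivalent with explicit triangles, masking out the second triangle.
triang2 = mtri.Triangulation(x, y, triangles=[[0, 1, 2], [0, 2, 3]],
                             mask=[False, True])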
def findWonkyVectors(x, y, dx, dy, tol=100):
    from PYME.Analysis.LMVis.visHelpers import genEdgeDB
    T = delaunay.Triangulation(x, y)

    edb = genEdgeDB(T)

    wonkyVecs = np.zeros(len(x))

    # remove any shifts which are markedly different from their neighbours
    for i in range(len(x)):
        incidentEdges = T.edge_db[edb[i][0]]
        # d_x = np.diff(T.x[incidentEdges])
        # d_y = np.diff(T.y[incidentEdges])
        #
        # dist = (d_x**2 + d_y**2)
        #
        # di = np.mean(np.sqrt(dist))

        neighb = incidentEdges.ravel()
        neighb = neighb[(neighb == i) < .5]

        if (abs(dx[i] - np.median(dx[neighb])) > tol) or (abs(dy[i] - np.median(dy[neighb])) > tol):
            wonkyVecs[i] = 1

    return wonkyVecs > .5
def test_delaunay_duplicate_points():
    # Issue 838.
    import warnings

    # Index 2 is the same as index 0.
    x = [0, 1, 0, 1, 0]
    y = [0, 0, 0, 1, 1]
    duplicate_index = 2
    npoints = 4        # Number of non-duplicate points.
    nduplicates = 1
    ntriangles = 2
    nedges = 5

    # With duplicate points, mpl calls delaunay triangulation but
    # modifies the returned arrays.
    warnings.simplefilter("ignore")   # Ignore DuplicatePointWarning.
    mpl_triang = mtri.Triangulation(x, y)
    del_triang = mdel.Triangulation(x, y)
    warnings.resetwarnings()

    # Points - floating point.
    assert_equal(len(mpl_triang.x), npoints + nduplicates)
    assert_equal(len(del_triang.x), npoints)
    assert_array_almost_equal(mpl_triang.x, x)
    assert_array_almost_equal(del_triang.x[:duplicate_index], x[:duplicate_index])
    assert_array_almost_equal(del_triang.x[duplicate_index:], x[duplicate_index+1:])

    assert_equal(len(mpl_triang.y), npoints + nduplicates)
    assert_equal(len(del_triang.y), npoints)
    assert_array_almost_equal(mpl_triang.y, y)
    assert_array_almost_equal(del_triang.y[:duplicate_index], y[:duplicate_index])
    assert_array_almost_equal(del_triang.y[duplicate_index:], y[duplicate_index+1:])

    # Triangles - integers.
    assert_equal(len(mpl_triang.triangles), ntriangles)
    assert_equal(np.min(mpl_triang.triangles), 0)
    assert_equal(np.max(mpl_triang.triangles), npoints-1 + nduplicates)
    assert_equal(len(del_triang.triangle_nodes), ntriangles)
    assert_equal(np.min(del_triang.triangle_nodes), 0)
    assert_equal(np.max(del_triang.triangle_nodes), npoints-1)
    # Convert mpl triangle point indices to delaunay's.
    converted_indices = np.where(mpl_triang.triangles > duplicate_index,
                                 mpl_triang.triangles - nduplicates,
                                 mpl_triang.triangles)
    assert_array_equal(del_triang.triangle_nodes, converted_indices)

    # Edges - integers.
    assert_equal(len(mpl_triang.edges), nedges)
    assert_equal(np.min(mpl_triang.edges), 0)
    assert_equal(np.max(mpl_triang.edges), npoints-1 + nduplicates)
    assert_equal(len(del_triang.edge_db), nedges)
    assert_equal(np.min(del_triang.edge_db), 0)
    assert_equal(np.max(del_triang.edge_db), npoints-1)
    # Convert mpl edge point indices to delaunay's.
    converted_indices = np.where(mpl_triang.edges > duplicate_index,
                                 mpl_triang.edges - nduplicates,
                                 mpl_triang.edges)
    assert_array_equal(del_triang.edge_db, converted_indices)
def warpCorrectRedImage(r, dx, dy):
    X, Y = sp.meshgrid(np.arange(0, 512 * 70, 70), np.arange(0, 256 * 70, 70))
    cx, cy = getCorrection(X, Y, dx, dy)

    T = delaunay.Triangulation((X + cx).ravel(), (Y + cy).ravel())
    In = delaunay.LinearInterpolator(T, r.T.ravel(), r.min())

    vals = In[0:256 * 70:256 * 1j, 0:512 * 70:512 * 1j]

    return vals.T
def jitMCT(self, x, y, jsig, mcp):
    from matplotlib import delaunay
    Imc = numpy.random.normal(size=len(x)) < mcp
    if type(jsig) == numpy.ndarray:
        #print jsig.shape, Imc.shape
        jsig = jsig[Imc]
    T = delaunay.Triangulation(
        x[Imc] + jsig * numpy.random.normal(size=Imc.sum()),
        y[Imc] + jsig * numpy.random.normal(size=Imc.sum()))
    self.setTriang(T)
def ch_subsets_2d(ChLocsArr):
    T = sd.Triangulation(*ChLocsArr.transpose())
    triangles = T.triangle_nodes
    tri_adjacency = dict([(tri, nb_tris[nb_tris != -1])
                          for (tri, nb_tris) in enumerate(T.triangle_neighbors)])
    subsets = set()
    for src, targs in tri_adjacency.items():
        for targ in targs:
            if src < targ:
                subsets.add(
                    tuple(set(triangles[src]).union(set(triangles[targ]))))
    return list(subsets)
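# A hedged modern equivalent of ch_subsets_2d, since the triangle_nodes /
# triangle_neighbors attributes belong to the legacy delaunay package.
# scipy.spatial.Delaunay exposes the analogous simplices / neighbors arrays
# (the name ch_subsets_2d_scipy is ours, not from the original code).
import numpy as np
from scipy.spatial import Delaunay

def ch_subsets_2d_scipy(ChLocsArr):
    T = Delaunay(ChLocsArr)
    subsets = set()
    for src, nb_tris in enumerate(T.neighbors):
        for targ in nb_tris[nb_tris != -1]:
            if src < targ:
                # union of the two adjacent triangles' vertex sets
                subsets.add(tuple(set(T.simplices[src]) | set(T.simplices[targ])))
    return list(subsets)

# e.g. ch_subsets_2d_scipy(np.random.rand(20, 2))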
def rendJitTri(im, x, y, jsig, mcp, imageBounds, pixelSize, n=1):
    for i in range(n):
        #global jParms
        #locals().update(jParms)
        scipy.random.seed()

        Imc = scipy.rand(len(x)) < mcp
        if type(jsig) == numpy.ndarray:
            #print jsig.shape, Imc.shape
            jsig = jsig[Imc]

        T = delaunay.Triangulation(x[Imc] + jsig * scipy.randn(Imc.sum()),
                                   y[Imc] + jsig * scipy.randn(Imc.sum()))
        #return T
        rendTri(T, imageBounds, pixelSize, im=im)
def execute(self, namespace):
    from matplotlib import delaunay
    from PYME.LMVis import visHelpers

    pos = namespace[self.inputPositions]

    x, y = pos['x'], pos['y']

    # triangulate the data
    T = delaunay.Triangulation(x + .1 * np.random.normal(size=len(x)),
                               y + .1 * np.random.normal(size=len(x)))
    # find the average edge lengths leading away from a given point
    res = np.array(visHelpers.calcNeighbourDists(T))

    res = tabular.DictSource({self.key: res})
    if 'mdh' in dir(pos):
        res.mdh = pos.mdh

    namespace[self.outputName] = res
def rendJitTriang(x, y, n, jsig, mcp, imageBounds, pixelSize):
    # grid dimensions must be integers for numpy.zeros
    sizeX = int((imageBounds.x1 - imageBounds.x0) / pixelSize)
    sizeY = int((imageBounds.y1 - imageBounds.y0) / pixelSize)

    im = numpy.zeros((sizeX, sizeY))

    for i in range(n):
        Imc = scipy.rand(len(x)) < mcp
        if type(jsig) == numpy.ndarray:
            #print jsig.shape, Imc.shape
            jsig = jsig[Imc]

        T = delaunay.Triangulation(x[Imc] + jsig * scipy.randn(Imc.sum()),
                                   y[Imc] + jsig * scipy.randn(Imc.sum()))
        rendTri(T, imageBounds, pixelSize, im=im)

    return im / n
def __init__(self, x, y, triangles=None, mask=None):
    self.x = np.asarray(x, dtype=np.float64)
    self.y = np.asarray(y, dtype=np.float64)
    if self.x.shape != self.y.shape or len(self.x.shape) != 1:
        raise ValueError("x and y must be equal-length 1-D arrays")

    self.mask = None
    self._edges = None
    self._neighbors = None

    if triangles is None:
        # No triangulation specified, so use matplotlib.delaunay.
        dt = delaunay.Triangulation(self.x, self.y)
        self.triangles = np.asarray(
            dt.to_client_point_indices(dt.triangle_nodes), dtype=np.int32)
        if mask is None:
            self._edges = np.asarray(
                dt.to_client_point_indices(dt.edge_db), dtype=np.int32)
            # Delaunay triangle_neighbors uses different edge indexing,
            # so convert.
            neighbors = np.asarray(dt.triangle_neighbors, dtype=np.int32)
            self._neighbors = np.roll(neighbors, 1, axis=1)
    else:
        # Triangulation specified. Copy, since we may correct triangle
        # orientation.
        self.triangles = np.array(triangles, dtype=np.int32)
        if self.triangles.ndim != 2 or self.triangles.shape[1] != 3:
            raise ValueError('triangles must be a (?,3) array')
        if self.triangles.max() >= len(self.x):
            raise ValueError('triangles max element is out of bounds')
        if self.triangles.min() < 0:
            raise ValueError('triangles min element is out of bounds')

    if mask is not None:
        self.mask = np.asarray(mask, dtype=np.bool)
        if (len(self.mask.shape) != 1 or
                self.mask.shape[0] != self.triangles.shape[0]):
            raise ValueError('mask array must have same length as '
                             'triangles array')

    # Underlying C++ object is not created until first needed.
    self._cpp_triangulation = None

    # Default TriFinder not created until needed.
    self._trifinder = None
def prep_data2(xt, yt, zorigt):
    x = xt[:, zorigt > 0.0]
    y = yt[:, zorigt > 0.0]
    z = zorigt[:, zorigt > 0.0]
    print 'reached'
    threshold = 0.0
    print 'where masked ', z.shape
    # Grid (the first mgrid is overridden by the data-driven one below)
    xi, yi = N.mgrid[-5:5:100j, -5:5:100j]
    xi, yi = N.mgrid[x.min():x.max():.001, y.min():y.max():.001]
    # triangulate data
    tri = D.Triangulation(x, y)
    print 'before interpolator'
    # interpolate data
    interp = tri.nn_interpolator(z)
    print 'interpolator reached'
    zi = interp(xi, yi)
    return xi, yi, zi
def do_projection(proj, lon_lats, dists, xres=120, yres=100):
    xys = np.array([proj(lon, lat) for (lon, lat) in lon_lats])
    x = xys[:, 0]
    y = xys[:, 1]
    good = x < 1e29   # basemap seems to set bad values to 1e30
    x = x[good]
    y = y[good]
    dists = dists[good]
    tri = dlny.Triangulation(x, y)
    interp = tri.nn_interpolator(dists)
    X, Y = np.mgrid[min(y):max(y):yres * 1j, min(x):max(x):xres * 1j]
    vals = interp[min(y):max(y):yres * 1j, min(x):max(x):xres * 1j]
    Z = np.ma.masked_array(vals, mask=np.isnan(vals))
    return x, y, X, Y, Z
def getTriangles(self, recalc=False):
    from matplotlib import delaunay
    if self.Triangles is None or recalc:
        statTri = statusLog.StatusLogger("Generating Triangulation ...")
        self.Triangles = delaunay.Triangulation(
            self.colourFilter['x'] + .1 * np.random.normal(size=len(self.colourFilter['x'])),
            self.colourFilter['y'] + .1 * np.random.normal(size=len(self.colourFilter['x'])))

        # reset things which will have changed
        self.edb = None
        try:
            self.GeneratedMeasures.pop('neighbourDistances')
        except KeyError:
            pass

    return self.Triangles
def triangle_graph(Locs):
    """
    Returns a graph giving the Delaunay triangulation of a set of
    two-dimensional points.

    Parameters
    ----------
    Locs : ndarray, or anything that gets cast into ndarray upon np.array(Locs)
        Locs.shape = (n,2), where n is the number of points

    Returns
    -------
    out : networkx Graph
    """
    Locs = np.array(Locs)
    if len(Locs) == 1:
        return complete_graph(1)
    else:
        Triangulation = sd.Triangulation(Locs[:, 0], Locs[:, 1])
        return Triangulation.node_graph()
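# Sketch of the same graph construction without the legacy node_graph()
# helper, using scipy.spatial.Delaunay plus networkx. The name
# triangle_graph_scipy is ours; this is an assumed equivalent, not the
# original implementation.
import numpy as np
import networkx as nx
from scipy.spatial import Delaunay

def triangle_graph_scipy(Locs):
    Locs = np.asarray(Locs)
    if len(Locs) == 1:
        return nx.complete_graph(1)
    G = nx.Graph()
    for a, b, c in Delaunay(Locs).simplices:
        # each triangle contributes its three edges
        G.add_edges_from([(a, b), (b, c), (a, c)])
    return G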
def extrapolate_mask(a, mask=None):
    if mask is None and not isinstance(a, np.ma.MaskedArray):
        return a
    if mask is None:
        mask = a.mask
    else:
        if isinstance(a, np.ma.MaskedArray):
            mask = mask | a.mask

    a = a.copy()   # work on a copy so the input array is left untouched
    jj, ii = indices(a.shape)
    igood = ii[~mask]
    jgood = jj[~mask]
    ibad = ii[mask]
    jbad = jj[mask]
    tri = delaunay.Triangulation(igood, jgood)
    # extrapolate from the good (unmasked) points...
    interp = tri.nn_extrapolator(a[~mask])
    # ...to the bad (masked) points
    a[mask] = interp(ibad, jbad)
    return a
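# Usage sketch: extrapolate_mask above relies on the legacy
# matplotlib.delaunay nn_extrapolator, so it only runs where that module
# still exists. On current stacks a similar hole-filling can be done with
# scipy; note this uses a nearest-neighbour fill rather than the
# natural-neighbour scheme above, so values near the holes will differ.
import numpy as np
from scipy.interpolate import NearestNDInterpolator

a = np.ma.masked_invalid([[1.0, 2.0, np.nan],
                          [4.0, np.nan, 6.0],
                          [7.0, 8.0, 9.0]])
jj, ii = np.indices(a.shape)
good = ~a.mask
interp = NearestNDInterpolator(np.column_stack((ii[good], jj[good])), a[good])
filled = a.filled(0.0)
filled[a.mask] = interp(ii[a.mask], jj[a.mask])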
def genShiftVectors(res_g, res_r):
    ind1 = (res_g['fitResults']['A'] > 10) * (res_g['fitResults']['A'] < 500) * (
        res_g['fitResults']['sigma'] > 100) * (res_g['fitResults']['sigma'] < 400) * (
        res_g['fitError']['x0'] < 50)

    x = res_g['fitResults']['x0'][ind1]
    # add a bit of 'fuzz' to avoid duplicate points which could crash interpolation
    x = x + 0.5 * sp.randn(len(x))
    y = res_g['fitResults']['y0'][ind1]
    sx = res_g['fitResults']['x0'][ind1] - res_r['fitResults']['x0'][ind1]
    sy = res_g['fitResults']['y0'][ind1] - res_r['fitResults']['y0'][ind1]

    T = delaunay.Triangulation(x, y)

    nx = []
    ny = []
    nsx = []
    nsy = []

    # remove any shifts which are markedly different from their neighbours
    for i in range(len(x)):
        i1, i2 = np.where(T.edge_db == i)
        i_ = T.edge_db[i1, 1 - i2]
        if (abs(sx[i] - np.median(sx[i_])) < 100) and (abs(sy[i] - np.median(sy[i_])) < 100):
            nx.append(x[i])
            ny.append(y[i])
            nsx.append(sx[i])
            nsy.append(sy[i])
        else:
            print('point %d dropped' % i)

    nx = np.array(nx)
    ny = np.array(ny)
    nsx = np.array(nsx)
    nsy = np.array(nsy)

    return (nx, ny, nsx, nsy)
def extrapolate_data(dataset, basemap, gridsize_x, gridsize_y, maskoceans=False):
    """
    Extrapolate `dataset` on a grid of size `(gridsize_x, gridsize_y)`
    based on `basemap`.

    A regular grid of the user-defined size is created from the basemap.
    The dataset coordinates are then Delaunay triangulated, and the
    corresponding data extrapolated on the regular grid using the
    natural-neighbor method.

    Parameters
    ----------
    dataset : ndarray
        A structured ndarray, w/ fields ['lon', 'lat', 'data']
    basemap : Basemap
        The projection basemap
    gridsize_x : int
        Number of cells in the x direction ('lon')
    gridsize_y : int
        Number of cells in the y direction ('lat')
    maskoceans : bool
        Whether to mask grid cells falling over oceans.
    """
    # Get the grid
    (glon, glat, gx, gy) = basemap.makegrid(gridsize_x, gridsize_y, returnxy=True)
    # Transform the lon/lat of the dataset into basemap units
    (llon, llat) = basemap(dataset['lon'], dataset['lat'])
    # Triangulate the dataset
    triangul = delaunay.Triangulation(llon, llat)
    # Define an extrapolator (using natural neighbors)...
    # ... and extrapolate the data along the grid...
    extrapolator = triangul.nn_extrapolator(dataset['data'])
    extrapolated = ma.fix_invalid(extrapolator(gx, gy))
    if maskoceans:
        extrapolated = mtb.maskoceans(glon, glat, extrapolated)
    return (extrapolated, gx, gy)
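# Hypothetical invocation sketch for extrapolate_data (the Basemap setup
# mirrors the readgrid snippets further down; the structured dtype and the
# sample values are assumptions for illustration, and the legacy
# matplotlib.delaunay module used inside extrapolate_data must be available).
import numpy as np
from mpl_toolkits.basemap import Basemap

dataset = np.array([(-95.0, 29.0, 1.5), (-94.0, 30.0, 2.0), (-93.5, 28.5, 0.7)],
                   dtype=[('lon', float), ('lat', float), ('data', float)])
bmap = Basemap(llcrnrlon=-98.5, llcrnrlat=22.5, urcrnrlon=-87.5, urcrnrlat=31.0,
               projection='lcc', lat_0=30, lon_0=-94, resolution='i',
               area_thresh=0.)
extrapolated, gx, gy = extrapolate_data(dataset, bmap, gridsize_x=120,
                                        gridsize_y=100)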
def data_to_S1(x, y, mesh, **values):
    "2D data clouds of vector valued functions => dolfin S1 functions"
    trid = dln.Triangulation(x, y)
    #mesh = nanopores.RectangleMesh([0, -Ry], [Rx, Ry], Nx, Ny)

    functions = []
    for F in values.values():
        Fi = [None] * 2
        for i in (0, 1):
            intp = trid.nn_interpolator(F[:, i])
            intp2 = lambda x: intp([x[0]], [x[1]])
            Fi[i] = lambda_to_S1(intp2, mesh)

        V = dolfin.VectorFunctionSpace(mesh, "CG", 1)
        FF = dolfin.Function(V)
        dolfin.assign(FF, [Fi[0], Fi[1]])
        functions.append(FF)

    if len(functions) == 1:
        return functions[0]
    else:
        return tuple(functions)
def getBlobs(self):
    from PYME.Analysis.EdgeDB import edges
    tri = self.getTriangles()
    edb = self.getEdb()

    if self.blobJitter == 0:
        self.objIndices = edges.objectIndices(
            edb.segment(self.objThreshold), self.objMinSize)
        self.objects = [
            np.vstack((tri.x[oi], tri.y[oi])).T for oi in self.objIndices
        ]
    else:
        from matplotlib import delaunay

        ndists = self.getNeighbourDists()

        x_ = np.hstack([
            self['x'] + 0.5 * ndists * np.random.normal(size=ndists.size)
            for i in range(self.blobJitter)
        ])
        y_ = np.hstack([
            self['y'] + 0.5 * ndists * np.random.normal(size=ndists.size)
            for i in range(self.blobJitter)
        ])

        T = delaunay.Triangulation(x_, y_)
        edb = edges.EdgeDB(T)

        objIndices = edges.objectIndices(edb.segment(self.objThreshold),
                                         self.objMinSize)
        self.objects = [np.vstack((T.x[oi], T.y[oi])).T for oi in objIndices]

    return self.objects, self.objThreshold
def prep_data2(xt, yt, zorigt):
    # Data=pylab.load(r'c:\resolution_stuff\1p4K.iexy')
    #Data=pylab.load(filename)
    #xt=Data[:,2]
    #yt=Data[:,3]
    #zorigt=Data[:,0]
    x = xt[:, zorigt > 0.0]
    y = yt[:, zorigt > 0.0]
    z = zorigt[:, zorigt > 0.0]
    # zorig=ma.array(zorigt)
    print 'reached'
    threshold = 0.0
    # print zorigt < threshold
    # print N.isnan(zorigt)
    # z = ma.masked_where(zorigt < threshold, zorigt)
    print 'where masked ', z.shape

    # should be commented out--just for testing
    ## x = pylab.randn(Nu)/aspect
    ## y = pylab.randn(Nu)
    ## z = pylab.rand(Nu)
    ## print x.shape
    ## print y.shape

    # Grid
    xi, yi = N.mgrid[-5:5:100j, -5:5:100j]
    xi, yi = N.mgrid[x.min():x.max():.001, y.min():y.max():.001]
    # triangulate data
    tri = D.Triangulation(x, y)
    print 'before interpolator'
    # interpolate data
    interp = tri.nn_interpolator(z)
    print 'interpolator reached'
    zi = interp(xi, yi)
    # or, all in one line
    #   zi = Triangulation(x,y).nn_interpolator(z)(xi,yi)
    # return x,y,z
    return xi, yi, zi
def readgrid(grid_filename, vert_filename=None, llcrnrlon=-98.5, llcrnrlat=22.5,
             urcrnrlon=-87.5, urcrnrlat=31.0, lat_0=30, lon_0=-94, res='i',
             usebasemap=False, usespherical=True):
    '''
    readgrid(loc)
    Kristen Thyng, March 2013
    This function should be read in at the beginning of a run.py call.
    It reads in all necessary grid information that won't change in time
    and stores it in a dictionary called grid.
    All arrays are changed to Fortran ordering (from Python ordering) and
    to tracmass variables ordering from ROMS ordering,
    i.e. from [t,k,j,i] to [i,j,k,t], right away after reading in.

    Input:
     grid_filename    File name (with extension) where grid information is stored
     vert_filename    (optional) File name (with extension) where vertical grid
                      information is stored, if not in grid_loc. Can also skip
                      this if don't need vertical grid info.
     also optional basemap box parameters. Default is for full shelf model.
     usebasemap       (False) Whether to load basemap into grid (True) or
                      pyproj (False). Basemap is slower but can be used for
                      plotting, and pyproj is the opposite.

    Output:
     grid           Dictionary containing all necessary time-independent grid fields

    grid dictionary contains: (array sizing is for tracmass ordering)
     imt,jmt,km     Grid index sizing constants in (x,y,z), are for horizontal rho grid [scalar]
     dxv            Horizontal grid cell walls areas in x direction [imt,jmt-1]
     dyu            Horizontal grid cell walls areas in y direction [imt-1,jmt]
     dxdy           Horizontal area of cells defined at cell centers [imt,jmt]
     mask           Land/sea mask [imt,jmt]
     pm,pn          Difference in horizontal grid spacing in x and y [imt,jmt]
     kmt            Number of vertical levels in horizontal space [imt,jmt]
     dzt0           Thickness in meters of grid at each k-level with time-independent
                    free surface. Surface is at km [imt,jmt,km].
     zrt0           Depth in meters of grid at each k-level on vertical rho grid with
                    time-independent free surface. Surface is at km [imt,jmt,km]
     zwt0           Depth in meters of grid at each k-level on vertical w grid with
                    time-independent free surface. Surface is at km [imt,jmt,km]
     xr,yr          Rho grid zonal (x) and meridional (y) coordinates [imt,jmt]
     xu,yu          U grid zonal (x) and meridional (y) coordinates [imt,jmt]
     xv,yv          V grid zonal (x) and meridional (y) coordinates [imt,jmt]
     xpsi,ypsi      Psi grid zonal (x) and meridional (y) coordinates [imt,jmt]
     X,Y            Grid index arrays
     tri,trir       Delaunay triangulations
     Cs_r,sc_r      Vertical grid stretching parameters [km-1]
     hc             Critical depth [scalar]
     h              Depths [imt,jmt]
     theta_s        Vertical stretching parameter [scalar]. A parameter
                    (typically 0.0 <= theta_s < 5.0) that defines the amount
                    of grid focusing. A higher value for theta_s will focus the grid more.
     theta_b        Vertical stretching parameter [scalar]. A parameter (0.0 < theta_b < 1.0)
                    that says whether the coordinate will be focused at the surface
                    (theta_b -> 1.0) or split evenly between surface and bottom (theta_b -> 0)
     basemap        Basemap object

    Note: all are in fortran ordering and tracmass ordering except for X, Y,
    tri, and trir. To test: [array].flags['F_CONTIGUOUS'] will return true
    if it is fortran ordering.
    '''

    keeptime = 0  # do timing for readgrid
    if keeptime:
        starttime = time.time()

    # Read in grid parameters and find x and y in domain on different grids
    # use full dataset to get grid information
    # This addresses an issue in netCDF4 that was then fixed, but
    # this line makes updating unnecessary. Issue described here:
    # http://code.google.com/p/netcdf4-python/issues/detail?id=170
    netCDF._set_default_format(format='NETCDF3_64BIT')
    gridfile = netCDF.Dataset(grid_filename)

    # # Read in whether grid is spherical or not
    # try:
    #     usesphericaltemp = gridfile.variables['spherical'][:]
    #     if usesphericaltemp == 'T':
    #         usespherical = True
    #     else:
    #         usespherical = False
    # except KeyError:  # Assume not lon/lat if spherical flag is not in grid file
    #     usespherical = False

    # Basemap parameters.
    if usespherical:
        projection = 'lcc'
        resolution = res
        area_thresh = 0.
        # pdb.set_trace()
        if usebasemap:
            from mpl_toolkits.basemap import Basemap
            basemap = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat,
                              urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
                              projection=projection, lat_0=lat_0, lon_0=lon_0,
                              resolution=resolution, area_thresh=area_thresh)
        else:
            from pyproj import Proj
            # this gives somewhat different differences between projected
            # coordinates as compared with the previous basemap definition
            # for the default values.
            basemap = Proj(proj='lcc', lat_1=llcrnrlat, lat_2=urcrnrlat,
                           lat_0=lat_0, lon_0=lon_0, x_0=0, y_0=0,
                           ellps='clrk66', datum='NAD27')
            # basemap = (proj='lcc',lat_1=44.33333333333334,lat_2=46,lat_0=43.66666666666666, lon_0=-120.5,x_0=609601.2192024384, y_0=0,ellps='clrk66',datum='NAD27')
            # basemap = Proj("+proj=lcc +lat_0=lat_0 +lon_0=lon_0")
            # +x_0=1700000 +y_0=300000 +no_defs +a=6378137
            # +rf=298.257222101 +to_meter=1")

        if keeptime:
            basemaptime = time.time()
            print "basemap time ", basemaptime - starttime
    else:
        basemap = []

    if usespherical:
        lonu = gridfile.variables['lon_u'][:]
        latu = gridfile.variables['lat_u'][:]
        xu, yu = basemap(lonu, latu)
        lonv = gridfile.variables['lon_v'][:]
        latv = gridfile.variables['lat_v'][:]
        xv, yv = basemap(lonv, latv)
        lonr = gridfile.variables['lon_rho'][:]  #[1:-1,1:-1]
        latr = gridfile.variables['lat_rho'][:]  #[1:-1,1:-1]
        xr, yr = basemap(lonr, latr)
        lonpsi = gridfile.variables['lon_psi'][:]
        latpsi = gridfile.variables['lat_psi'][:]
        xpsi, ypsi = basemap(lonpsi, latpsi)
    else:
        # read cartesian data
        xu = gridfile.variables['x_u'][:]
        yu = gridfile.variables['y_u'][:]
        xv = gridfile.variables['x_v'][:]
        yv = gridfile.variables['y_v'][:]
        xr = gridfile.variables['x_rho'][:]
        yr = gridfile.variables['y_rho'][:]
        xpsi = gridfile.variables['x_psi'][:]
        ypsi = gridfile.variables['y_psi'][:]

        # assign to spherical arrays
        lonu, latu = xu, yu
        lonv, latv = xv, yv
        lonr, latr = xr, yr
        lonpsi, latpsi = xpsi, ypsi

    # Create mask of all active cells if there isn't one already
    try:
        mask = gridfile.variables['mask_rho'][:]  #[1:-1,1:-1]
    except KeyError:
        mask = np.ones_like(xr)

    pm = gridfile.variables['pm'][:]
    pn = gridfile.variables['pn'][:]
    h = gridfile.variables['h'][:]

    if keeptime:
        hgridtime = time.time()
        print "horizontal grid time ", hgridtime - basemaptime

    # Vertical grid metrics
    if 's_w' in gridfile.variables:  # given grid file contains vertical grid info
        sc_r = gridfile.variables['s_w'][:]   # sigma coords, 31 layers
        Cs_r = gridfile.variables['Cs_w'][:]  # stretching curve in sigma coords, 31 layers
        hc = gridfile.variables['hc'][:]
        theta_s = gridfile.variables['theta_s'][:]
        theta_b = gridfile.variables['theta_b'][:]
        Vtransform = gridfile.variables['Vtransform'][:]
        Vstretching = gridfile.variables['Vstretching'][:]
    # Still want vertical grid metrics, but they are in a separate file
    elif vert_filename is not None:
        nc = netCDF.Dataset(vert_filename)
        sc_r = nc.variables['s_w'][:]   # sigma coords, 31 layers
        Cs_r = nc.variables['Cs_w'][:]  # stretching curve in sigma coords, 31 layers
        hc = nc.variables['hc'][:]
        theta_s = nc.variables['theta_s'][:]
        theta_b = nc.variables['theta_b'][:]
        Vtransform = nc.variables['Vtransform'][:]
        Vstretching = nc.variables['Vstretching'][:]

    if keeptime:
        vgridtime = time.time()
        print "vertical grid time ", vgridtime - hgridtime

    # make arrays in same order as is expected in the fortran code
    # ROMS expects [time x k x j x i] but tracmass is expecting [i x j x k x time]
    # change these arrays to be fortran-directioned instead of python
    # This is faster than copying arrays. To test: .flags['F_CONTIGUOUS']
    mask = np.asfortranarray(mask.T)
    xr = np.asfortranarray(xr.T)
    yr = np.asfortranarray(yr.T)
    xu = np.asfortranarray(xu.T)
    yu = np.asfortranarray(yu.T)
    xv = np.asfortranarray(xv.T)
    yv = np.asfortranarray(yv.T)
    xpsi = np.asfortranarray(xpsi.T)
    ypsi = np.asfortranarray(ypsi.T)
    lonr = np.asfortranarray(lonr.T)
    latr = np.asfortranarray(latr.T)
    lonu = np.asfortranarray(lonu.T)
    latu = np.asfortranarray(latu.T)
    lonv = np.asfortranarray(lonv.T)
    latv = np.asfortranarray(latv.T)
    lonpsi = np.asfortranarray(lonpsi.T)
    latpsi = np.asfortranarray(latpsi.T)
    pm = np.asfortranarray(pm.T)
    pn = np.asfortranarray(pn.T)
    h = np.asfortranarray(h.T)

    if keeptime:
        fortranflippingtime = time.time()
        print "fortran flipping time ", fortranflippingtime - vgridtime

    # Basing this on setupgrid.f95 for rutgersNWA example project from Bror
    # Grid sizes
    imt = h.shape[0]  # 671
    jmt = h.shape[1]  # 191
    if 'sc_r' in dir():
        km = sc_r.shape[0] - 1  # 30 NOT SURE ON THIS ONE YET

    if keeptime:
        gridsizetime = time.time()
        print "grid size time ", gridsizetime - fortranflippingtime

    # Index grid, for interpolation between real and grid space
    # this is for psi grid, so that middle of grid is min + .5 value
    # # X goes from 0 to imt-2 and Y goes from 0 to jmt-2
    # Y, X = np.meshgrid(np.arange(jmt-1),np.arange(imt-1)) # grid in index coordinates, without ghost cells
    # This is for rho
    # X goes from 0 to imt-1 and Y goes from 0 to jmt-1
    Y, X = np.meshgrid(np.arange(jmt), np.arange(imt))  # grid in index coordinates, without ghost cells
    # Triangulation for grid space to curvilinear space
    tri = delaunay.Triangulation(X.flatten(), Y.flatten())
    # Triangulation for curvilinear space to grid space
    # pdb.set_trace()
    trir = delaunay.Triangulation(xr.flatten(), yr.flatten())
    trirllrho = delaunay.Triangulation(lonr.flatten(), latr.flatten())

    if keeptime:
        delaunaytime = time.time()
        print "delaunay time ", delaunaytime - gridsizetime

    # tracmass ordering.
    # Not sure how to convert this to pm, pn with appropriate shift
    dxv = 1 / pm  # pm is 1/\Delta x at cell centers
    dyu = 1 / pn  # pn is 1/\Delta y at cell centers

    dxdy = dyu * dxv

    # Change dxv,dyu to be correct u and v grid size after having
    # them be too big for dxdy calculation. This is not in the
    # rutgersNWA example and I am not sure why. [i,j]
    dxv = 0.5 * (dxv[:, :-1] + dxv[:, 1:])
    dyu = 0.5 * (dyu[:-1, :] + dyu[1:, :])
    # # These should be interpolated
    # dxv = dxv[:,:-1]
    # dyu = dyu[:-1,:]

    if keeptime:
        gridmetricstime = time.time()
        print "grid metrics time ", gridmetricstime - delaunaytime

    # Adjust masking according to setupgrid.f95 for rutgersNWA example project from Bror
    if 'sc_r' in dir():
        mask2 = mask.copy()
        kmt = np.ones((imt, jmt), order='f') * km

        ind = (mask2 == 1)
        ind[0:imt-1, :] = ind[1:imt, :]
        mask2[ind] = 1
        ind = (mask2 == 1)
        ind[:, 0:jmt-1] = ind[:, 1:jmt]
        mask2[ind] = 1
        # ind = (mask[1:imt-1,:]==1)
        # mask[0:imt-2,ind] = 1
        # ind = (mask[:,1:imt-1]==1)
        # mask[:,0:jmt-2] = 1
        ind = (mask2 == 0)
        kmt[ind] = 0

        # Use octant to calculate depths/thicknesses for the appropriate vertical
        # grid parameters; have to transform a few back to ROMS coordinates and
        # python ordering for this
        zwt0 = octant.depths.get_zw(Vtransform, Vstretching, km + 1, theta_s, theta_b,
                                    h.T.copy(order='c'), hc, zeta=0, Hscale=3)
        zrt0 = octant.depths.get_zrho(Vtransform, Vstretching, km, theta_s, theta_b,
                                      h.T.copy(order='c'), hc, zeta=0, Hscale=3)
        # Change dzt to tracmass/fortran ordering
        zwt0 = zwt0.T.copy(order='f')
        zrt0 = zrt0.T.copy(order='f')
        # this should be the base grid layer thickness that doesn't change in
        # time because it is for the reference vertical level
        dzt0 = zwt0[:, :, 1:] - zwt0[:, :, :-1]

    if keeptime:
        calculatingdepthstime = time.time()
        print "calculating depths time ", calculatingdepthstime - gridmetricstime

    # Fill in grid structure
    if 'sc_r' in dir():
        grid = {'imt': imt, 'jmt': jmt, 'km': km,  #'angle':angle,
                'dxv': dxv, 'dyu': dyu, 'dxdy': dxdy,
                'mask': mask, 'kmt': kmt, 'dzt0': dzt0,
                'zrt0': zrt0, 'zwt0': zwt0,
                'pm': pm, 'pn': pn, 'tri': tri, 'trir': trir, 'trirllrho': trirllrho,
                'xr': xr, 'xu': xu, 'xv': xv, 'xpsi': xpsi, 'X': X,
                'yr': yr, 'yu': yu, 'yv': yv, 'ypsi': ypsi, 'Y': Y,
                'lonr': lonr, 'lonu': lonu, 'lonv': lonv, 'lonpsi': lonpsi,
                'latr': latr, 'latu': latu, 'latv': latv, 'latpsi': latpsi,
                'Cs_r': Cs_r, 'sc_r': sc_r, 'hc': hc, 'h': h,
                'theta_s': theta_s, 'theta_b': theta_b,
                'Vtransform': Vtransform, 'Vstretching': Vstretching,
                'basemap': basemap}
    else:
        grid = {'imt': imt, 'jmt': jmt,  #'angle':angle,
                'dxv': dxv, 'dyu': dyu, 'dxdy': dxdy,
                'mask': mask,
                'pm': pm, 'pn': pn, 'tri': tri, 'trir': trir, 'trirllrho': trirllrho,
                'xr': xr, 'xu': xu, 'xv': xv, 'xpsi': xpsi, 'X': X,
                'yr': yr, 'yu': yu, 'yv': yv, 'ypsi': ypsi, 'Y': Y,
                'lonr': lonr, 'lonu': lonu, 'lonv': lonv, 'lonpsi': lonpsi,
                'latr': latr, 'latu': latu, 'latv': latv, 'latpsi': latpsi,
                'h': h,
                'basemap': basemap}

    if keeptime:
        griddicttime = time.time()
        print "saving grid dict time ", griddicttime - calculatingdepthstime

    gridfile.close()

    return grid
# overwrite explicit calculation far from pore
yfar = abs(y) > Ry0
Fexp[yfar, :] = Fimp[yfar, :]

# duplicate array
notx0 = x > 0.

def mirror(z, sign):
    return np.concatenate([z, sign * z[notx0]])

x = mirror(x, -1)
y = mirror(y, 1)

trid = dln.Triangulation(x, y)
tri = mtri.Triangulation(x, y)
plt.figure()
plt.triplot(tri, '-')

Fi = [None] * 2
mesh = nanopores.RectangleMesh([-Rx1, -Ry], [Rx1, Ry], 60, 150)

for i in (0, 1):
    z = Fexp[:, i]
    z = mirror(z, -1 if i == 0 else 1)
    # interpolate data
    interp = trid.nn_interpolator(z)
    interps = lambda x: interp([x[0]], [x[1]])
    Fi[i] = S1(interps, mesh)
def mask_extrap(x, y, v, inplace=False, norm_xy=False, mpl_tri=True):
    '''
    Extrapolate numpy array at masked points.
    Based on delaunay triangulation provided by matplotlib.
    '''
    if np.ma.isMA(v) and v.size != v.count():
        mask = v.mask
    else:
        return v

    sh = v.shape
    x = x.ravel()
    y = y.ravel()
    v = v.ravel()
    mask = mask.ravel()

    if inplace:
        u = v
    else:
        u = v.copy()

    if norm_xy:
        rxy = (x.max() - x.min()) / (y.max() - y.min())
        y = y * rxy

    if not mpl_tri:
        from matplotlib import delaunay  # deprecated in version 1.4

        # nn_extrapolator may have problems dealing with regular grids,
        # nans may be obtained. One simple solution is to rotate the domain!
        x, y = rot2d(x, y, np.pi / 3.)

        if 0:
            tri = delaunay.Triangulation(x[~mask], y[~mask])
            u[mask] = tri.nn_extrapolator(u[~mask])(x[mask], y[mask])
        else:
            # deal with repeated pairs (problem for nn_extrapolator)
            xy = x[~mask] + 1j * y[~mask]
            xy, ii = np.unique(xy, 1)

            tri = delaunay.Triangulation(x[~mask][ii], y[~mask][ii])
            u[mask] = tri.nn_extrapolator(u[~mask][ii])(x[mask], y[mask])

        if np.any(np.isnan(u)):
            mask = np.isnan(u)
            tri = delaunay.Triangulation(x[~mask], y[~mask])
            u[mask] = tri.nn_extrapolator(u[~mask])(x[mask], y[mask])
    else:
        import matplotlib.tri as mtri

        # add corners:
        xv = np.asarray([x.min() - 1, x.max() + 1, x.max() + 1, x.min() - 1])
        yv = np.asarray([y.min() - 1, y.min() - 1, y.max() + 1, y.max() + 1])
        vv = np.zeros(4, v.dtype)
        mv = np.zeros(4, 'bool')

        for i in range(4):
            d = (x[~mask] - xv[i])**2 + (y[~mask] - yv[i])**2
            j = np.where(d == d.min())[0][0]
            vv[i] = u[~mask][j]

        #x=np.ma.hstack((x.flat,xv)) # use ravel at top instead!
        x = np.ma.hstack((x, xv))
        y = np.ma.hstack((y, yv))
        u = np.ma.hstack((u, vv))
        mask = np.hstack((mask, mv))

        tri = mtri.Triangulation(x[~mask], y[~mask])
        print u.shape, x.shape, y.shape, mask.shape
        u[mask] = mtri.CubicTriInterpolator(tri, u[~mask])(x[mask], y[mask])
        if np.any(np.isnan(u)):
            mask = np.isnan(u)
            tri = mtri.Triangulation(x[~mask], y[~mask])
            u[mask] = mtri.CubicTriInterpolator(tri, u[~mask])(x[mask], y[mask])

        u = u[:-4]

    u.shape = sh
    if not inplace:
        return u
def readgrid(loc, nc=None, llcrnrlon=-98.5, llcrnrlat=22.5,
             urcrnrlon=-87.5, urcrnrlat=31.0, res='i'):
    '''
    readgrid(loc)
    Kristen Thyng, March 2013
    This function should be read in at the beginning of a run.py call.
    It reads in all necessary grid information that won't change in time
    and stores it in a dictionary called grid.
    All arrays are changed to Fortran ordering (from Python ordering) and
    to tracmass variables ordering from ROMS ordering,
    i.e. from [t,k,j,i] to [i,j,k,t], right away after reading in.

    Input:
     loc            File location
     nc             (optional) NetCDF object for relevant files
     also optional basemap box parameters. Default is for full shelf model.

    Output:
     grid           Dictionary containing all necessary time-independent grid fields

    grid dictionary contains: (array sizing is for tracmass ordering)
     imt,jmt,km     Grid index sizing constants in (x,y,z), are for horizontal rho grid [scalar]
     dxv            Horizontal grid cell walls areas in x direction [imt,jmt-1]
     dyu            Horizontal grid cell walls areas in y direction [imt-1,jmt]
     dxdy           Horizontal area of cells defined at cell centers [imt,jmt]
     mask           Land/sea mask [imt,jmt]
     pm,pn          Difference in horizontal grid spacing in x and y [imt,jmt]
     kmt            Number of vertical levels in horizontal space [imt,jmt]
     dzt0           Thickness in meters of grid at each k-level with time-independent
                    free surface. Surface is at km [imt,jmt,km].
     zrt0           Depth in meters of grid at each k-level on vertical rho grid with
                    time-independent free surface. Surface is at km [imt,jmt,km]
     zwt0           Depth in meters of grid at each k-level on vertical w grid with
                    time-independent free surface. Surface is at km [imt,jmt,km]
     xr,yr          Rho grid zonal (x) and meridional (y) coordinates [imt,jmt]
     xu,yu          U grid zonal (x) and meridional (y) coordinates [imt,jmt]
     xv,yv          V grid zonal (x) and meridional (y) coordinates [imt,jmt]
     xpsi,ypsi      Psi grid zonal (x) and meridional (y) coordinates [imt,jmt]
     X,Y            Grid index arrays
     tri,trir       Delaunay triangulations
     Cs_r,sc_r      Vertical grid stretching parameters [km-1]
     hc             Critical depth [scalar]
     h              Depths [imt,jmt]
     theta_s        Vertical stretching parameter [scalar]. A parameter
                    (typically 0.0 <= theta_s < 5.0) that defines the amount
                    of grid focusing. A higher value for theta_s will focus the grid more.
     theta_b        Vertical stretching parameter [scalar]. A parameter (0.0 < theta_b < 1.0)
                    that says whether the coordinate will be focused at the surface
                    (theta_b -> 1.0) or split evenly between surface and bottom (theta_b -> 0)
     basemap        Basemap object

    Note: all are in fortran ordering and tracmass ordering except for X, Y,
    tri, and trir. To test: [array].flags['F_CONTIGUOUS'] will return true
    if it is fortran ordering.
    '''

    # Basemap parameters.
    projection = 'lcc'
    lat_0 = 30
    lon_0 = -94
    resolution = res
    area_thresh = 0.
    basemap = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat,
                      urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
                      projection=projection, lat_0=lat_0, lon_0=lon_0,
                      resolution=resolution, area_thresh=area_thresh)

    # Read in grid parameters and find x and y in domain on different grids
    # if len(loc) == 2:
    #     gridfile = netCDF.Dataset(loc[1])
    # use full dataset to get grid information
    # This addresses an issue in netCDF4 that was then fixed, but
    # this line makes updating unnecessary. Issue described here:
    # http://code.google.com/p/netcdf4-python/issues/detail?id=170
    netCDF._set_default_format(format='NETCDF3_64BIT')
    # pdb.set_trace()
    # grid is included in nc file if using thredds or forecast output
    if 'http' in loc:
        gridfile = netCDF.Dataset(loc)
    elif len(loc) == 2:
        gridfile = netCDF.Dataset(loc[1])
    else:
        gridfile = netCDF.Dataset(loc + 'grid.nc')

    lonu = gridfile.variables['lon_u'][:]
    latu = gridfile.variables['lat_u'][:]
    xu, yu = basemap(lonu, latu)
    lonv = gridfile.variables['lon_v'][:]
    latv = gridfile.variables['lat_v'][:]
    xv, yv = basemap(lonv, latv)
    lonr = gridfile.variables['lon_rho'][:]  #[1:-1,1:-1]
    latr = gridfile.variables['lat_rho'][:]  #[1:-1,1:-1]
    xr, yr = basemap(lonr, latr)
    lonpsi = gridfile.variables['lon_psi'][:]
    latpsi = gridfile.variables['lat_psi'][:]
    xpsi, ypsi = basemap(lonpsi, latpsi)
    mask = gridfile.variables['mask_rho'][:]  #[1:-1,1:-1]
    pm = gridfile.variables['pm'][:]
    pn = gridfile.variables['pn'][:]
    h = gridfile.variables['h'][:]
    angle = gridfile.variables['angle'][:]

    # Vertical grid metrics
    if 'http' in loc or len(loc) == 2 or 's_w' in gridfile.variables:
        sc_r = gridfile.variables['s_w'][:]   # sigma coords, 31 layers
        Cs_r = gridfile.variables['Cs_w'][:]  # stretching curve in sigma coords, 31 layers
        hc = gridfile.variables['hc'][:]
        theta_s = gridfile.variables['theta_s'][:]
        theta_b = gridfile.variables['theta_b'][:]
        Vtransform = gridfile.variables['Vtransform'][0]
        Vstretching = gridfile.variables['Vstretching'][0]
    elif nc is not None:  # for if running off local grid/nc files
        sc_r = nc.variables['s_w'][:]   # sigma coords, 31 layers
        Cs_r = nc.variables['Cs_w'][:]  # stretching curve in sigma coords, 31 layers
        hc = nc.variables['hc'][:]
        theta_s = nc.variables['theta_s'][:]
        theta_b = nc.variables['theta_b'][:]
        Vtransform = nc.variables['Vtransform'][:]
        Vstretching = nc.variables['Vstretching'][:]

    # make arrays in same order as is expected in the fortran code
    # ROMS expects [time x k x j x i] but tracmass is expecting [i x j x k x time]
    # change these arrays to be fortran-directioned instead of python
    # tic = time.time()
    # This is faster than copying arrays. To test: .flags['F_CONTIGUOUS']
    mask = np.asfortranarray(mask.T)
    xr = np.asfortranarray(xr.T)
    yr = np.asfortranarray(yr.T)
    xu = np.asfortranarray(xu.T)
    yu = np.asfortranarray(yu.T)
    xv = np.asfortranarray(xv.T)
    yv = np.asfortranarray(yv.T)
    xpsi = np.asfortranarray(xpsi.T)
    ypsi = np.asfortranarray(ypsi.T)
    lonr = np.asfortranarray(lonr.T)
    latr = np.asfortranarray(latr.T)
    lonu = np.asfortranarray(lonu.T)
    latu = np.asfortranarray(latu.T)
    lonv = np.asfortranarray(lonv.T)
    latv = np.asfortranarray(latv.T)
    lonpsi = np.asfortranarray(lonpsi.T)
    latpsi = np.asfortranarray(latpsi.T)
    pm = np.asfortranarray(pm.T)
    pn = np.asfortranarray(pn.T)
    h = np.asfortranarray(h.T)
    # print "fortran time ",time.time()-tic
    # pdb.set_trace()

    # Basing this on setupgrid.f95 for rutgersNWA example project from Bror
    # Grid sizes
    imt = h.shape[0]  # 671
    jmt = h.shape[1]  # 191
    # km = sc_r.shape[0] # 31
    if ('http' in loc) or (nc is not None) or len(loc) == 2 or 's_w' in gridfile.variables:
        km = sc_r.shape[0] - 1  # 30 NOT SURE ON THIS ONE YET

    # Index grid, for interpolation between real and grid space
    # this is for psi grid, so that middle of grid is min + .5 value
    # # X goes from 0 to imt-2 and Y goes from 0 to jmt-2
    # Y, X = np.meshgrid(np.arange(jmt-1),np.arange(imt-1)) # grid in index coordinates, without ghost cells
    # This is for rho
    # X goes from 0 to imt-1 and Y goes from 0 to jmt-1
    Y, X = np.meshgrid(np.arange(jmt), np.arange(imt))  # grid in index coordinates, without ghost cells

    # Triangulation for grid space to curvilinear space
    tri = delaunay.Triangulation(X.flatten(), Y.flatten())
    # Triangulation for curvilinear space to grid space
    # pdb.set_trace()
    trir = delaunay.Triangulation(xr.flatten(), yr.flatten())
    trirllrho = delaunay.Triangulation(lonr.flatten(), latr.flatten())

    # tracmass ordering.
    # Not sure how to convert this to pm, pn with appropriate shift
    dxv = 1 / pm  #.copy() # pm is 1/\Delta x at cell centers
    dyu = 1 / pn  #.copy() # pn is 1/\Delta y at cell centers
    # dxv = xr.copy()
    # dxv[0:imt-2,:] = dxv[1:imt-1,:] - dxv[0:imt-2,:]
    # dxv[imt-1:imt,:] = dxv[imt-3:imt-2,:]
    # # pdb.set_trace()
    # dyu = yr.copy()
    # dyu[:,0:jmt-2] = dyu[:,1:jmt-1] - dyu[:,0:jmt-2]
    # dyu[:,jmt-1] = dyu[:,jmt-2]
    dxdy = dyu * dxv

    # Change dxv,dyu to be correct u and v grid size after having
    # them be too big for dxdy calculation. This is not in the
    # rutgersNWA example and I am not sure why. [i,j]
    dxv = 0.5 * (dxv[:, :-1] + dxv[:, 1:])
    dyu = 0.5 * (dyu[:-1, :] + dyu[1:, :])
    # # These should be interpolated
    # dxv = dxv[:,:-1]
    # dyu = dyu[:-1,:]

    # Adjust masking according to setupgrid.f95 for rutgersNWA example project from Bror
    # pdb.set_trace()
    if ('http' in loc) or (nc is not None) or len(loc) == 2 or 's_w' in gridfile.variables:
        mask2 = mask.copy()
        kmt = np.ones((imt, jmt), order='f') * km

        ind = (mask2[1:imt, :] == 1)
        mask2[0:imt - 1, ind] = 1
        ind = (mask2[:, 1:jmt] == 1)
        mask2[ind, 0:jmt - 1] = 1
        # ind = (mask[1:imt-1,:]==1)
        # mask[0:imt-2,ind] = 1
        # ind = (mask[:,1:imt-1]==1)
        # mask[:,0:jmt-2] = 1
        ind = (mask2 == 0)
        kmt[ind] = 0

        # Use octant to calculate depths/thicknesses for the appropriate vertical
        # grid parameters; have to transform a few back to ROMS coordinates and
        # python ordering for this
        zwt0 = octant.depths.get_zw(Vtransform, Vstretching, km + 1, theta_s, theta_b,
                                    h.T.copy(order='c'), hc, zeta=0, Hscale=3)
        zrt0 = octant.depths.get_zrho(Vtransform, Vstretching, km, theta_s, theta_b,
                                      h.T.copy(order='c'), hc, zeta=0, Hscale=3)
        # Change dzt to tracmass/fortran ordering
        zwt0 = zwt0.T.copy(order='f')
        zrt0 = zrt0.T.copy(order='f')
        # this should be the base grid layer thickness that doesn't change in
        # time because it is for the reference vertical level
        dzt0 = zwt0[:, :, 1:] - zwt0[:, :, :-1]

    # Fill in grid structure
    if ('http' in loc) or (nc is not None) or len(loc) == 2 or 's_w' in gridfile.variables:
        grid = {'imt': imt, 'jmt': jmt, 'km': km, 'angle': angle,
                'dxv': dxv, 'dyu': dyu, 'dxdy': dxdy,
                'mask': mask, 'kmt': kmt, 'dzt0': dzt0,
                'zrt0': zrt0, 'zwt0': zwt0,
                'pm': pm, 'pn': pn, 'tri': tri, 'trir': trir, 'trirllrho': trirllrho,
                'xr': xr, 'xu': xu, 'xv': xv, 'xpsi': xpsi, 'X': X,
                'yr': yr, 'yu': yu, 'yv': yv, 'ypsi': ypsi, 'Y': Y,
                'lonr': lonr, 'lonu': lonu, 'lonv': lonv, 'lonpsi': lonpsi,
                'latr': latr, 'latu': latu, 'latv': latv, 'latpsi': latpsi,
                'Cs_r': Cs_r, 'sc_r': sc_r, 'hc': hc, 'h': h,
                'theta_s': theta_s, 'theta_b': theta_b,
                'Vtransform': Vtransform, 'Vstretching': Vstretching,
                'basemap': basemap}
    else:
        grid = {'imt': imt, 'jmt': jmt, 'angle': angle,
                'dxv': dxv, 'dyu': dyu, 'dxdy': dxdy,
                'mask': mask,
                'pm': pm, 'pn': pn, 'tri': tri, 'trir': trir, 'trirllrho': trirllrho,
                'xr': xr, 'xu': xu, 'xv': xv, 'xpsi': xpsi, 'X': X,
                'yr': yr, 'yu': yu, 'yv': yv, 'ypsi': ypsi, 'Y': Y,
                'lonr': lonr, 'lonu': lonu, 'lonv': lonv, 'lonpsi': lonpsi,
                'latr': latr, 'latu': latu, 'latv': latv, 'latpsi': latpsi,
                'h': h,
                'basemap': basemap}

    gridfile.close()

    return grid
def main():
    Mforward = get_rot_mat(-numpy.pi/2, 1, 0, 0)
    scale = numpy.eye(3)
    scale[2, 2] = -1
    Mforward = numpy.dot(Mforward, scale)
    xform_my_long_lat_2_heisenberg = LongLatRotator(Mforward)
    Mreverse = numpy.linalg.inv(Mforward)
    xform_heisenberg_long_lat_2_my = LongLatRotator(Mreverse)

    ## triangulate data ###############################
    left_tri = delaunay.Triangulation(x, y)

    ## transform data to long & lat ###################
    hlong, hlat, hR = xform_stereographic_2_long_lat(x, y)
    long, lat, R = xform_heisenberg_long_lat_2_my(hlong, hlat, hR)

    ## put in form similar to output of make_receptor_info
    left_receptor_dirs = numpy.asarray(long_lat2xyz(long, lat, R))
    left_receptor_dirs = numpy.transpose(left_receptor_dirs)
    left_receptor_dirs = [cgtypes.vec3(v) for v in left_receptor_dirs]
    left_triangles = left_tri.triangle_nodes

    left_ordered_tri_idxs = my_voronoi(left_tri, x, y)

    left_hex_faces = []
    for center_vert_idx in range(len(left_receptor_dirs)):
        center_vert = left_receptor_dirs[center_vert_idx]
        this_ordered_tri_idxs = left_ordered_tri_idxs[center_vert_idx]
        this_face = []
        for tri_idx in this_ordered_tri_idxs:
            if tri_idx == -1:
                this_vert = center_vert
            else:
                nodes = left_triangles[tri_idx]
                this_vert = (left_receptor_dirs[int(nodes[0])] +
                             left_receptor_dirs[int(nodes[1])] +
                             left_receptor_dirs[int(nodes[2])]) * (1.0 / 3.0)
            this_face.append(this_vert)
        left_hex_faces.append(this_face)

    ###############################
    # duplicate for right eye
    right_receptor_dirs = [cgtypes.vec3((v.x, -v.y, v.z))
                           for v in left_receptor_dirs]
    receptor_dirs = left_receptor_dirs + right_receptor_dirs
    right_idx_offset = len(left_receptor_dirs)
    right_triangles = []
    for tri in left_triangles:
        newtri = []
        for idx in tri:
            newtri.append(idx + right_idx_offset)
        right_triangles.append(newtri)
    triangles = list(left_triangles) + right_triangles

    right_hex_faces = []
    for face in left_hex_faces:
        newface = []
        for v in face:
            newface.append(cgtypes.vec3((v.x, -v.y, v.z)))
        right_hex_faces.append(newface)
    hex_faces = list(left_hex_faces) + right_hex_faces

    ###############################
    receptor_dir_slicer = {None: slice(0, len(receptor_dirs), 1),
                           'left': slice(0, right_idx_offset, 1),
                           'right': slice(right_idx_offset, len(receptor_dirs), 1)}

    ###############################
    print('calculating interommatidial distances')
    delta_phi = get_mean_interommatidial_distance(receptor_dirs, triangles)
    # rough approximation. follows from caption of Fig. 18, Buchner, 1984 (in Ali)
    delta_rho_q = numpy.asarray(delta_phi) * 1.1

    # make optical lowpass filters
    print('calculating weight_maps...')
    weight_maps_64 = make_receptor_sensitivities(receptor_dirs,
                                                 delta_rho_q=delta_rho_q,
                                                 res=64)
    print('done')

    clip_thresh = 1e-5
    floattype = numpy.float32
    tmp_weights = flatten_cubemap(weight_maps_64[0])  # get first one to take size

    n_receptors = len(receptor_dirs)
    len_wm = len(tmp_weights)

    print('allocating memory...')
    bigmat_64 = numpy.zeros((n_receptors, len_wm), dtype=floattype)
    print('done')

    print('flattening, clipping, casting...')
    for i, weight_cubemap in enumerate(weight_maps_64):
        weights = flatten_cubemap(weight_cubemap)
        if clip_thresh is not None:
            weights = numpy.choose(weights < clip_thresh, (weights, 0))
        bigmat_64[i, :] = weights.astype(bigmat_64.dtype)
    print('done')

    print('worst gain (should be unity)', min(numpy.sum(bigmat_64, axis=1)))

    print('filling spmat_64...')
    sys.stdout.flush()
    spmat_64 = scipy.sparse.csc_matrix(bigmat_64)
    print('done')

    M, N = bigmat_64.shape
    print('Compressed to %d of %d' % (len(spmat_64.data), M * N))

    ######################

    fd = open('receptor_directions_buchner71.csv', 'w')
    writer = csv.writer(fd)
    for row in receptor_dirs:
        writer.writerow(row)
    fd.close()

    fd = open('precomputed_buchner71.py', 'w')
    fd.write('# -*- coding: utf-8 -*-\n')
    fd.write('# Automatically generated. Do not modify this file.\n')
    fd.write('from __future__ import print_function\n')
    fd.write('import numpy\n')
    fd.write('import scipy.sparse\n')
    fd.write('import scipy.io\n')
    fd.write('import os\n')
    fd.write('datadir = os.path.split(__file__)[0]\n')
    fd.write('cube_order = %s\n' % repr(cube_order))
    fd.write('from cgtypes import vec3, quat #cgkit 1.x\n')
    save_as_python(fd, receptor_dir_slicer, 'receptor_dir_slicer',
                   fname_extra='_buchner71')
    save_as_python(fd, spmat_64, 'receptor_weight_matrix_64',
                   fname_extra='_buchner71')
    save_as_python(fd, list(map(make_repr_able, receptor_dirs)), 'receptor_dirs',
                   fname_extra='_buchner71')
    st = [tuple(t) for t in triangles]
    save_as_python(fd, st, 'triangles', fname_extra='_buchner_1971')
    save_as_python(fd, list(map(make_repr_able, hex_faces)), 'hex_faces',
                   fname_extra='_buchner_1971')
    fd.write('\n')
    fd.write('\n')
    fd.write('\n')
    extra = open('plot_receptors_vtk.py', 'r').read()
    fd.write(extra)
    fd.close()
def list_plot3d_tuples(v, interpolation_type, texture, **kwds):
    r"""
    A 3-dimensional plot of a surface defined by the list `v`
    of points in 3-dimensional space.

    INPUT:

    - ``v`` - something that defines a set of points in 3 space, for example:

      - a matrix

        This will be used if using an interpolation type other than
        'linear', or if using ``num_points`` with 'linear'; otherwise
        see :func:`list_plot3d_matrix`.

      - a list of 3-tuples

      - a list of lists (all of the same length, under same conditions
        as a matrix)

    - ``texture`` - (default: "automatic", a solid light blue)

    OPTIONAL KEYWORDS:

    - ``interpolation_type`` - 'linear', 'nn' (natural neighbor), 'spline'

      'linear' will perform linear interpolation

      The option 'nn' will interpolate by using natural neighbors. The
      value for an interpolation point is estimated using weighted values
      of the closest surrounding points in the triangulation.

      The option 'spline' interpolates using a bivariate B-spline.

      When v is a matrix the default is to use linear interpolation, when
      v is a list of points the default is natural neighbor.

    - ``degree`` - an integer between 1 and 5, controls the degree of spline
      used for spline interpolation. For data that is highly oscillatory
      use higher values

    - ``point_list`` - If point_list=True is passed, then if the array is a
      list of lists of length three, it will be treated as an array of
      points rather than a `3\times n` array.

    - ``num_points`` - Number of points to sample interpolating function in
      each direction. By default for an `n\times n` array this is `n`.

    - ``**kwds`` - all other arguments are passed to the surface function

    OUTPUT: a 3d plot

    EXAMPLES:

    All of these use this function; see :func:`list_plot3d` for other
    list plots::

        sage: pi = float(pi)
        sage: m = matrix(RDF, 6, [sin(i^2 + j^2) for i in [0,pi/5,..,pi] for j in [0,pi/5,..,pi]])
        sage: list_plot3d(m, texture='yellow', interpolation_type='linear', num_points=5)  # indirect doctest
        Graphics3d Object

    ::

        sage: list_plot3d(m, texture='yellow', interpolation_type='spline', frame_aspect_ratio=[1, 1, 1/3])
        Graphics3d Object

    ::

        sage: show(list_plot3d([[1, 1, 1], [1, 2, 1], [0, 1, 3], [1, 0, 4]], point_list=True))

    ::

        sage: list_plot3d([(1, 2, 3), (0, 1, 3), (2, 1, 4), (1, 0, -2)], texture='yellow', num_points=50)
        Graphics3d Object
    """
    from matplotlib import tri, delaunay
    import numpy
    import scipy
    from random import random
    from scipy import interpolate
    from plot3d import plot3d

    if len(v) < 3:
        raise ValueError("We need at least 3 points to perform the interpolation")

    x = [float(p[0]) for p in v]
    y = [float(p[1]) for p in v]
    z = [float(p[2]) for p in v]

    # If the (x,y)-coordinates lie in a one-dimensional subspace, the
    # matplotlib Delaunay code segfaults. Therefore, we compute the
    # correlation of the x- and y-coordinates and add small random noise
    # to avoid the problem if needed.
    corr_matrix = numpy.corrcoef(x, y)
    if corr_matrix[0, 1] > 0.9 or corr_matrix[0, 1] < -0.9:
        ep = float(.000001)
        x = [float(p[0]) + random() * ep for p in v]
        y = [float(p[1]) + random() * ep for p in v]

    # If the list of data points has two points with the exact same
    # (x,y)-coordinate but different z-coordinates, then we sometimes get
    # segfaults. The following block checks for this and raises an
    # exception if this is the case.
    # We also remove duplicate points (which matplotlib can't handle).
    # Alternatively, the code in the if block above which adds random
    # error could be applied to perturb the points.
    drop_list = []
    nb_points = len(x)
    for i in range(nb_points):
        for j in range(i + 1, nb_points):
            if x[i] == x[j] and y[i] == y[j]:
                if z[i] != z[j]:
                    raise ValueError("Two points with same x,y coordinates and different z coordinates were given. Interpolation cannot handle this.")
                elif z[i] == z[j]:
                    drop_list.append(j)
    x = [x[i] for i in range(nb_points) if i not in drop_list]
    y = [y[i] for i in range(nb_points) if i not in drop_list]
    z = [z[i] for i in range(nb_points) if i not in drop_list]

    xmin = float(min(x))
    xmax = float(max(x))
    ymin = float(min(y))
    ymax = float(max(y))

    # arbitrary choice - assuming more or less an nxn grid of points,
    # x should have n^2 entries. We sample 4 times that many points.
    num_points = kwds['num_points'] if 'num_points' in kwds else int(4 * numpy.sqrt(len(x)))

    if interpolation_type == 'linear':
        T = tri.Triangulation(x, y)
        f = tri.LinearTriInterpolator(T, z)
        j = numpy.complex(0, 1)
        from parametric_surface import ParametricSurface

        def g(x, y):
            z = f(x, y)
            return (x, y, z)

        G = ParametricSurface(g, (list(numpy.r_[xmin:xmax:num_points * j]),
                                  list(numpy.r_[ymin:ymax:num_points * j])),
                              texture=texture, **kwds)
        G._set_extra_kwds(kwds)
        return G

    if interpolation_type == 'nn' or interpolation_type == 'default':
        T = delaunay.Triangulation(x, y)
        f = T.nn_interpolator(z)
        f.default_value = 0.0
        j = numpy.complex(0, 1)
        vals = f[ymin:ymax:j * num_points, xmin:xmax:j * num_points]
        from parametric_surface import ParametricSurface

        def g(x, y):
            i = round((x - xmin) / (xmax - xmin) * (num_points - 1))
            j = round((y - ymin) / (ymax - ymin) * (num_points - 1))
            z = vals[int(j), int(i)]
            return (x, y, z)

        G = ParametricSurface(g, (list(numpy.r_[xmin:xmax:num_points * j]),
                                  list(numpy.r_[ymin:ymax:num_points * j])),
                              texture=texture, **kwds)
        G._set_extra_kwds(kwds)
        return G

    if interpolation_type == 'spline':
        from plot3d import plot3d
        kx = kwds['kx'] if 'kx' in kwds else 3
        ky = kwds['ky'] if 'ky' in kwds else 3
        if 'degree' in kwds:
            kx = kwds['degree']
            ky = kwds['degree']
        s = kwds['smoothing'] if 'smoothing' in kwds else len(x) - numpy.sqrt(2 * len(x))
        s = interpolate.bisplrep(x, y, z, [int(1)] * len(x), xmin, xmax, ymin, ymax,
                                 kx=kx, ky=ky, s=s)
        f = lambda x, y: interpolate.bisplev(x, y, s)
        return plot3d(f, (xmin, xmax), (ymin, ymax), texture=texture,
                      plot_points=[num_points, num_points], **kwds)
def calculate_flash_stats(flash, min_pts=2):
    logger = logging.getLogger('FlashAutorunLogger')

    Re = 6378.137 * 1000  # Earth's radius in m
    pi = np.pi

    flash.pointCount = flash.points.shape[0]
    fl_id = np.unique(flash.points['flash_id'])
    assert (fl_id.shape[0] == 1)
    flash.id = fl_id[0]

    lat = np.asarray(flash.points['lat'], dtype=float)
    lon = np.asarray(flash.points['lon'], dtype=float)
    alt = np.asarray(flash.points['alt'], dtype=float)

    # mean location of all points
    latavg, lonavg, altavg = lat.mean(), lon.mean(), alt.mean()
    x = Re * (np.radians(lonavg) - np.radians(lon)) * np.cos(np.radians(latavg))
    y = Re * (np.radians(latavg) - np.radians(lat))
    z = altavg - alt
    # r_sq = x**2.0 + y**2.0 + z**2.0
    # sigma_sq = r_sq.sum()/r_sq.shape[0]
    # sigma = np.std(r_sq)

    area = 0.0
    if flash.pointCount > 2:
        try:
            # find the convex hull and calculate its area
            # There's a patch to deal with duplicate points - mpl has it as of 0.98.6svn
            # scipy scikits.delaunay has it after r745
            from matplotlib import delaunay
            tri = delaunay.Triangulation(x, y)
            hull = tri.hull
            area = poly_area(tri.x[hull], tri.y[hull])
        except ImportError:
            logger.error("*** Flash area not calculated - requires delaunay from matplotlib or scipy")
        except IndexError:
            # tends to happen when a duplicate point causes the point count to
            # drop to 2, leading to a degenerate polygon with no area
            logger.warning('Setting area to 0 for flash with points %s, %s' % (x, y))
            area = 0.0
        except KeyError:
            # hull indexing has problems here
            logger.warning('Setting area to 0 for flash with points %s, %s' % (x, y))
            area = 0.0

    if area == 0.0:
        energy_estimate = 0.
    else:
        energy_estimate = energy(area / 1.0e6)

    volume = 0.0
    if flash.pointCount > 3:
        # Need four points to make at least one tetrahedron.
        try:
            volume, vertices, simplex_volumes = hull_volume(np.vstack((x, y, z)).T)
        except QhullError:
            # this can happen with a degenerate first simplex - all points are
            # coplanar to machine precision. Try again, after adding a tiny
            # amount to the first point.
            print("Perturbing one source to help triangulation for flash with {0} points".format(flash.pointCount))
            # we can tolerate perturbing by no more than 1 m
            machine_eps = 1.0  # np.finfo(x.dtype).eps
            perturb = 2 * machine_eps * np.random.random(size=3) - machine_eps
            x[0] += perturb[0]
            y[0] += perturb[1]
            z[0] += perturb[2]
            volume, vertices, simplex_volumes = hull_volume(np.vstack((x, y, z)).T)

    flash_init_idx = np.argmin(flash.points['time'])

    ### ROUGH APPROXIMATION FOR NOW: #######################
    air_density = barotropic_rho(alt[flash_init_idx] * 1e-3)
    if volume == 0.:
        specific_energy = 0.
    else:
        specific_energy = energy_estimate / ((volume / 1.0e9) * air_density)
    #######################################################

    flash.start = flash.points[flash_init_idx]['time']
    flash.end = flash.points['time'].max()
    flash.duration = flash.end - flash.start
    flash.area = area / 1.0e6  # km^2, 1000x1000
    flash.initLat = lat[flash_init_idx]
    flash.initLon = lon[flash_init_idx]
    flash.initStd = 0.0
    flash.initAlt = alt[flash_init_idx]
    flash.initPts = (int(flash_init_idx),)
    flash.ctralt = altavg
    flash.ctrlat = latavg
    flash.ctrlon = lonavg
    flash.volume = volume / 1.0e9  # km^3, 1000x1000x1000 m

    # CHANGED 03-20-17
    flash.total_energy = energy_estimate      # flash.energy ---> flash.tot_energy
    flash.specific_energy = specific_energy   # flash.tot_energy ---> flash.specific_energy