def make_rtree(self):
    with self.dataset() as nc:
        sg = load_grid(nc)

        def rtree_generator_function():
            c = 0
            centers = pair_arrays(sg.center_lon, sg.center_lat)
            for i, axis in enumerate(centers):
                for j, (x, y) in enumerate(axis):
                    c += 1
                    yield (c, (x, y, x, y), (i, j))

        logger.info("Building Faces (centers) Rtree Topology Cache for {0}".format(self.name))
        _, temp_file = tempfile.mkstemp(suffix='.face')
        start = time.time()
        p = index.Property()
        p.filename = str(temp_file)
        p.overwrite = True
        p.storage = index.RT_Disk
        p.dimension = 2
        # p.filename is already a str, so no decode step is needed here.
        index.Index(p.filename,
                    rtree_generator_function(),
                    properties=p,
                    overwrite=True,
                    interleaved=True)
        logger.info("Built Faces (centers) Rtree Topology Cache in {0} seconds.".format(time.time() - start))
    shutil.move('{}.dat'.format(temp_file), self.face_tree_data_file)
    shutil.move('{}.idx'.format(temp_file), self.face_tree_index_file)
def make_rtree(self):
    with self.dataset() as nc:
        sg = load_grid(nc)

        def rtree_generator_function():
            c = 0
            centers = np.dstack((sg.center_lon, sg.center_lat))
            for i, axis in enumerate(centers):
                for j, (x, y) in enumerate(axis):
                    c += 1
                    yield (c, (x, y, x, y), (i, j))

        logger.info("Building Faces (centers) Rtree Topology Cache for {0}".format(self.name))
        _, temp_file = tempfile.mkstemp(suffix='.face')
        start = time.time()
        p = index.Property()
        p.filename = str(temp_file)
        p.overwrite = True
        p.storage = index.RT_Disk
        p.dimension = 2
        idx = index.Index(p.filename,
                          rtree_generator_function(),
                          properties=p,
                          overwrite=True,
                          interleaved=True)
        idx.close()
        logger.info("Built Faces (centers) Rtree Topology Cache in {0} seconds.".format(time.time() - start))
    shutil.move('{}.dat'.format(temp_file), self.face_tree_data_file)
    shutil.move('{}.idx'.format(temp_file), self.face_tree_index_file)
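# A minimal sketch of querying the cache that make_rtree() writes out, assuming
# the moved '.idx'/'.dat' pair shares a common basename (rtree's disk storage
# requires that) and a made-up path; the object stored at build time is the
# (i, j) center index yielded by the generator above.
from rtree import index

basename = '/tmp/topology.face'          # hypothetical '<basename>.idx' / '<basename>.dat' pair

idx = index.Index(basename)              # reopen the serialized tree from disk
for hit in idx.nearest((-74.3, 39.5, -74.3, 39.5), 1, objects=True):
    i, j = hit.object                    # (i, j) grid-center index stored at build time
    print(i, j)
idx.close()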
def wgs84_bounds(self, layer):
    try:
        cached_sg = load_grid(self.topology_file)
    except Exception:
        pass
    else:
        # centers = cached_sg.centers
        centers = pair_arrays(cached_sg.center_lon, cached_sg.center_lat)
        longitudes = centers[..., 0]
        latitudes = centers[..., 1]
        lon_name, lat_name = cached_sg.face_coordinates
        lon_var_obj = getattr(cached_sg, lon_name)
        lat_var_obj = getattr(cached_sg, lat_name)
        lon_trimmed = longitudes[lon_var_obj.center_slicing]
        lat_trimmed = latitudes[lat_var_obj.center_slicing]
        lon_max = lon_trimmed.max()
        lon_min = lon_trimmed.min()
        lat_max = lat_trimmed.max()
        lat_min = lat_trimmed.min()
        return DotDict(minx=lon_min,
                       miny=lat_min,
                       maxx=lon_max,
                       maxy=lat_max,
                       bbox=(lon_min, lat_min, lon_max, lat_max))
def update_grid_cache(self, force=False):
    with self.dataset() as nc:
        if nc is None:
            logger.error("Failed update_grid_cache, could not load dataset "
                         "as a netCDF4 object")
            return

        sg = load_grid(nc)

        # Atomic write
        tmphandle, tmpsave = tempfile.mkstemp()
        try:
            sg.save_as_netcdf(tmpsave)
        finally:
            os.close(tmphandle)

        if os.path.isfile(tmpsave):
            shutil.move(tmpsave, self.topology_file)
        else:
            logger.error("Failed to create topology_file cache for "
                         "Dataset '{}'".format(self.name))
            return

    # Now do the RTree index
    self.make_rtree()
def roms_field(filename=None, dataset=None):
    if dataset is None:
        dataset = nc4.Dataset(filename)

    grid = pysgrid.load_grid(dataset)
    time = Time(dataset['ocean_time'])
    u = grid.u
    v = grid.v
    u_mask = grid.mask_u
    v_mask = grid.mask_v
    r_mask = grid.mask_rho
    land_mask = grid.mask_psi
    variables = {'u': u,
                 'v': v,
                 'u_mask': u_mask,
                 'v_mask': v_mask,
                 'land_mask': land_mask,
                 'time': time}
    return SField(grid, time=time, variables=variables)
def wgs84_bounds(self, layer):
    try:
        cached_sg = load_grid(self.topology_file)
    except Exception:
        pass
    else:
        lon_name, lat_name = cached_sg.face_coordinates
        lon_var_obj = getattr(cached_sg, lon_name)
        lat_var_obj = getattr(cached_sg, lat_name)
        lon_trimmed = cached_sg.center_lon[lon_var_obj.center_slicing]
        lat_trimmed = cached_sg.center_lat[lat_var_obj.center_slicing]
        lon_max = lon_trimmed.max()
        lon_min = lon_trimmed.min()
        lat_max = lat_trimmed.max()
        lat_min = lat_trimmed.min()
        return DotDict(minx=lon_min,
                       miny=lat_min,
                       maxx=lon_max,
                       maxy=lat_max,
                       bbox=(lon_min, lat_min, lon_max, lat_max))
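# Usage sketch, assuming hypothetical `dataset` (an instance of the model that
# defines wgs84_bounds above) and `layer` objects: both wgs84_bounds variants
# silently return None when the cached topology cannot be loaded, so callers
# need to guard for that.
bounds = dataset.wgs84_bounds(layer)
if bounds is None:
    raise RuntimeError('Topology cache is missing or unreadable.')
minx, miny, maxx, maxy = bounds.bbox     # same values as bounds.minx, bounds.miny, ...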
def update_cache(self, force=False):
    with self.dataset() as nc:
        sg = load_grid(nc)
        sg.save_as_netcdf(self.topology_file)

        if not os.path.exists(self.topology_file):
            logger.error("Failed to create topology_file cache for "
                         "Dataset '{}'".format(self.dataset))
            return

        # add time to the cached topology
        time_vars = nc.get_variables_by_attributes(standard_name='time')
        time_dims = list(itertools.chain.from_iterable(
            [time_var.dimensions for time_var in time_vars]))
        unique_time_dims = list(set(time_dims))
        with EnhancedDataset(self.topology_file, mode='a') as cached_nc:
            # create pertinent time dimensions if they aren't already present
            for unique_time_dim in unique_time_dims:
                dim_size = len(nc.dimensions[unique_time_dim])
                try:
                    cached_nc.createDimension(unique_time_dim, size=dim_size)
                except RuntimeError:
                    continue

            # support cases where there may be more than one variable
            # with standard_name='time' in a dataset
            for time_var in time_vars:
                try:
                    time_var_obj = cached_nc.createVariable(time_var.name,
                                                            time_var.dtype,
                                                            time_var.dimensions)
                except RuntimeError:
                    time_var_obj = cached_nc.variables[time_var.name]
                finally:
                    time_var_obj[:] = time_var[:]
                    time_var_obj.units = time_var.units
                    time_var_obj.standard_name = 'time'

    # Now do the RTree index
    self.make_rtree()

    self.cache_last_updated = datetime.utcnow().replace(tzinfo=pytz.utc)
    self.save()
def load_grid(nc):
    """
    Takes `nc` (a netCDF4-python object), a file/URL path,
    or an `iris.cube.Cube` instance.

    Returns a tuple of the `grid` object, the grid `polygons`,
    and the `mesh` type.

    """
    grid = None
    mesh = 'non-compliant'
    if nc.__class__.__name__ == 'Cube':
        polygons = _parse_cube(nc)
        return grid, polygons, mesh
    elif isinstance(nc, Dataset):
        pass
    else:
        nc = Dataset(nc)

    try:
        grid = pysgrid.load_grid(nc)
        polygons = _parse_sgrid(grid)
        mesh = 'sgrid'
        return grid, polygons, mesh
    except (ValueError, KeyError):
        pass

    try:
        grid = pyugrid.UGrid.from_nc_dataset(nc)
        polygons = _parse_ugrid(grid)
        mesh = 'ugrid'
        return grid, polygons, mesh
    except ValueError:
        pass

    # When all fails try non-compliant `grid` type.
    polygons = _parse_grid(nc)
    return grid, polygons, mesh
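# A minimal usage sketch of the dispatcher above, assuming a hypothetical
# OPeNDAP endpoint; the unpacking order matches the `grid, polygons, mesh`
# return value.
url = 'http://example.com/thredds/dodsC/some/model/output.nc'

grid, polygons, mesh = load_grid(url)
print(mesh)          # 'sgrid', 'ugrid', or 'non-compliant'
if grid is None:
    # Only the non-compliant fallback leaves `grid` as None; the parsed
    # polygons are still available.
    print(len(polygons))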
from netCDF4 import Dataset
import numpy as np

import pysgrid

import matplotlib.pyplot as plt
import cartopy.crs as ccrs
# from cartopy.io import shapereader
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER

# rotation is still ugly...
from pysgrid.processing_2d import rotate_vectors, vector_sum

# url = ('http://geoport.whoi.edu/thredds/dodsC/clay/usgs/users/jcwarner/Projects/Sandy/triple_nest/00_dir_NYB05.ncml')  # noqa
url2 = ('http://geoport-dev.whoi.edu/thredds/dodsC/clay/usgs/users/zdefne/run076/his/00_dir_roms_display.ncml')  # noqa

nc = Dataset(url2)
sgrid = pysgrid.load_grid(nc)

sgrid  # We need a better __repr__ and __str__ !!!

lons, lats = np.mgrid[-74.38:-74.26:600j, 39.45:39.56:600j]
points = np.stack((lons, lats), axis=-1)
print(points.shape)

time_idx = 0
v_idx = 0

interp_u = sgrid.interpolate_var_to_points(points,
                                           sgrid.u[time_idx, v_idx],
                                           slices=None)
interp_v = sgrid.interpolate_var_to_points(points,
                                           sgrid.v,
                                           slices=[time_idx, v_idx])

ind = sgrid.locate_faces(points)
velfromweb = VelocityTS.new_from_dict(
    VelocityTS.deserialize(vel.serialize()))
velfromweb.name = 'velfromweb'

pp.pprint(vel.serialize(json_='save'))
pp.pprint(velfromweb.serialize(json_='save'))

velfromsave = VelocityTS.new_from_dict(
    VelocityTS.deserialize(velfromweb.serialize(json_='save')))
pp.pprint(velfromsave)
velfromsave.at(np.array([(0, 0)]), datetime(2000, 1, 1, 0, 0))

url = ('http://geoport.whoi.edu/thredds/dodsC/clay/usgs/users/jcwarner/Projects/Sandy/triple_nest/00_dir_NYB05.ncml')
test_grid = pysgrid.load_grid(url)
grid_u = test_grid.u
grid_v = test_grid.v
grid_time = test_grid.ocean_time._data

u2 = GriddedProp('u', 'm/s', time=grid_time, data=grid_u,
                 grid=test_grid, data_file=url, grid_file=url)
v2 = GriddedProp('v', 'm/s', time=grid_time, data=grid_v,
                 grid=test_grid, data_file=url, grid_file=url)
from netCDF4 import Dataset
import numpy as np

import pysgrid

# url = ('http://geoport.whoi.edu/thredds/dodsC/clay/usgs/users/jcwarner/Projects/Sandy/triple_nest/00_dir_NYB05.ncml')
url2 = ('http://geoport-dev.whoi.edu/thredds/dodsC/clay/usgs/users/zdefne/run076/his/00_dir_roms_display.ncml')

nc = Dataset(url2)
sgrid = pysgrid.load_grid(nc)

sgrid  # We need a better __repr__ and __str__ !!!

lons, lats = np.mgrid[-74.38:-74.26:600j, 39.45:39.56:600j]
points = np.stack((lons, lats), axis=-1)
print(points.shape)

time_idx = 0
v_idx = 0

# interp_u = sgrid.interpolate_var_to_points(
#     points, sgrid.u, slice=[time_idx, v_idx])
# interp_v = sgrid.interpolate_var_to_points(
#     points, sgrid.v, slice=[time_idx, v_idx])
# sgrid.interpolate_var_to_points(
#     points[19:21, 241:243], sgrid.u, slices=[time_idx, v_idx])
# interp_u = sgrid.interpolate_var_to_points(
#     points, sgrid.u, slices=[time_idx, v_idx])

interp_u = sgrid.interpolate_var_to_points(points,
                                           sgrid.u[time_idx, v_idx],
                                           slices=None)
def getmap(self, layer, request):
    time_index, time_value = self.nearest_time(layer, request.GET['time'])
    wgs84_bbox = request.GET['wgs84_bbox']

    with self.dataset() as nc:
        cached_sg = load_grid(self.topology_file)
        lon_name, lat_name = cached_sg.face_coordinates
        lon_obj = getattr(cached_sg, lon_name)
        lat_obj = getattr(cached_sg, lat_name)
        # centers = cached_sg.centers
        centers = pair_arrays(cached_sg.center_lon, cached_sg.center_lat)
        lon = centers[..., 0][lon_obj.center_slicing]
        lat = centers[..., 1][lat_obj.center_slicing]

        if isinstance(layer, Layer):
            data_obj = getattr(cached_sg, layer.access_name)
            raw_var = nc.variables[layer.access_name]
            if len(raw_var.shape) == 4:
                z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
                raw_data = raw_var[time_index, z_index,
                                   data_obj.center_slicing[-2],
                                   data_obj.center_slicing[-1]]
            elif len(raw_var.shape) == 3:
                raw_data = raw_var[time_index,
                                   data_obj.center_slicing[-2],
                                   data_obj.center_slicing[-1]]
            elif len(raw_var.shape) == 2:
                raw_data = raw_var[data_obj.center_slicing]
            else:
                raise BaseException('Unable to trim variable {0} data.'.format(layer.access_name))

            # handle edge variables
            if data_obj.location is not None and 'edge' in data_obj.location:
                raw_data = avg_to_cell_center(raw_data, data_obj.center_axis)

            if request.GET['image_type'] == 'pcolor':
                return mpl_handler.pcolormesh_response(lon, lat, data=raw_data, request=request)
            elif request.GET['image_type'] in ['filledhatches', 'hatches', 'filledcontours', 'contours']:
                return mpl_handler.contouring_response(lon, lat, data=raw_data, request=request)
            else:
                raise NotImplementedError('Image type "{}" is not supported.'.format(request.GET['image_type']))

        elif isinstance(layer, VirtualLayer):
            x_var = None
            y_var = None
            raw_vars = []
            for l in layer.layers:
                data_obj = getattr(cached_sg, l.access_name)
                raw_var = nc.variables[l.access_name]
                raw_vars.append(raw_var)
                if len(raw_var.shape) == 4:
                    z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
                    raw_data = raw_var[time_index, z_index,
                                       data_obj.center_slicing[-2],
                                       data_obj.center_slicing[-1]]
                elif len(raw_var.shape) == 3:
                    raw_data = raw_var[time_index,
                                       data_obj.center_slicing[-2],
                                       data_obj.center_slicing[-1]]
                elif len(raw_var.shape) == 2:
                    raw_data = raw_var[data_obj.center_slicing]
                else:
                    raise BaseException('Unable to trim variable {0} data.'.format(l.access_name))

                raw_data = avg_to_cell_center(raw_data, data_obj.center_axis)

                if x_var is None:
                    if data_obj.vector_axis and data_obj.vector_axis.lower() == 'x':
                        x_var = raw_data
                    elif data_obj.center_axis == 1:
                        x_var = raw_data

                if y_var is None:
                    if data_obj.vector_axis and data_obj.vector_axis.lower() == 'y':
                        y_var = raw_data
                    elif data_obj.center_axis == 0:
                        y_var = raw_data

            if x_var is None or y_var is None:
                raise BaseException('Unable to determine x and y variables.')

            dim_lengths = [len(v.dimensions) for v in raw_vars]
            if len(list(set(dim_lengths))) != 1:
                raise AttributeError('One or both of the specified variables has incorrect dimensions.')

            if request.GET['image_type'] == 'vectors':
                angles = cached_sg.angles[lon_obj.center_slicing]
                vectorstep = request.GET['vectorstep']
                # Skip this when vectorstep is 1; it is identical to taking
                # all the data, so save a microsecond or two.
                if vectorstep > 1:
                    data_dim = len(lon.shape)
                    # apply the vector step to all applicable dimensions
                    step_slice = (np.s_[::vectorstep],) * data_dim
                    lon = lon[step_slice]
                    lat = lat[step_slice]
                    x_var = x_var[step_slice]
                    y_var = y_var[step_slice]
                    angles = angles[step_slice]

                vectorscale = request.GET['vectorscale']
                padding_factor = calc_safety_factor(vectorscale)
                # figure out the average distance between lat/lon points,
                # after the vectorstep (if specified) has been applied
                spatial_idx_padding = calc_lon_lat_padding(lon, lat, padding_factor)
                spatial_idx = data_handler.lat_lon_subset_idx(lon, lat,
                                                              lonmin=wgs84_bbox.minx,
                                                              latmin=wgs84_bbox.miny,
                                                              lonmax=wgs84_bbox.maxx,
                                                              latmax=wgs84_bbox.maxy,
                                                              padding=spatial_idx_padding)
                subset_lon = self._spatial_data_subset(lon, spatial_idx)
                subset_lat = self._spatial_data_subset(lat, spatial_idx)
                # rotate vectors
                x_rot, y_rot = rotate_vectors(x_var, y_var, angles)
                spatial_subset_x_rot = self._spatial_data_subset(x_rot, spatial_idx)
                spatial_subset_y_rot = self._spatial_data_subset(y_rot, spatial_idx)
                return mpl_handler.quiver_response(subset_lon,
                                                   subset_lat,
                                                   spatial_subset_x_rot,
                                                   spatial_subset_y_rot,
                                                   request,
                                                   vectorscale)
            else:
                raise NotImplementedError('Image type "{}" is not supported.'.format(request.GET['image_type']))
def minmax(self, layer, request):
    time_index, time_value = self.nearest_time(layer, request.GET['time'])
    wgs84_bbox = request.GET['wgs84_bbox']

    with self.dataset() as nc:
        cached_sg = load_grid(self.topology_file)
        lon_name, lat_name = cached_sg.face_coordinates
        lon_obj = getattr(cached_sg, lon_name)
        lat_obj = getattr(cached_sg, lat_name)
        # centers = cached_sg.centers
        centers = pair_arrays(cached_sg.center_lon, cached_sg.center_lat)
        lon = centers[..., 0][lon_obj.center_slicing]
        lat = centers[..., 1][lat_obj.center_slicing]
        spatial_idx = data_handler.lat_lon_subset_idx(lon, lat,
                                                      lonmin=wgs84_bbox.minx,
                                                      latmin=wgs84_bbox.miny,
                                                      lonmax=wgs84_bbox.maxx,
                                                      latmax=wgs84_bbox.maxy)
        subset_lon = np.unique(spatial_idx[0])
        subset_lat = np.unique(spatial_idx[1])
        grid_variables = cached_sg.grid_variables

        vmin = None
        vmax = None
        raw_data = None
        if isinstance(layer, Layer):
            data_obj = getattr(cached_sg, layer.access_name)
            raw_var = nc.variables[layer.access_name]
            if len(raw_var.shape) == 4:
                z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
                raw_data = raw_var[time_index, z_index, subset_lon, subset_lat]
            elif len(raw_var.shape) == 3:
                raw_data = raw_var[time_index, subset_lon, subset_lat]
            elif len(raw_var.shape) == 2:
                raw_data = raw_var[subset_lon, subset_lat]
            else:
                raise BaseException('Unable to trim variable {0} data.'.format(layer.access_name))

            # handle grid variables
            if set([layer.access_name]).issubset(grid_variables):
                raw_data = avg_to_cell_center(raw_data, data_obj.center_axis)

            vmin = np.nanmin(raw_data).item()
            vmax = np.nanmax(raw_data).item()

        elif isinstance(layer, VirtualLayer):
            x_var = None
            y_var = None
            raw_vars = []
            for l in layer.layers:
                data_obj = getattr(cached_sg, l.access_name)
                raw_var = nc.variables[l.access_name]
                raw_vars.append(raw_var)
                if len(raw_var.shape) == 4:
                    z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
                    raw_data = raw_var[time_index, z_index, subset_lon, subset_lat]
                elif len(raw_var.shape) == 3:
                    raw_data = raw_var[time_index, subset_lon, subset_lat]
                elif len(raw_var.shape) == 2:
                    raw_data = raw_var[subset_lon, subset_lat]
                else:
                    raise BaseException('Unable to trim variable {0} data.'.format(l.access_name))

                if x_var is None:
                    if data_obj.vector_axis and data_obj.vector_axis.lower() == 'x':
                        x_var = raw_data
                    elif data_obj.center_axis == 1:
                        x_var = raw_data

                if y_var is None:
                    if data_obj.vector_axis and data_obj.vector_axis.lower() == 'y':
                        y_var = raw_data
                    elif data_obj.center_axis == 0:
                        y_var = raw_data

            if ',' in layer.var_name and raw_data is not None:
                # Vectors, so return the magnitude, skipping NaN components.
                data = [sqrt((u * u) + (v * v))
                        for u, v in zip(x_var.flatten(), y_var.flatten())
                        if not np.isnan(u) and not np.isnan(v)]
                vmin = min(data)
                vmax = max(data)

    return gmd_handler.from_dict(dict(min=vmin, max=vmax))
def getmap(self, layer, request):
    time_index, time_value = self.nearest_time(layer, request.GET['time'])
    wgs84_bbox = request.GET['wgs84_bbox']

    with self.dataset() as nc:
        cached_sg = load_grid(self.topology_file)
        lon_name, lat_name = cached_sg.face_coordinates
        lon_obj = getattr(cached_sg, lon_name)
        lat_obj = getattr(cached_sg, lat_name)
        lon = cached_sg.center_lon[lon_obj.center_slicing]
        lat = cached_sg.center_lat[lat_obj.center_slicing]

        if isinstance(layer, Layer):
            data_obj = getattr(cached_sg, layer.access_name)
            raw_var = nc.variables[layer.access_name]
            if len(raw_var.shape) == 4:
                z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
                raw_data = raw_var[time_index, z_index,
                                   data_obj.center_slicing[-2],
                                   data_obj.center_slicing[-1]]
            elif len(raw_var.shape) == 3:
                raw_data = raw_var[time_index,
                                   data_obj.center_slicing[-2],
                                   data_obj.center_slicing[-1]]
            elif len(raw_var.shape) == 2:
                raw_data = raw_var[data_obj.center_slicing]
            else:
                raise BaseException('Unable to trim variable {0} data.'.format(layer.access_name))

            # handle edge variables
            if data_obj.location is not None and 'edge' in data_obj.location:
                raw_data = avg_to_cell_center(raw_data, data_obj.center_axis)

            if request.GET['image_type'] == 'pcolor':
                return mpl_handler.pcolormesh_response(lon, lat, data=raw_data, request=request)
            elif request.GET['image_type'] in ['filledhatches', 'hatches', 'filledcontours', 'contours']:
                return mpl_handler.contouring_response(lon, lat, data=raw_data, request=request)
            else:
                raise NotImplementedError('Image type "{}" is not supported.'.format(request.GET['image_type']))

        elif isinstance(layer, VirtualLayer):
            x_var = None
            y_var = None
            raw_vars = []
            for l in layer.layers:
                data_obj = getattr(cached_sg, l.access_name)
                raw_var = nc.variables[l.access_name]
                raw_vars.append(raw_var)
                if len(raw_var.shape) == 4:
                    z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
                    raw_data = raw_var[time_index, z_index,
                                       data_obj.center_slicing[-2],
                                       data_obj.center_slicing[-1]]
                elif len(raw_var.shape) == 3:
                    raw_data = raw_var[time_index,
                                       data_obj.center_slicing[-2],
                                       data_obj.center_slicing[-1]]
                elif len(raw_var.shape) == 2:
                    raw_data = raw_var[data_obj.center_slicing]
                else:
                    raise BaseException('Unable to trim variable {0} data.'.format(l.access_name))

                raw_data = avg_to_cell_center(raw_data, data_obj.center_axis)

                if x_var is None:
                    if data_obj.vector_axis and data_obj.vector_axis.lower() == 'x':
                        x_var = raw_data
                    elif data_obj.center_axis == 1:
                        x_var = raw_data

                if y_var is None:
                    if data_obj.vector_axis and data_obj.vector_axis.lower() == 'y':
                        y_var = raw_data
                    elif data_obj.center_axis == 0:
                        y_var = raw_data

            if x_var is None or y_var is None:
                raise BaseException('Unable to determine x and y variables.')

            dim_lengths = [len(v.dimensions) for v in raw_vars]
            if len(list(set(dim_lengths))) != 1:
                raise AttributeError('One or both of the specified variables has incorrect dimensions.')

            if request.GET['image_type'] == 'vectors':
                angles = cached_sg.angles[lon_obj.center_slicing]
                vectorstep = request.GET['vectorstep']
                # Skip this when vectorstep is 1; it is identical to taking
                # all the data, so save a microsecond or two.
                if vectorstep > 1:
                    data_dim = len(lon.shape)
                    # apply the vector step to all applicable dimensions
                    step_slice = (np.s_[::vectorstep],) * data_dim
                    lon = lon[step_slice]
                    lat = lat[step_slice]
                    x_var = x_var[step_slice]
                    y_var = y_var[step_slice]
                    angles = angles[step_slice]

                vectorscale = request.GET['vectorscale']
                padding_factor = calc_safety_factor(vectorscale)
                # figure out the average distance between lat/lon points,
                # after the vectorstep (if specified) has been applied
                spatial_idx_padding = calc_lon_lat_padding(lon, lat, padding_factor)
                spatial_idx = data_handler.lat_lon_subset_idx(lon, lat,
                                                              lonmin=wgs84_bbox.minx,
                                                              latmin=wgs84_bbox.miny,
                                                              lonmax=wgs84_bbox.maxx,
                                                              latmax=wgs84_bbox.maxy,
                                                              padding=spatial_idx_padding)
                subset_lon = self._spatial_data_subset(lon, spatial_idx)
                subset_lat = self._spatial_data_subset(lat, spatial_idx)
                # rotate vectors
                x_rot, y_rot = rotate_vectors(x_var, y_var, angles)
                spatial_subset_x_rot = self._spatial_data_subset(x_rot, spatial_idx)
                spatial_subset_y_rot = self._spatial_data_subset(y_rot, spatial_idx)
                return mpl_handler.quiver_response(subset_lon,
                                                   subset_lat,
                                                   spatial_subset_x_rot,
                                                   spatial_subset_y_rot,
                                                   request,
                                                   vectorscale)
            else:
                raise NotImplementedError('Image type "{}" is not supported.'.format(request.GET['image_type']))
def minmax(self, layer, request):
    time_index, time_value = self.nearest_time(layer, request.GET['time'])
    wgs84_bbox = request.GET['wgs84_bbox']

    with self.dataset() as nc:
        cached_sg = load_grid(self.topology_file)
        lon_name, lat_name = cached_sg.face_coordinates
        lon_obj = getattr(cached_sg, lon_name)
        lat_obj = getattr(cached_sg, lat_name)
        lon = cached_sg.center_lon[lon_obj.center_slicing]
        lat = cached_sg.center_lat[lat_obj.center_slicing]
        spatial_idx = data_handler.lat_lon_subset_idx(lon, lat,
                                                      lonmin=wgs84_bbox.minx,
                                                      latmin=wgs84_bbox.miny,
                                                      lonmax=wgs84_bbox.maxx,
                                                      latmax=wgs84_bbox.maxy)
        subset_lon = np.unique(spatial_idx[0])
        subset_lat = np.unique(spatial_idx[1])
        grid_variables = cached_sg.grid_variables

        vmin = None
        vmax = None
        raw_data = None
        if isinstance(layer, Layer):
            data_obj = getattr(cached_sg, layer.access_name)
            raw_var = nc.variables[layer.access_name]
            if len(raw_var.shape) == 4:
                z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
                raw_data = raw_var[time_index, z_index, subset_lon, subset_lat]
            elif len(raw_var.shape) == 3:
                raw_data = raw_var[time_index, subset_lon, subset_lat]
            elif len(raw_var.shape) == 2:
                raw_data = raw_var[subset_lon, subset_lat]
            else:
                raise BaseException('Unable to trim variable {0} data.'.format(layer.access_name))

            # handle grid variables
            if set([layer.access_name]).issubset(grid_variables):
                raw_data = avg_to_cell_center(raw_data, data_obj.center_axis)

            vmin = np.nanmin(raw_data).item()
            vmax = np.nanmax(raw_data).item()

        elif isinstance(layer, VirtualLayer):
            x_var = None
            y_var = None
            raw_vars = []
            for l in layer.layers:
                data_obj = getattr(cached_sg, l.access_name)
                raw_var = nc.variables[l.access_name]
                raw_vars.append(raw_var)
                if len(raw_var.shape) == 4:
                    z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
                    raw_data = raw_var[time_index, z_index, subset_lon, subset_lat]
                elif len(raw_var.shape) == 3:
                    raw_data = raw_var[time_index, subset_lon, subset_lat]
                elif len(raw_var.shape) == 2:
                    raw_data = raw_var[subset_lon, subset_lat]
                else:
                    raise BaseException('Unable to trim variable {0} data.'.format(l.access_name))

                if x_var is None:
                    if data_obj.vector_axis and data_obj.vector_axis.lower() == 'x':
                        x_var = raw_data
                    elif data_obj.center_axis == 1:
                        x_var = raw_data

                if y_var is None:
                    if data_obj.vector_axis and data_obj.vector_axis.lower() == 'y':
                        y_var = raw_data
                    elif data_obj.center_axis == 0:
                        y_var = raw_data

            if ',' in layer.var_name and raw_data is not None:
                # Vectors, so return the magnitude, skipping NaN components.
                data = [sqrt((u * u) + (v * v))
                        for u, v in zip(x_var.flatten(), y_var.flatten())
                        if not np.isnan(u) and not np.isnan(v)]
                vmin = min(data)
                vmax = max(data)

    return gmd_handler.from_dict(dict(min=vmin, max=vmax))