def test_iter_array(self):
    values = np.random.rand(2, 2, 4, 4)
    mask = np.random.random_integers(0, 1, values.shape)
    values = np.ma.array(values, mask=mask)
    for idx in iter_array(values):
        self.assertFalse(values.mask[idx])
    self.assertEqual(len(list(iter_array(values, use_mask=True))), len(values.compressed()))
    self.assertEqual(len(list(iter_array(values, use_mask=False))), len(values.data.flatten()))

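# --- Hedged illustration (not the ocgis implementation): the behavior the test above relies on.
# iter_array is assumed to yield index tuples over an array, skipping masked elements by default
# for masked arrays, and to yield (index, value) pairs when return_value=True.
import numpy as np

def iter_array_sketch(arr, use_mask=True, return_value=False, mask=None):
    if mask is None:
        mask = np.ma.getmaskarray(arr)
    for idx in np.ndindex(arr.shape):
        if use_mask and mask[idx]:
            continue
        yield (idx, arr[idx]) if return_value else idx
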
def clip(coll, igeom):
    '''Do an intersects + intersection and set weights based on geometry areas.

    coll :: OcgCollection
    igeom :: Shapely Polygon or MultiPolygon

    returns OcgCollection'''

    ## logic for convenience. just return the provided collection if a NoneType
    ## is passed for the 'igeom' argument
    if igeom is not None:
        ## take advantage of shapely speedups
        prep_igeom = prepared.prep(igeom)
        ## the weight array
        weights = np.empty(coll.gid.shape, dtype=float)
        weights = np.ma.array(weights, mask=coll.gid.mask)
        ## do the spatial operation
        for idx, geom in iter_array(coll.geom_masked, return_value=True):
            if keep(prep_igeom, igeom, geom):
                new_geom = igeom.intersection(geom)
                weights[idx] = new_geom.area
                coll.geom[idx] = new_geom
        ## set maximum weight to one
        coll.weights = weights / weights.max()
    return coll

def _iter_consecutive_(self, values, threshold, operation):
    ## time index reference
    ref = np.arange(0, values.shape[0])
    ## perform requested logical operation
    if operation == 'gt':
        arr = values > threshold
    elif operation == 'lt':
        arr = values < threshold
    elif operation == 'gte':
        arr = values >= threshold
    elif operation == 'lte':
        arr = values <= threshold
    ## find longest sequence for each geometry across time dimension
    for zidx, rowidx, colidx in iter_array(values[0, :, :, :], use_mask=False):
        vec = arr[:, zidx, rowidx, colidx]
        ## check first if there is a series longer than 1
        if np.any(np.diff(ref[vec]) == 1):
            ## find locations where the values switch
            diff_idx = np.diff(vec)
            if diff_idx.shape != ref.shape:
                diff_idx = np.append(diff_idx, [False])
            split_idx = ref[diff_idx] + 1
            splits = np.array_split(vec, split_idx)
            fill = [a.sum() for a in splits if np.all(a)]
        ## case of only a singular occurrence
        elif np.any(vec):
            fill = [1]
        ## case for no occurrence
        else:
            fill = [0]
        yield fill

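# --- Worked example of the run-length logic in _iter_consecutive_ above, at a small scale.
# For one cell's boolean time series, switches are found with np.diff, the series is split at
# the switch points, and all-True segments are summed to get consecutive-occurrence lengths.
import numpy as np

vec = np.array([True, True, False, True, True, True, False])
ref = np.arange(vec.shape[0])
diff_idx = np.diff(vec)
if diff_idx.shape != ref.shape:
    diff_idx = np.append(diff_idx, [False])
split_idx = ref[diff_idx] + 1
splits = np.array_split(vec, split_idx)
print([a.sum() for a in splits if np.all(a)])  # [2, 3]
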
def wrap_or_unwrap(self, action, target, force=False):
    from ocgis.variable.geom import GeometryVariable
    from ocgis.spatial.grid import Grid

    if action not in (WrapAction.WRAP, WrapAction.UNWRAP):
        raise ValueError('"action" not recognized: {}'.format(action))

    if target.wrapped_state != action or force:
        if action == WrapAction.WRAP:
            attr = 'wrap'
        else:
            attr = 'unwrap'

        if isinstance(target, GeometryVariable):
            w = GeometryWrapper()
            func = getattr(w, attr)
            target_value = target.get_value()
            for idx, target_geom in iter_array(target_value, use_mask=True, return_value=True,
                                               mask=target.get_mask()):
                target_value.__setitem__(idx, func(target_geom))
        elif isinstance(target, Grid):
            ca = CoordinateArrayWrapper()
            func = getattr(ca, attr)
            func(target.x.get_value())
            target.remove_bounds()
            if target.has_allocated_point:
                getattr(target.get_point(), attr)()
            if target.has_allocated_polygon:
                getattr(target.get_polygon(), attr)()
        else:
            raise NotImplementedError(target)

def write(self, path):
    geoms = []
    uid = self.spatial.uid
    for ii, geom in iter_array(self.spatial.geom, return_value=True):
        geoms.append({'geom': geom, 'ugid': uid[ii]})
    sc = ShpCabinet()
    sc.write(geoms, path, sr=self.spatial.projection.sr)

def write_RCM3(self):
    rd = self.oblique_mercator
    ds = nc.Dataset(rd.uri)
    path = os.path.join(tempfile.mkdtemp(prefix='RCM3'), 'RCM3.shp')
    crs = fiona.crs.from_epsg(4326)
    driver = 'ESRI Shapefile'
    schema = {'geometry': 'Point', 'properties': {}}
    try:
        lats = ds.variables['lat'][:]
        lons = ds.variables['lon'][:] - 360
        n = lons.shape[0] * lons.shape[1]
        print n
        with fiona.open(path, 'w', driver=driver, crs=crs, schema=schema) as f:
            for ctr, (ii, jj) in enumerate(iter_array(lats, use_mask=False)):
                if ctr % 100 == 0:
                    print ctr, n
                point = Point(lons[ii, jj], lats[ii, jj])
                feature = {'properties': {}, 'geometry': mapping(point)}
                f.write(feature)
    finally:
        ds.close()

def get_datetime(self, arr):
    arr = np.atleast_1d(nc.num2date(arr, self.units, calendar=self.calendar))
    dt = datetime.datetime
    for idx, t in iter_array(arr, return_value=True):
        arr[idx] = dt(t.year, t.month, t.day,
                      t.hour, t.minute, t.second)
    return arr

def area(self):
    r_value = self.value
    fill = np.ones(r_value.shape, dtype=constants.np_float)
    fill = np.ma.array(fill, mask=r_value.mask)
    for (ii, jj), geom in iter_array(r_value, return_value=True):
        fill[ii, jj] = geom.area
    return fill

def update_crs(self, to_crs):
    ## if the crs values are the same, pass through
    if to_crs != self.crs:
        to_sr = to_crs.sr
        from_sr = self.crs.sr

        if self.geom.point is not None:
            self.geom.point.update_crs(to_sr, from_sr)
        try:
            self.geom.polygon.update_crs(to_sr, from_sr)
        except ImproperPolygonBoundsError:
            pass

        if self.grid is not None and self.geom.point is not None:
            r_grid_value = self.grid.value.data
            r_point_value = self.geom.point.value.data
            for (idx_row, idx_col), geom in iter_array(r_point_value, return_value=True, use_mask=False):
                x, y = geom.x, geom.y
                r_grid_value[0, idx_row, idx_col] = y
                r_grid_value[1, idx_row, idx_col] = x
            ## remove row and columns if they exist as this requires interpolation
            ## to make them vectors again.
            self.grid.row = None
            self.grid.col = None
        ## if there is no point dimension, then a grid representation is not
        ## possible. mask the grid values accordingly.
        elif self.grid is not None and self.geom.point is None:
            self.grid.value.mask = True

        self.crs = to_crs

def wrap_or_unwrap(self, action, target):
    from ocgis.variable.geom import GeometryVariable
    from ocgis.spatial.grid import Grid

    if action not in (WrapAction.WRAP, WrapAction.UNWRAP):
        raise ValueError('"action" not recognized: {}'.format(action))

    if action == WrapAction.WRAP:
        attr = 'wrap'
    else:
        attr = 'unwrap'

    if isinstance(target, GeometryVariable):
        w = GeometryWrapper()
        func = getattr(w, attr)
        target_value = target.get_value()
        for idx, target_geom in iter_array(target_value, use_mask=True, return_value=True,
                                           mask=target.get_mask()):
            target_value.__setitem__(idx, func(target_geom))
    elif isinstance(target, Grid):
        ca = CoordinateArrayWrapper()
        func = getattr(ca, attr)
        func(target.x.get_value())
        target.remove_bounds()
        if target.has_allocated_point:
            getattr(target.get_point(), attr)()
        if target.has_allocated_polygon:
            getattr(target.get_polygon(), attr)()
    else:
        raise NotImplementedError(target)

def intersects(self, polygon):
    ## do the initial grid subset
    grid = self.grid.subset(polygon=polygon)
    ## a prepared polygon
    prep_polygon = prepared.prep(polygon)
    ## the fill arrays
    geom = np.ones(grid.shape, dtype=object)
    geom = np.ma.array(geom, mask=True)
    geom_mask = geom.mask

    try:
        row = grid.row.value
        col = grid.column.value
        for ii, jj in product(range(row.shape[0]), range(col.shape[0])):
            pt = Point(col[jj], row[ii])
            geom[ii, jj] = pt
            if prep_polygon.intersects(pt):
                geom_mask[ii, jj] = False
            else:
                geom_mask[ii, jj] = True
    ## NcGridMatrixDimension correction
    except AttributeError:
        _row = grid.row
        _col = grid.column
        for ii, jj in iter_array(_row):
            pt = Point(_col[ii, jj], _row[ii, jj])
            geom[ii, jj] = pt
            if prep_polygon.intersects(pt):
                geom_mask[ii, jj] = False
            else:
                geom_mask[ii, jj] = True

    ret = self.__class__(grid=grid, geom=geom, uid=grid.uid)
    return ret

def unwrap(self, spatial):
    """
    :type spatial: :class:`ocgis.interface.base.dimension.spatial.SpatialDimension`
    """

    if self.get_wrapped_state(spatial) == self._flag_wrapped:
        # unwrap the geometries
        unwrap = Wrapper().unwrap
        to_wrap = self._get_to_wrap_(spatial)
        for tw in to_wrap:
            if tw is not None:
                geom = tw.value.data
                for (ii, jj), to_wrap in iter_array(geom, return_value=True, use_mask=False):
                    geom[ii, jj] = unwrap(to_wrap)

        if spatial._grid is not None:
            ref = spatial.grid.value.data[1, :, :]
            select = ref < 0
            ref[select] += 360

            if spatial.grid.col is not None:
                ref = spatial.grid.col.value
                select = ref < 0
                ref[select] += 360

                if spatial.grid.col.bounds is not None:
                    ref = spatial.grid.col.bounds
                    select = ref < 0
                    ref[select] += 360

            # attempt to unwrap the grid corners if they exist
            if spatial.grid.corners is not None:
                select = spatial.grid.corners[1] < 0
                spatial.grid.corners[1][select] += 360
    else:
        raise SpatialWrappingError('Data does not need to be unwrapped.')

def get_buffer(self, *args, **kwargs):
    """
    Return a shallow copy of the geometry variable with geometries buffered.

    .. note:: Accepts all parameters to :meth:`shapely.geometry.base.BaseGeometry.buffer`.

    An additional keyword argument is:

    :keyword str geom_type: The geometry type for the new buffered geometry if known in advance.
    :rtype: :class:`~ocgis.GeometryVariable`
    :raises: :class:`~ocgis.exc.EmptyObjectError`
    """
    raise_if_empty(self)

    # New geometry type for the buffered object.
    geom_type = kwargs.pop('geom_type', 'auto')

    ret = self.copy()
    new_value = np.empty_like(ret.get_value(), dtype=object)
    to_buffer = self.get_value()
    mask = self.get_mask()
    for idx, mask_value in iter_array(mask, return_value=True):
        if not mask_value:
            new_value[idx] = to_buffer[idx].buffer(*args, **kwargs)
        else:
            new_value[idx] = None
    ret.set_value(new_value)
    ret._geom_type = geom_type

    return ret

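# --- Hedged usage sketch for get_buffer above. The GeometryVariable constructor arguments
# (value/dimensions) are assumptions and may differ between ocgis versions.
from shapely.geometry import Point
from ocgis import GeometryVariable

gvar = GeometryVariable(value=[Point(1.0, 2.0), Point(3.0, 4.0)], dimensions='ngeom')
buffered = gvar.get_buffer(0.5)
print([g.geom_type for g in buffered.get_value()])  # expected: ['Polygon', 'Polygon']
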
def get_datetime(self, arr):
    """
    :param arr: An array of floats to convert to ``datetime``-like objects.
    :type arr: :class:`numpy.ndarray`
    :returns: ``object`` array of the same shape as ``arr`` with float objects converted to ``datetime`` objects.
    :rtype: :class:`numpy.ndarray`
    """

    # If there are month units, call the special procedure to convert those to datetime objects.
    if not self._has_months_units:
        arr = np.atleast_1d(nc.num2date(arr, str(self.units), calendar=self.calendar))
        dt = get_datetime_or_netcdftime
        for idx, t in iter_array(arr, return_value=True):
            # Attempt to convert times to datetime objects.
            try:
                arr[idx] = dt(t.year, t.month, t.day, t.hour, t.minute, t.second)
            # This may fail for some calendars; in that case, maintain the instance object returned from
            # netcdftime. See: http://netcdf4-python.googlecode.com/svn/trunk/docs/netcdftime.netcdftime.datetime-class.html
            except ValueError:
                arr[idx] = arr[idx]
    else:
        arr = get_datetime_from_months_time_units(arr, str(self.units),
                                                  month_centroid=constants.CALC_MONTH_CENTROID)
    return arr

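# --- Hedged illustration of the num2date conversion used above, calling netCDF4 directly.
# The units and calendar strings here are examples, not values taken from the source data.
import numpy as np
import netCDF4 as nc

times = np.array([0.0, 1.5, 31.0])
dates = nc.num2date(times, 'days since 2000-01-01 00:00:00', calendar='standard')
print(dates[1])  # 2000-01-02 12:00:00
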
def calc_weights(self, npd, geom):
    weight = np.ma.array(np.zeros((npd.shape[2], npd.shape[3]), dtype=float),
                         mask=npd.mask[0, 0, :, :])
    for ii, jj in iter_array(weight):
        weight[ii, jj] = geom[ii, jj].area
    weight = weight / weight.max()
    return weight

def _calculate_(self, values, percentile=None, operation=None):
    ## first map the dates to dynamic percentile index days
    from ocgis import env

    day_idx = self._get_day_index_(self.dataset.temporal.value[self._curr_group])
    dy_day_idx = map(self._get_dynamic_index_, day_idx.flat)
    gp = self._get_geometries_with_percentiles_(env.ops.dataset[0].variable, env.ops.geom.key,
                                                env.DIR_BIN, percentile)

    ## get threshold for each geometry
    ## special case for north carolina counties
    if env.ops.geom.key == 'us_counties':
        select_ugid = 39
    else:
        select_ugid = self.dataset.spatial._ugid
    ugid_ref = gp[select_ugid]

    compare = np.empty_like(values, dtype=float)
    for ii, jj, kk, ll in iter_array(values):
        ## get the geometry id
        gid = self.dataset.spatial.vector.uid[kk, ll]
        gid_ref = ugid_ref[gid]
        percentile_static = gid_ref[dy_day_idx[ii]]
        compare[ii, jj, kk, ll] = percentile_static

    ## perform requested logical operation
    if operation == 'gt':
        idx = values > compare
    elif operation == 'lt':
        idx = values < compare
    elif operation == 'gte':
        idx = values >= compare
    elif operation == 'lte':
        idx = values <= compare
    else:
        raise NotImplementedError('The operation "{0}" was not recognized.'.format(operation))

    ret = np.ma.sum(idx, axis=0)
    return ret

def test_geom_polygon_bounds(self):
    sdim = self.get_sdim(bounds=True)
    poly = sdim.geom.polygon.value
    fill = np.ma.array(np.zeros((2, 3, 4)), mask=False)
    for idx_row, idx_col in iter_array(poly):
        fill[0, idx_row, idx_col] = poly[idx_row, idx_col].centroid.y
        fill[1, idx_row, idx_col] = poly[idx_row, idx_col].centroid.x
    self.assertNumpyAll(fill, sdim.grid.value)

def get_iter(self):
    geoms = self.vector.geom
    name_id = self._name_id
    uid = self.vector.uid

    ret = {}
    for ii, jj in iter_array(geoms):
        ret[name_id] = uid[ii, jj]
        yield ((ii, jj), geoms[ii, jj], ret)

def _get_all_geoms_(self):
    geom = np.empty(self.col_pt.shape, dtype=object)
    for ii, jj in iter_array(self.col_pt, use_mask=False):
        geom[ii, jj] = Point(self.col_pt[ii, jj], self.row_pt[ii, jj])
    row = self.real_row.reshape(-1)
    col = self.real_col.reshape(-1)
    return geom, row, col

def calculate(self, values, v=None, mode='same'):
    """
    :param values: Array containing variable values.
    :type values: :class:`numpy.ma.core.MaskedArray`
    :param v: The one-dimensional array to convolve with ``values``.
    :type v: :class:`numpy.core.multiarray.ndarray`
    :param str mode: The convolution mode. See: http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html.
     The output mode ``full`` is not supported.
    :rtype: :class:`numpy.ma.core.MaskedArray`
    :raises: AssertionError
    """

    # 'full' is not supported as this would add dates to the temporal dimension
    assert mode in ('same', 'valid')
    assert len(values.shape) == 5

    # just to be safe, convert the second array to the same data type as the input values
    v = v.astype(values.dtype)

    # 'valid' will have fewer values than the input as it only keeps positions where the two convolved arrays
    # completely overlap
    shape_fill = list(values.shape)
    fill = np.zeros(shape_fill, dtype=self.dtype)

    # perform the convolution on the time axis
    itr = iter_array(values)
    for ie, it, il, ir, ic in itr:
        a = values[ie, :, il, ir, ic]
        res_convolve = np.convolve(a, v, mode=mode)
        if mode == 'valid':
            time_slice = slice(0, max(values.shape[1], v.shape[0]) - min(values.shape[1], v.shape[0]) + 1)
        else:
            time_slice = slice(None)
        fill[ie, time_slice, il, ir, ic] = res_convolve

    if mode == 'valid':
        # generate the mask for the output data and convert the output to a masked array
        mask = np.empty(fill.shape, dtype=bool)
        mask[...] = values.mask[0, 0, 0, :, :]
        fill = np.ma.array(fill, mask=mask)
        # mask the time steps where the two arrays do not completely overlap
        overlap_mask = np.ones(mask.shape, dtype=bool)
        overlap_mask[:, slice(0, 0 - (v.shape[0] - 1)), :, :, :] = False
        fill.mask[:] = np.logical_or(fill.mask, overlap_mask)
    else:
        # 'same' does not modify the output array size
        fill = np.ma.array(fill, mask=values.mask)

    return fill

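# --- Small numpy illustration of the per-cell operation in calculate above: convolving a time
# series with a uniform kernel gives a running mean; 'valid' keeps only fully overlapping windows.
import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
v = np.ones(3) / 3.0  # three-point moving-average kernel
print(np.convolve(a, v, mode='same'))   # length preserved; edges only partially overlap the kernel
print(np.convolve(a, v, mode='valid'))  # [2. 3. 4.]
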
def get_approx_res_days(self):
    diffs = np.array([], dtype=float)
    for tidx, tval in iter_array(self.value, return_value=True):
        try:
            diffs = np.append(diffs, np.abs((tval - self.value[tidx[0] + 1]).days))
        except IndexError:
            break
    return diffs.mean()

def iter_spatial_dimension(self, dim):
    geoms = dim.vector.geom
    name_id = dim._name_id
    uid = dim.vector.uid

    ret = {}
    for (ii, jj), geom in iter_array(geoms, return_value=True):
        ret[name_id] = uid[ii, jj]
        yield ((ii, jj), geom, ret)

def __iter__(self):
    _name_uid = self._name_uid
    _name_value = self._name_value
    uid = self.uid
    _conv_to_multi_ = self._conv_to_multi_

    for idx, geom in iter_array(self.value, return_value=True):
        row = {_name_uid: uid[idx],
               _name_value: _conv_to_multi_(geom)}
        yield (idx, row)

def weights(self):
    if self._weights is None:
        geom = self.geom
        weights = np.ones(geom.shape, dtype=float)
        weights = np.ma.array(weights, mask=geom.mask)
        for ii, jj in iter_array(geom):
            weights[ii, jj] = geom[ii, jj].area
        weights = weights / weights.max()
        self._weights = weights
    return self._weights

def resolution(self):
    diffs = np.array([], dtype=float)
    value = self.value
    for tidx, tval in iter_array(value, return_value=True):
        try:
            diffs = np.append(diffs, np.abs((tval - value[tidx[0] + 1]).days))
        except IndexError:
            break
    return diffs.mean()

def test_geom_point(self):
    sdim = self.get_sdim(bounds=True)
    with self.assertRaises(AttributeError):
        sdim.geom.value
    pt = sdim.geom.point.value
    fill = np.ma.array(np.zeros((2, 3, 4)), mask=False)
    for idx_row, idx_col in iter_array(pt):
        fill[0, idx_row, idx_col] = pt[idx_row, idx_col].y
        fill[1, idx_row, idx_col] = pt[idx_row, idx_col].x
    self.assertNumpyAll(fill, sdim.grid.value)

def write(self, path):
    geoms = []
    uid = self.spatial.uid
    attrs = self.spatial.attrs
    for ii, geom in iter_array(self.spatial.geom, return_value=True):
        dct = {'geom': geom, 'UGID': uid[ii]}
        for k, v in attrs.iteritems():
            dct[k] = v[ii]
        geoms.append(dct)
    sc = ShpCabinet()
    sc.write(geoms, path, sr=self.spatial.projection.sr)

def wrap_var(var):
    right_clip = make_poly((-90, 90), (180, 360))
    left_clip = make_poly((-90, 90), (-180, 180))

    def _shift_(geom):
        try:
            coords = np.array(geom.exterior.coords)
            coords[:, 0] = coords[:, 0] - 360
            ret = Polygon(coords)
        except AttributeError:
            polygons = np.empty(len(geom), dtype=object)
            for ii, polygon in enumerate(geom):
                coords = np.array(polygon.exterior.coords)
                coords[:, 0] = coords[:, 0] - 360
                polygons[ii] = Polygon(coords)
            ret = MultiPolygon(polygons)
        return ret

    geoms = var.spatial._value
    if not isinstance(geoms[0, 0], Point):
        for idx, geom in iter_array(geoms, return_value=True):
            bounds = np.array(geom.bounds)
            if np.all([bounds[0] > 180, bounds[2] > 180]):
                geoms[idx] = _shift_(geom)
            elif bounds[1] <= 180 and bounds[2] > 180:
                left = [poly for poly in _get_iter_(geom.intersection(left_clip))]
                right = [poly for poly in _get_iter_(_shift_(geom.intersection(right_clip)))]
                try:
                    geoms[idx] = MultiPolygon(left + right)
                except TypeError:
                    left = filter(lambda x: type(x) != LineString, left)
                    right = filter(lambda x: type(x) != LineString, right)
                    geoms[idx] = MultiPolygon(left + right)
            else:
                continue
    else:
        for idx, geom in iter_array(geoms, return_value=True):
            if geom.x > 180:
                geoms[idx] = Point(geom.x - 360, geom.y)

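# --- Small shapely illustration of the longitude shift applied by _shift_ above: geometries on
# a 0-360 grid with coordinates past 180 are translated by -360 into the -180 to 180 domain.
import numpy as np
from shapely.geometry import Polygon

poly = Polygon([(200.0, 10.0), (220.0, 10.0), (220.0, 20.0), (200.0, 20.0)])
coords = np.array(poly.exterior.coords)
coords[:, 0] -= 360
print(Polygon(coords).bounds)  # (-160.0, 10.0, -140.0, 20.0)
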
def grid(self):
    if self._grid is None and self._geom_to_grid:
        ## populate the grid using the point geometry representation
        ref_pv = self.geom.point.value
        shp = (2, ref_pv.shape[0], ref_pv.shape[1])
        fill = np.empty(shp, dtype=constants.np_float)
        for (idx_row, idx_col), geom in iter_array(ref_pv.data, return_value=True):
            fill[:, idx_row, idx_col] = geom.y, geom.x
        mask = np.empty_like(fill, dtype=bool)
        mask[:, :, :] = ref_pv.mask
        self._grid = SpatialGridDimension(value=np.ma.array(fill, mask=mask),
                                          uid=self.geom.point.uid)
    return self._grid

def _get_weights_(self):
    value = self._value
    value_mask = self._value_mask
    if isinstance(value[0, 0], Point) or env.OPTIMIZE_FOR_CALC:
        weights = np.ones(value.shape, dtype=float)
        weights_ma = np.ma.array(weights, mask=value_mask)
    else:
        weights = np.empty(value.shape, dtype=float)
        for idx, geom in iter_array(value, return_value=True):
            weights[idx] = geom.area
        weights_ma = np.ma.array(weights / weights.max(), mask=value_mask)
    return weights_ma

def _get_value_(self):
    ## we are interested in creating geometries for all the underlying coordinates
    ## regardless of whether the data is masked
    ref_grid = self.grid.value.data
    fill = self._get_geometry_fill_()
    r_data = fill.data
    for idx_row, idx_col in iter_array(ref_grid[0], use_mask=False):
        y = ref_grid[0, idx_row, idx_col]
        x = ref_grid[1, idx_row, idx_col]
        pt = Point(x, y)
        r_data[idx_row, idx_col] = pt
    return fill

def assertGridCorners(self, grid):
    """
    :type grid: :class:`ocgis.new_interface.grid.Grid`
    """
    assert grid.corners is not None

    def _get_is_ascending_(arr):
        """
        Return ``True`` if the array is ascending from index 0 to -1.

        :type arr: :class:`numpy.ndarray`
        :rtype: bool
        """
        assert arr.ndim == 1
        if arr[0] < arr[-1]:
            ret = True
        else:
            ret = False
        return ret

    # Assert polygon constructed from grid corners contains the associated centroid value.
    for ii, jj in itertools.product(list(range(grid.shape[0])), list(range(grid.shape[1]))):
        pt = Point(grid.get_value().data[1, ii, jj], grid.get_value().data[0, ii, jj])
        poly_corners = grid.corners.data[:, ii, jj]
        rtup = (poly_corners[0, :].min(), poly_corners[0, :].max())
        ctup = (poly_corners[1, :].min(), poly_corners[1, :].max())
        poly = make_poly(rtup, ctup)
        self.assertTrue(poly.contains(pt))

    # Assert masks are equivalent between value and corners.
    for (ii, jj), m in iter_array(grid.get_value().mask[0, :, :], return_value=True):
        if m:
            self.assertTrue(grid.corners.mask[:, ii, jj].all())
        else:
            self.assertFalse(grid.corners.mask[:, ii, jj].any())

    grid_y = grid._y
    grid_x = grid._x

    if grid_y is not None or grid_x is not None:
        self.assertEqual(_get_is_ascending_(grid_y.get_value()),
                         _get_is_ascending_(grid.corners.data[0, :, 0][:, 0]))
        self.assertEqual(_get_is_ascending_(grid_x.get_value()),
                         _get_is_ascending_(grid.corners.data[1, 0, :][:, 0]))

def _get_all_geoms_(self):
    geom = np.empty(self.shape, dtype=object)
    min_col, max_col, min_row, max_row = self.min_col, self.max_col, self.min_row, self.max_row
    for ii, jj in iter_array(geom, use_mask=False):
        geom[ii, jj] = Polygon(((min_col[ii, jj], min_row[ii, jj]),
                                (max_col[ii, jj], min_row[ii, jj]),
                                (max_col[ii, jj], max_row[ii, jj]),
                                (min_col[ii, jj], max_row[ii, jj])))
    row = self.real_row.reshape(-1)
    col = self.real_col.reshape(-1)
    return geom, row, col

def update_crs(self, to_sr, from_sr):
    ## we are modifying the original source data and need to copy the new
    ## values.
    new_value = self.value.copy()
    ## be sure to project masked geometries to maintain underlying geometries
    ## for masked values.
    r_value = new_value.data
    r_loads = wkb.loads
    for (idx_row, idx_col), geom in iter_array(r_value, return_value=True, use_mask=False):
        ogr_geom = CreateGeometryFromWkb(geom.wkb)
        ogr_geom.AssignSpatialReference(from_sr)
        ogr_geom.TransformTo(to_sr)
        r_value[idx_row, idx_col] = r_loads(ogr_geom.ExportToWkb())
    self._value = new_value

def _iter_value_(self, var):
    ## TODO: optimize
    if len(var.calc_value) > 0:
        for k, v in var.calc_value.iteritems():
            for gidx0, gidx1 in iter_array(var.spatial.value):
                for tidx, lidx in itertools.product(range(v.shape[0]), range(v.shape[1])):
                    value = v[tidx, lidx, gidx0, gidx1]
                    tgid = var.temporal_group.uid[tidx]
                    yield (tidx, lidx, gidx0, gidx1, value, k, tgid)
    elif type(var) == OcgMultivariateCalculationVariable:
        for gidx0, gidx1 in iter_array(var.spatial.value):
            for tidx, lidx in itertools.product(range(var.value.shape[0]), range(var.value.shape[1])):
                value = var.value[tidx, lidx, gidx0, gidx1]
                if var.temporal_group is None:
                    tgid = None
                else:
                    tgid = var.temporal_group.uid[tidx]
                yield (tidx, lidx, gidx0, gidx1, value, var.name, tgid)
    elif self.mode == 'raw':
        for gidx0, gidx1 in iter_array(var.spatial.value):
            for tidx, lidx in itertools.product(range(var.value.shape[0]), range(var.value.shape[1])):
                value = var.value[tidx, lidx, gidx0, gidx1]
                yield (tidx, lidx, gidx0, gidx1, value, None, None)
    else:
        if self.mode == 'calc' and type(var) == OcgVariable:
            pass
        else:
            raise NotImplementedError

def _select_(self, polygon):
    geom = np.empty(self.shape, dtype=object)
    row = np.array([], dtype=int)
    col = np.array([], dtype=int)
    prep_polygon = prepared.prep(polygon)
    for ii, jj in iter_array(self.col_pt, use_mask=False):
        pt = Point(self.col_pt[ii, jj], self.row_pt[ii, jj])
        geom[ii, jj] = pt
        if prep_polygon.intersects(pt):
            row = np.append(row, self.real_row[ii, jj])
            col = np.append(col, self.real_col[ii, jj])
    return geom, row, col

def _select_(self, polygon):
    emin_col, emin_row, emax_col, emax_row = polygon.envelope.bounds
    smin_col = contains(self.min_col, emin_col, emax_col, self.resolution)
    smax_col = contains(self.max_col, emin_col, emax_col, self.resolution)
    smin_row = contains(self.min_row, emin_row, emax_row, self.resolution)
    smax_row = contains(self.max_row, emin_row, emax_row, self.resolution)
    include = np.any((smin_col, smax_col), axis=0) * np.any((smin_row, smax_row), axis=0)

    from ocgis.util.spatial import index as si

    grid = si.build_index_grid(30.0, polygon)
    index = si.build_index(polygon, grid)
    index_intersects = si.index_intersects

    ## construct the reference matrices
    geom = np.empty(self.shape, dtype=object)
    row = np.array([], dtype=int)
    col = np.array([], dtype=int)

    real_row = self.real_row
    real_col = self.real_col
    min_row = self.min_row
    min_col = self.min_col
    max_row = self.max_row
    max_col = self.max_col

    for ii, jj in iter_array(include, use_mask=False):
        if include[ii, jj]:
            test_geom = Polygon(((min_col[ii, jj], min_row[ii, jj]),
                                 (max_col[ii, jj], min_row[ii, jj]),
                                 (max_col[ii, jj], max_row[ii, jj]),
                                 (min_col[ii, jj], max_row[ii, jj])))
            geom[ii, jj] = test_geom
            if index_intersects(test_geom, index):
                row = np.append(row, real_row[ii, jj])
                col = np.append(col, real_col[ii, jj])

    return geom, row, col

def get_intersection(self, *args, **kwargs):
    """
    .. note:: Accepts all parameters to :meth:`~ocgis.new_interface.geom.GeometryVariable.get_intersects`. Same
     return types.

    Additional arguments and/or keyword arguments are:

    :keyword bool inplace: (``=False``) If ``False`` (the default), deep copy the geometry array on the output
     before executing an intersection. If ``True``, modify the geometries in-place.
    :keyword bool intersects_check: (``=True``) If ``True`` (the default), first perform an intersects operation to
     limit the geometries tested for intersection. If ``False``, perform the intersection as is.
    """
    inplace = kwargs.pop(KeywordArgument.INPLACE, False)
    intersects_check = kwargs.pop(KeywordArgument.INTERSECTS_CHECK, True)
    return_slice = kwargs.get(KeywordArgument.RETURN_SLICE, False)
    subset_geometry = args[0]

    if intersects_check:
        ret = self.get_intersects(*args, **kwargs)
    else:
        if inplace:
            ret = self
        else:
            ret = self.copy()

    if intersects_check:
        # If indices are being returned, this will be a tuple.
        if return_slice:
            obj = ret[0]
        else:
            obj = ret
    else:
        if return_slice:
            global_slice = [(slice(d.bounds_global[0], d.bounds_global[1]) for d in self.dimensions)]
            ret = (ret, global_slice)
            obj = ret
        else:
            obj = ret

    if not obj.is_empty:
        if not inplace:
            obj.set_value(deepcopy(obj.get_value()))
        obj_value = obj.get_masked_value()
        for idx, geom in iter_array(obj_value, return_value=True):
            obj_value.data[idx] = geom.intersection(subset_geometry)
    return ret

def clip(self, polygon):
    ## perform an intersects operation first
    vd = self.intersects(polygon)
    ## prepare the geometry for intersection
    prep_igeom = prepared.prep(polygon)
    ## loop for the intersection
    geom = vd._geom
    for ii, jj in iter_array(geom):
        ref = geom[ii, jj]
        if not prep_igeom.contains(ref):
            new_geom = polygon.intersection(ref)
            geom[ii, jj] = new_geom
    ret = self.__class__(grid=vd.grid, geom=geom, uid=vd.uid)
    return ret

def _get_all_geoms_(self):
    ## the fill arrays
    geom = np.ones(self.grid.shape, dtype=object)
    geom = np.ma.array(geom, mask=False)
    ## loop performing the spatial operation
    try:
        row = self.grid.row.value
        col = self.grid.column.value
        for ii, jj in product(range(row.shape[0]), range(col.shape[0])):
            geom[ii, jj] = Point(col[jj], row[ii])
    ## NcGridMatrixDimension correction
    except AttributeError:
        _row = self.grid.row
        _col = self.grid.column
        for ii, jj in iter_array(_row):
            geom[ii, jj] = Point(_col[ii, jj], _row[ii, jj])
    return geom

def clip(self, igeom):
    ## logic for convenience. just return the provided collection if a NoneType
    ## is passed for the 'igeom' argument
    if igeom is not None:
        ## take advantage of shapely speedups
        prep_igeom = prepared.prep(igeom)
        ## the weight array
        weights = np.zeros(self.spatial.shape, dtype=float)
        weights = np.ma.array(weights, mask=self.spatial._value_mask)
        ## do the spatial operation
        for idx, geom in iter_array(self.spatial.value, return_value=True):
            if not prep_igeom.contains(geom):
                new_geom = igeom.intersection(geom)
                weights[idx] = new_geom.area
                self.spatial._value[idx] = new_geom
        ## set maximum weight to one
        self.spatial.weights = weights / np.ma.max(weights)

def area(self):
    """
    :return: geometry areas as a float masked array
    :rtype: :class:`numpy.ma.MaskedArray`
    """
    if self.is_empty:
        fill = None
    else:
        r_value = self.get_masked_value()
        fill = np.ones(r_value.shape, dtype=env.NP_FLOAT)
        mask = self.get_mask()
        if mask is not None:
            mask = mask.copy()
        fill = np.ma.array(fill, mask=mask)
        for slc, geom in iter_array(r_value, return_value=True):
            fill.data[slc] = geom.area
    return fill

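# --- Standalone illustration of the area-to-weights pattern used throughout (plain shapely and
# numpy, independent of ocgis): fill a masked array with areas, then normalize by the maximum.
import numpy as np
from shapely.geometry import box

geoms = [box(0, 0, 1, 1), box(0, 0, 2, 1), box(0, 0, 2, 2)]
areas = np.ma.array([g.area for g in geoms], mask=[False, False, True])
weights = areas / areas.max()
print(weights)  # [0.5 1.0 --]; the masked element is ignored by max()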