def indicesToBBOX(indices, src_img):
    """
    Takes indices from DG image and returns the BBOX in coordinates of transform.

    :param indices: list
    :param src_img: raster data
    :return: Shapely POLYGON
    """
    min_coords = xy(src_img.transform, indices[1], indices[2])
    max_coords = xy(src_img.transform, indices[0], indices[3])
    return box(min_coords[0], min_coords[1], max_coords[0], max_coords[1])

def test_xy_gcps_rpcs(dataset, transform_attr, coords, expected):
    with rasterio.open(dataset, 'r') as src:
        transform = getattr(src, transform_attr)
        if transform_attr == 'gcps':
            transform = transform[0]
        for coord, truth in zip(coords, expected):
            assert xy(transform, *coord) == pytest.approx(truth)
        # check offset behaviour
        assert xy(transform, 0, 0, offset='lr') == \
            xy(transform, 0, 1, offset='ll') == \
            xy(transform, 1, 1, offset='ul') == \
            xy(transform, 1, 0, offset='ur')

def to_xarray(self, columns, dim_name="time", nodata=0, dtype="uint8"):
    """
    Convert a data column to an xarray data array.

    Parameters
    ----------
    columns : str or list of str
        name or names of the data columns.
    dim_name : str
        name of the outermost dimension set by the `columns` argument.
    nodata : numeric
        value to be assigned to pixels with no data.
    dtype : str or numpy dtype
        the data type.

    Returns
    -------
    da : xr.DataArray
        An xarray data array.
    """
    x = self[self.x_column].values
    y = self[self.y_column].values
    xres, yres = self.res
    i = (y - min(y)) // yres
    j = (x - min(x)) // xres

    # ensure that `columns` is a list
    if isinstance(columns, str):
        columns = [columns]

    # use a head-tail iteration pattern to get the array shape and prepare
    # the pixel coordinates
    arr = self._to_ndarray(columns[0], i, j, nodata, dtype)
    num_rows, num_cols = arr.shape
    _transform = self.get_transform()
    cols = np.arange(num_cols)
    rows = np.arange(num_rows)
    x_coords, _ = transform.xy(_transform, cols, cols)
    _, y_coords = transform.xy(_transform, rows, rows)

    return xr.DataArray(
        [arr] + [
            self._to_ndarray(column, i, j, nodata, dtype)
            for column in columns[1:]
        ],
        dims=[dim_name, self.y_column, self.x_column],
        coords={
            self.x_column: x_coords,
            self.y_column: y_coords,
            dim_name: columns,
        },
        attrs=dict(nodata=nodata, pyproj_srs=f"epsg:{self.crs.to_epsg()}"),
    )

def get_ref_da(geom, res, fill=0, crs=None):
    if crs is None:
        crs = settings.CRS
    ref_transform, (ref_height, ref_width) = _calculate_transform(geom, res)
    rows = np.arange(ref_height)
    cols = np.arange(ref_width)
    xs, _ = transform.xy(ref_transform, cols, cols)
    _, ys = transform.xy(ref_transform, rows, rows)
    ref_da = xr.DataArray(fill, dims=('y', 'x'), coords={'y': ys, 'x': xs})
    ref_da.attrs['pyproj_srs'] = crs
    return ref_da

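# Illustrative sketch (values assumed, not from the sources above) of the
# coordinate-vector pattern used in to_xarray and get_ref_da: passing the same index
# array twice to rasterio.transform.xy and keeping only the x (or y) half yields the
# per-column (or per-row) cell-centre coordinates.
import numpy as np
from affine import Affine
from rasterio.transform import xy

ref_transform = Affine(10.0, 0.0, 500000.0, 0.0, -10.0, 4100000.0)  # assumed 10 m grid
cols = np.arange(4)
rows = np.arange(3)
x_coords, _ = xy(ref_transform, cols, cols)  # x depends only on the column index
_, y_coords = xy(ref_transform, rows, rows)  # y depends only on the row index
# x_coords -> [500005.0, 500015.0, 500025.0, 500035.0]
# y_coords -> [4099995.0, 4099985.0, 4099975.0]
# (returned as a list or array, depending on the rasterio version)
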
def test_xy_rowcol_inverse():
    # TODO this is an ideal candidate for
    # property-based testing with hypothesis
    aff = Affine.identity()
    rows_cols = ([0, 0, 10, 10], [0, 10, 0, 10])
    assert rows_cols == rowcol(aff, *xy(aff, *rows_cols))

def array2Coords(transform, row, col):
    """
    * convert between array position and coords
    * params are row, col (y, x) as expected by rasterio
    * returns coords at the CENTRE of the cell
    """
    return xy(transform, row, col)

def map_pixels_to_coordinates(reference_tiff, dst_epsg, pixels):
    """We are assuming that the pixels are a list of tuples.
    For example: [(row1, col1), (row2, col2)]"""
    rows = [row for (row, _) in pixels]
    cols = [col for (_, col) in pixels]
    xs, ys = transform.xy(reference_tiff.transform, rows, cols)
    dst_crs = rio.crs.CRS.from_epsg(dst_epsg)
    return map_to_new_crs(reference_tiff.crs, dst_crs, xs, ys)

def get_cropped_profile(profile: dict, slice_x: slice, slice_y: slice):
    """
    slice_x and slice_y are numpy slices
    """
    x_start = slice_x.start or 0
    y_start = slice_y.start or 0
    x_stop = slice_x.stop or profile['width']
    y_stop = slice_y.stop or profile['height']

    width = x_stop - x_start
    height = y_stop - y_start

    profile_cropped = profile.copy()

    trans = profile['transform']
    x_cropped, y_cropped = xy(trans, y_start, x_start, offset='ul')
    trans_list = list(trans.to_gdal())
    trans_list[0] = x_cropped
    trans_list[3] = y_cropped
    transform_cropped = Affine.from_gdal(*trans_list)
    profile_cropped['transform'] = transform_cropped

    profile_cropped['height'] = height
    profile_cropped['width'] = width

    return profile_cropped

def array2Coords(a, row, col):
    """
    * convert between array position and coords
    * params are row, col (y, x) as expected by rasterio
    * returns coords at the CENTRE of the cell
    """
    x, y = xy(a, row, col)
    return int(x), int(y)

def grid_x(self):
    try:
        return self._grid_x
    except AttributeError:
        cols = np.arange(self.meta['width'])
        x, _ = transform.xy(self.meta['transform'], cols, cols)
        self._grid_x = x
        return self._grid_x

def grid_y(self):
    try:
        return self._grid_y
    except AttributeError:
        rows = np.arange(self.meta['height'])
        _, y = transform.xy(self.meta['transform'], rows, rows)
        self._grid_y = y
        return self._grid_y

def rowcol_to_latlon(row, col, res=250):
    row = np.asarray(row) if type(row) is list else row
    col = np.asarray(col) if type(col) is list else col
    x, y = xy(Affine(*albers_conus_transform(res)), row, col)
    p1 = Proj(CRS.from_wkt(albers_conus_crs()))
    p2 = Proj(proj='latlong', datum='WGS84')
    lon, lat = transform(p1, p2, x, y)
    return lat, lon

def getCoordinates(dstransform, width_df):
    new_data = {'lon': [], 'lat': [], 'width_lons': [], 'width_lats': []}
    for idx, row in width_df.iterrows():
        lon, lat = transform.xy(dstransform, row['coli'], row['rowi'])
        new_data['lon'].append(lon)
        new_data['lat'].append(lat)
        width_lons, width_lats = transform.xy(dstransform, row['width_coli'],
                                              row['width_rowi'])
        new_data['width_lons'].append(width_lons)
        new_data['width_lats'].append(width_lats)
    width_df['lon'] = new_data['lon']
    width_df['lat'] = new_data['lat']
    width_df['width_lons'] = new_data['width_lons']
    width_df['width_lats'] = new_data['width_lats']
    return width_df

def getCoordinates(dstransform, width_df):
    new_data = {'x': [], 'y': [], 'width_x': [], 'width_y': []}
    for idx, row in width_df.iterrows():
        x, y = transform.xy(dstransform, row['coli'], row['rowi'])
        new_data['x'].append(x)
        new_data['y'].append(y)
        width_x, width_y = transform.xy(dstransform, row['width_coli'],
                                        row['width_rowi'])
        new_data['width_x'].append(width_x)
        new_data['width_y'].append(width_y)
    width_df['x'] = new_data['x']
    width_df['y'] = new_data['y']
    width_df['width_x'] = new_data['width_x']
    width_df['width_y'] = new_data['width_y']
    return width_df

def spatial_position(x: int, y: int, transform: Affine) -> list:
    """
    Convert a pixel coordinate to spatial coordinates.

    :param transform: affine transform of the raster
    :param x: pixel index, passed as the row argument of ``xy``
    :param y: pixel index, passed as the column argument of ``xy``
    :return: spatial coordinates
    """
    return xy(transform, x, y)

def ul(self, row, col):
    """Returns the coordinates (x, y) of the upper left corner of a pixel
    at `row` and `col` in the units of the dataset's coordinate reference
    system.

    Deprecated; Use `xy(row, col, offset='ul')` instead.
    """
    warnings.warn("ul method is deprecated. Use xy(row, col, offset='ul')",
                  DeprecationWarning)
    return xy(self.transform, row, col, offset='ul')

def test_xy():
    aff = Affine(300.0379266750948, 0.0, 101985.0,
                 0.0, -300.041782729805, 2826915.0)
    ul_x, ul_y = aff * (0, 0)
    xoff = aff.a
    yoff = aff.e
    assert xy(aff, 0, 0, offset='ul') == (ul_x, ul_y)
    assert xy(aff, 0, 0, offset='ur') == (ul_x + xoff, ul_y)
    assert xy(aff, 0, 0, offset='ll') == (ul_x, ul_y + yoff)
    expected = (ul_x + xoff, ul_y + yoff)
    assert xy(aff, 0, 0, offset='lr') == expected
    expected = (ul_x + xoff / 2, ul_y + yoff / 2)
    assert xy(aff, 0, 0, offset='center') == expected
    assert xy(aff, 0, 0, offset='lr') == \
        xy(aff, 0, 1, offset='ll') == \
        xy(aff, 1, 1, offset='ul') == \
        xy(aff, 1, 0, offset='ur')

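# A minimal sketch (unit transform assumed) of the offset behaviour exercised in
# test_xy above: rasterio.transform.xy returns the pixel centre by default, and the
# offset keyword selects a corner instead.
from affine import Affine
from rasterio.transform import xy

aff = Affine.identity()
assert xy(aff, 0, 0) == (0.5, 0.5)               # centre of pixel (row 0, col 0)
assert xy(aff, 0, 0, offset='ul') == (0.0, 0.0)  # upper-left corner
assert xy(aff, 0, 0, offset='lr') == (1.0, 1.0)  # lower-right corner
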
def get_xs_ys(cols, rows, transform):
    """
    Computes rasters of x and y coordinates, based on row and column counts and a
    defined transform.

    :param cols: list of ints, defining the column counts
    :param rows: list of ints, defining the row counts
    :param transform: np.ndarray, 1D, with 6 rasterio-compatible transform parameters
    :return: 2 np.ndarray (MxN): xs: x-coordinates, ys: y-coordinates
    """
    xs, ys = xy(transform, rows, cols)
    xs, ys = np.array(xs), np.array(ys)
    return xs, ys

def pixel_to_map(self, pixel_point):
    """Transform point from pixel to map-based coordinates.

    Args:
        pixel_point: (x, y) tuple in pixel coordinates

    Returns:
        (x, y) tuple in map coordinates
    """
    image_point = xy(self.transform, int(pixel_point[1]), int(pixel_point[0]))
    map_point = self.image2map.transform(*image_point)
    return map_point

def sample_from_inhabitable_area(img, transform, sample_size=1):
    # Get inhabitable areas; developed area codes run from 21 to 31
    # Source: https://www.usgs.gov/centers/eros/science/national-land-cover-database?qt-science_center_objects=0#qt-science_center_objects
    amask = (img >= 21) & (img <= 31)
    goodr, goodc = np.nonzero(amask)

    # Get random locations sampling from goodr, goodc
    r_choices = np.random.choice(np.arange(goodr.size), size=sample_size)
    rs, cs = goodr[r_choices], goodc[r_choices]

    # Translate back into North America coordinates (EPSG:4269)
    xs, ys = rtransform.xy(transform=transform, rows=rs, cols=cs)
    lons, lats = rwarp.transform(src.crs, block_groups.crs, xs, ys)
    return lons, lats

def make_geometry(clu, X, affine, crs, buffer_amount=100):
    """
    Convert cluster cells into contiguous convex hulls.
    These are then buffered and overlapping polygons are merged.

    Parameters
    ----------
    clu : list of sets
        The array of clusters with indices.
    X : array-like, shape = [n_samples, n_features]
        The list of points with coordinates.
    affine : affine.Affine
        Raster affine transformation.
    crs : CRS
        Coordinate reference system.
    buffer_amount : int, default 100
        Amount in metres by which to buffer polygons before merging.

    Returns
    -------
    clusters : GeoDataFrame
        The geometry-fied clusters.
    """
    clusters = []
    for c in clu:
        coords = [X[loc] for loc in c]
        coords_real = [xy(affine, loc[0], loc[1]) for loc in coords]
        m = MultiPoint(coords_real)
        clusters.append(m.wkt)

    gdf = pd.DataFrame(clusters)
    geometry = gdf[0].map(shapely.wkt.loads)
    gdf = gdf.drop(0, axis=1)
    gdf = gpd.GeoDataFrame(gdf, crs=crs, geometry=geometry)

    gdf["geometry"] = gdf.geometry.convex_hull
    buffer_amount /= 1e5  # divide by 100K for rough conversion to degrees
    gdf["geometry"] = gdf.geometry.buffer(buffer_amount)
    gdf = merge_overlap(gdf)
    gdf["geometry"] = gdf.geometry.convex_hull
    gdf = merge_overlap(gdf)

    print("Number of clusters:", len(gdf))

    return gdf

def get_utm_zone(crs, transform, shape):
    """
    Calculates the UTM zone for the image center

    :param crs: image crs
    :param transform: image transform
    :param shape: image size [height, width]
    :return: UTM zone in format 'EPSG:32XYZ'
    """
    # find image extents
    # todo: check for image size!
    # if it is more than 600 km longitude - recommend not to transform to utm at once

    # find image center
    center_xy = xy(transform, shape[0] / 2, shape[1] / 2)
    center_latlon = warp.transform(crs, CRS_LATLON,
                                   [center_xy[0]], [center_xy[1]])
    # calc zone
    return _utm_zone(center_latlon[1][0], center_latlon[0][0])

def get_cropped_profile(profile: dict, slice_x: slice, slice_y: slice) -> dict:
    """
    This is a tool for using a reference profile and numpy slices (i.e.
    np.s_[start: stop]) to create a new profile that is within the window of
    slice_x, slice_y.

    Parameters
    ----------
    profile : dict
        The reference rasterio profile.
    slice_x : slice
        The horizontal slice.
    slice_y : slice
        The vertical slice.

    Returns
    -------
    dict:
        The cropped rasterio profile.
    """
    x_start = slice_x.start or 0
    y_start = slice_y.start or 0
    x_stop = slice_x.stop or profile['width']
    y_stop = slice_y.stop or profile['height']

    if (x_start < 0) | (x_stop < 0) | (y_start < 0) | (y_stop < 0):
        raise ValueError('Slices must be positive')

    width = x_stop - x_start
    height = y_stop - y_start

    profile_cropped = profile.copy()

    trans = profile['transform']
    x_cropped, y_cropped = xy(trans, y_start, x_start, offset='ul')
    trans_list = list(trans.to_gdal())
    trans_list[0] = x_cropped
    trans_list[3] = y_cropped
    transform_cropped = Affine.from_gdal(*trans_list)
    profile_cropped['transform'] = transform_cropped

    profile_cropped['height'] = height
    profile_cropped['width'] = width

    return profile_cropped

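# A hypothetical usage sketch of get_cropped_profile above; the file name, slice
# bounds, and expected sizes are illustrative assumptions only.
import numpy as np
import rasterio

with rasterio.open('example.tif') as src:  # hypothetical input raster
    profile = src.profile

cropped = get_cropped_profile(profile, np.s_[10:110], np.s_[20:120])
# cropped['width'] == 100, cropped['height'] == 100, and cropped['transform'] has its
# origin moved to the upper-left corner of pixel (row=20, col=10) of the source grid.
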
def load_dem(to_load, meta=None):
    """Loads and transforms a Digital Elevation Model (DEM) image into a DataFrame.

    Parameters
    ----------
    to_load : str or arr
        path to raster or an in-memory array with DEM data
    meta : dict, optional
        dictionary of raster attributes, must include width, height, transform,
        and crs

    Returns
    -------
    df : DataFrame
        flattened raster with additional derived fields
    """
    if isinstance(to_load, str):
        with rasterio.open(to_load) as src:
            dem = src.read()
            meta = src.meta
    elif isinstance(to_load, np.ndarray) and meta is not None:
        dem = to_load
    else:
        raise TypeError("to_load must be a raster path or a numpy array with meta provided")

    df = pd.DataFrame(columns=['elevation', 'lat', 'lon'])
    df['elevation'] = dem.ravel()
    df['elevation'] = df['elevation'].astype('Int64')

    # fetch lat and lon for each pixel in a raster
    rows, cols = np.indices((meta['height'], meta['width']))
    xs, ys = transform.xy(meta['transform'], rows.ravel(), cols.ravel())
    lons, lats = warp.transform(meta['crs'], {'init': 'EPSG:4326'}, xs, ys)
    df['lat'] = lats
    df['lon'] = lons

    # nodata represented as -32768
    df.loc[df.elevation == -32768] = np.nan

    return df

def get_utm_zone(crs, transform, shape):
    """
    Calculates the UTM zone for the image center

    :param crs: image crs
    :param transform: image transform
    :param shape: image size [height, width]
    :return: rasterio CRS of given UTM zone
    """
    # find image extents
    # todo: check for image size!
    # if it is more than 600 km longitude, or crosses the equator, or the 80N/80S line
    # - recommend not to transform to utm at once

    # find image center
    center_xy = xy(transform, shape[0] / 2, shape[1] / 2)
    center_latlon = warp.transform(crs, CRS_LATLON,
                                   [center_xy[0]], [center_xy[1]])
    # calc zone
    return _utm_zone(center_latlon[1][0], center_latlon[0][0])

def xy(self, row, col, offset="center"):
    """Returns the coordinates ``(x, y)`` of a pixel at `row` and `col`.
    The pixel's center is returned by default, but a corner can be returned
    by setting `offset` to one of `ul, ur, ll, lr`.

    Parameters
    ----------
    row : int
        Pixel row.
    col : int
        Pixel column.
    offset : str, optional
        Determines if the returned coordinates are for the center of the
        pixel or for a corner.

    Returns
    -------
    tuple
        ``(x, y)``
    """
    return xy(self.transform, row, col, offset=offset)

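# A short, hedged usage sketch of the dataset xy method above (the file name and
# indices are illustrative):
import rasterio

with rasterio.open('example.tif') as dataset:  # hypothetical file
    x_center, y_center = dataset.xy(0, 0)          # centre of the first pixel
    x_ul, y_ul = dataset.xy(0, 0, offset='ul')     # its upper-left corner
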
def get_zonal_stats_from_point(self, point: Vector3) -> List[Optional[Dict]]:
    results = []
    if self.boundary_data:
        for plugin in (x for x in (self.terrain_data, self.attribute_data,
                                   self.flow_dir_data, self.flow_dir_data)
                       if x is not None):
            if plugin is self.terrain_data:
                var = self._elevation_attribute.selected
            elif plugin is self.attribute_data:
                var = self._attribute.selected
            else:
                var = ''

            raster = plugin.get_data(var)
            affine = plugin.affine
            res = plugin.resolution
            var_stats = plugin.variable_stats(var)
            nodata = var_stats.nodata_value

            # Transform point coordinates to crs of raster
            p = shapely.geometry.Point(
                *transform.xy(affine, point.x / res, point.y / res))
            zones = []
            for feat in self.boundary_data.get_features():
                if shapely.geometry.shape(feat['geometry']).contains(p):
                    zones.append(feat)

            # Retrieve zonal stats for this raster
            result = zonal_stats(zones, raster, affine=affine, nodata=nodata,
                                 add_stats=self.zonal_stats)
            for j, row in enumerate(result):
                row['Name'] = "{} (Zone {})".format(
                    plugin.data_name, zones[j].get('id'))
                results.append(row)
    return results

def get_zonal_stats_from_feature(
        self, feature: LinearRing) -> List[Optional[Dict]]:
    results = []
    if self.terrain_data:
        # Normalize feature coordinates to terrain resolution
        t_res = self.terrain_data.resolution
        normalized_coords = [(p[0] / t_res, p[1] / t_res)
                             for p in feature.coords]
        for plugin in (x for x in (self.terrain_data, self.attribute_data,
                                   self.flow_dir_data, self.flow_dir_data)
                       if x is not None):
            if plugin is self.terrain_data:
                var = self._elevation_attribute.selected
            elif plugin is self.attribute_data:
                var = self._attribute.selected
            else:
                var = ''

            raster = plugin.get_data(var, Timeline.app().current)
            affine = plugin.affine
            var_stats = plugin.variable_stats(var)
            nodata = var_stats.nodata_value
            feat = Polygon(
                *[[transform.xy(affine, *p) for p in normalized_coords]])

            # Transform normalized raster coordinates to CRS of raster to
            # query and obtain results
            result = zonal_stats(feat, raster, affine=affine, nodata=nodata,
                                 add_stats=self.zonal_stats)[0]
            result['Name'] = plugin.data_name
            results.append(result)
    return results

def raster_to_lines(guess_skel_in):
    """
    Convert thinned raster to linestring geometry.

    Parameters
    ----------
    guess_skel_in : path-like
        Output from thin().

    Returns
    -------
    guess_gdf : GeoDataFrame
        Converted to geometry.
    """
    rast = rasterio.open(guess_skel_in)
    arr = rast.read(1)
    affine = rast.transform

    max_row = arr.shape[0]
    max_col = arr.shape[1]
    lines = []

    for row in range(0, max_row):
        for col in range(0, max_col):
            loc = (row, col)
            if arr[loc] == 1:
                for i in range(-1, 2):
                    for j in range(-1, 2):
                        next_row = row + i
                        next_col = col + j
                        next_loc = (next_row, next_col)

                        # ensure we're within bounds
                        # ensure we're not looking at the same spot
                        if (
                            next_row < 0
                            or next_col < 0
                            or next_row >= max_row
                            or next_col >= max_col
                            or next_loc == loc
                        ):
                            continue

                        if arr[next_loc] == 1:
                            line = (loc, next_loc)
                            rev = (line[1], line[0])
                            if line not in lines and rev not in lines:
                                lines.append(line)

    real_lines = []
    for line in lines:
        real = (xy(affine, line[0][0], line[0][1]),
                xy(affine, line[1][0], line[1][1]))
        real_lines.append(real)

    shapes = []
    for line in real_lines:
        shapes.append(LineString([Point(line[0]), Point(line[1])]).wkt)

    guess_gdf = pd.DataFrame(shapes)
    geometry = guess_gdf[0].map(shapely.wkt.loads)
    guess_gdf = guess_gdf.drop(0, axis=1)
    guess_gdf = gpd.GeoDataFrame(guess_gdf, crs=rast.crs, geometry=geometry)

    guess_gdf["same"] = 0
    guess_gdf = guess_gdf.dissolve(by="same")

    return guess_gdf

def test_bogus_offset():
    with pytest.raises(ValueError):
        xy(None, 1, 0, offset='bogus')