def _pixel_sz_trans(ds: gdal.Dataset, ps: float) -> gdal.Dataset:
    """ Resize the image by pixel size. """
    ds_trans = ds.GetGeoTransform()
    factor = ds_trans[1] / ps
    if list(ds_trans)[:2] == [0.0, 1.0] or round(factor, 2) == 1:
        return ds
    ds_proj = ds.GetProjection()
    ds_dtype = ds.GetRasterBand(1).DataType
    width, height = ds.RasterXSize, ds.RasterYSize
    ts_trans = list(ds_trans)
    ts_trans[1] = ps
    ts_trans[5] = -ps
    mem_drv = gdal.GetDriverByName('MEM')
    dst_ds = mem_drv.Create('', int(width * factor), int(height * factor),
                            ds.RasterCount, ds_dtype)
    dst_ds.SetProjection(ds_proj)
    dst_ds.SetGeoTransform(ts_trans)
    gdal.ReprojectImage(ds, dst_ds, ds_proj, ds_proj, gdalconst.GRA_CubicSpline)
    return dst_ds

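# Usage sketch for _pixel_sz_trans (not part of the original code); the input
# path 'scene.tif' and the 10 m target pixel size are hypothetical.
def _example_pixel_sz_trans():
    from osgeo import gdal
    src = gdal.Open('scene.tif', gdal.GA_ReadOnly)  # hypothetical input raster
    # Returns the source dataset unchanged when it has no georeferencing or is
    # already at (approximately) the requested pixel size, otherwise an
    # in-memory cubic-spline resampled copy.
    resampled = _pixel_sz_trans(src, 10.0)
    print(resampled.RasterXSize, resampled.RasterYSize)
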
def coherence_masking(input_gdal_dataset: Dataset, coherence_file_path: str,
                      coherence_thresh: float) -> None:
    """ Perform coherence masking on raster in-place.

    Based on the gdal_calc formula provided by Nahidul:
    gdal_calc.py -A 20151127-20151209_VV_8rlks_flat_eqa.cc.tif
                 -B 20151127-20151209_VV_8rlks_eqa.unw.tif
                 --outfile=test_v1.tif
                 --calc="B*(A>=0.8)-999*(A<0.8)"
                 --NoDataValue=-999
    """
    coherence_ds = gdal.Open(coherence_file_path, gdalconst.GA_ReadOnly)
    coherence_band = coherence_ds.GetRasterBand(1)
    src_band = input_gdal_dataset.GetRasterBand(1)
    ndv = np.nan
    coherence = coherence_band.ReadAsArray()
    src = src_band.ReadAsArray()
    var = {"coh": coherence, "src": src, "t": coherence_thresh, "ndv": ndv}
    formula = "where(coh>=t, src, ndv)"
    res = ne.evaluate(formula, local_dict=var)
    src_band.WriteArray(res)
    # update metadata
    input_gdal_dataset.GetRasterBand(1).SetNoDataValue(ndv)
    input_gdal_dataset.FlushCache()  # write to disk
    log.info(f"Applied coherence masking using coh file {coherence_file_path}")

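# Usage sketch for coherence_masking (not part of the original code); the file
# names and the 0.3 threshold are hypothetical.
def _example_coherence_masking():
    from osgeo import gdal
    # Open in update mode because the masking is applied to band 1 in-place.
    ifg_ds = gdal.Open('ifg_unw_eqa.tif', gdal.GA_Update)  # hypothetical path
    coherence_masking(ifg_ds, 'ifg_coherence.tif', coherence_thresh=0.3)
    ifg_ds = None  # dereference so changes are flushed to disk
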
def polygonize_pmap(pmap_ds: gdal.Dataset, pmap_threshold=131,
                    layer_name='unknown.gpkg', path_out=None) -> gdal.Dataset:
    pmap_bin_img = (pmap_ds.ReadAsArray() > pmap_threshold).astype(np.uint8)
    pmap_bin_ds = add_ref_to_img(pmap_bin_img, pmap_ds)
    #
    srs = osr.SpatialReference()
    srs.ImportFromWkt(pmap_ds.GetProjectionRef())
    #
    if path_out is None:
        ds_out = ogr.GetDriverByName('MEMORY').CreateDataSource('wrk')
        # ds_out = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource(f'/vsimem/{layer_name}.shp')
    else:
        drv = ogr.GetDriverByName('GPKG')
        if os.path.isfile(path_out):
            drv.DeleteDataSource(path_out)
        ds_out = drv.CreateDataSource(path_out)
        layer_name = os.path.splitext(os.path.basename(path_out))[0]
    ds_layer = ds_out.CreateLayer(layer_name, geom_type=ogr.wkbPolygon, srs=srs)
    fd = ogr.FieldDefn('DN', ogr.OFTInteger)
    ds_layer.CreateField(fd)
    dst_field = 0
    # path_pmap = '/home/ar/data/uiip/quarry_data_test/s2_u8_t3_msk.tif'
    ds_band = pmap_bin_ds.GetRasterBand(1)
    gdal.Polygonize(ds_band, ds_band, ds_layer, dst_field, [],
                    callback=gdal.TermProgress)
    return ds_out

def make_raster(in_ds: gdal.Dataset, fn: str, data: np.ndarray,
                data_type: object, Nodata=None) -> gdal.Dataset:
    """Create a one-band GeoTIFF.

    Parameters:
    ------------
    in_ds     - datasource to copy projection and geotransform from
    fn        - path to the file to create
    data      - NumPy array containing data to write
    data_type - output data type
    Nodata    - optional NoData value

    Returns:
    ------------
    out_ds - output datasource
    """
    driver = gdal.GetDriverByName('GTiff')
    out_ds = driver.Create(fn, in_ds.RasterXSize, in_ds.RasterYSize, 1, data_type)
    out_ds: gdal.Dataset
    # Copy the projection (coordinate system) from the input datasource
    out_ds.SetProjection(in_ds.GetProjection())
    # Copy the geotransform from the input datasource
    out_ds.SetGeoTransform(in_ds.GetGeoTransform())
    out_band = out_ds.GetRasterBand(1)
    out_band: gdal.Band
    if Nodata is not None:
        out_band.SetNoDataValue(Nodata)
    out_band.WriteArray(data)
    out_band.FlushCache()
    out_band.ComputeStatistics(False)
    return out_ds

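# Usage sketch for make_raster (not part of the original code); 'template.tif',
# 'ndvi.tif' and the all-zero array are hypothetical placeholders.
def _example_make_raster():
    import numpy as np
    from osgeo import gdal
    template = gdal.Open('template.tif')  # hypothetical raster providing georeferencing
    ndvi = np.zeros((template.RasterYSize, template.RasterXSize), dtype=np.float32)
    out_ds = make_raster(template, 'ndvi.tif', ndvi, gdal.GDT_Float32, Nodata=-9999)
    out_ds = None  # close to flush the GeoTIFF to disk
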
def load_from_dataset(self, image_dataset: gdal.Dataset) -> Image:
    geo_transform = self._load_geotransform(image_dataset)
    projection = image_dataset.GetProjection()
    pixels = image_dataset.ReadAsArray()
    if pixels.ndim > 2:
        pixels = pixels.transpose(1, 2, 0)
    return Image(pixels, geo_transform, projection)

def write_mask_to_file(f: gdal.Dataset, file_name: str, mask: np.ndarray) -> None:
    # mask.shape is (rows, cols), i.e. (height, width)
    (height, width) = mask.shape
    out_image = gdal.GetDriverByName('GTiff').Create(file_name, width, height, bands=1)
    out_image.SetProjection(f.GetProjection())
    out_image.SetGeoTransform(f.GetGeoTransform())
    out_image.GetRasterBand(1).WriteArray(mask)
    out_image.FlushCache()

def prepare_geotif_data(geotiff_handle: gdal.Dataset, rows: int, cols: int,
                        amp=False, cleanup=False) -> np.ndarray:
    """Load in and clean the GeoTIFF for calculating the color thresholds

    Args:
        geotiff_handle: gdal Dataset for the GeoTIFF to prepare
        rows: number of data rows to read in
        cols: number of data columns to read in
        amp: input TIF is in amplitude and not power
        cleanup: Cleanup artifacts using a -48 dB power threshold

    Returns:
        data: A numpy array containing the prepared GeoTIFF data
    """
    data = np.nan_to_num(geotiff_handle.GetRasterBand(1).ReadAsArray()[:rows, :cols])
    threshold = cleanup_threshold(amp, cleanup)
    data[data < threshold] = 0.0
    if amp:
        # convert amplitude to power
        data *= data
    return data

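# Usage sketch for prepare_geotif_data (not part of the original code); the
# GeoTIFF path is hypothetical and the full raster extent is read.
def _example_prepare_geotif_data():
    from osgeo import gdal
    handle = gdal.Open('sar_rtc.tif')  # hypothetical amplitude product
    data = prepare_geotif_data(handle, handle.RasterYSize, handle.RasterXSize,
                               amp=True, cleanup=True)
    print(data.shape, data.max())
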
def test_initial_pixel_values_all_zero_in_band(gdal_dataset: gdal.Dataset, band_index: int):
    band_number = band_index + 1
    band_pixels = gdal_dataset.GetRasterBand(band_number).ReadAsArray()
    assert np.array_equal(band_pixels, np.array([[0., 0., 0., 0., 0.],
                                                 [0., 0., 0., 0., 0.],
                                                 [0., 0., 0., 0., 0.],
                                                 [0., 0., 0., 0., 0.]]))

def set_nodata(ds: gdal.Dataset, nodata: int):
    '''Sets and fills NoDataValue in RasterBand(s)'''
    logging.info('setting nodata values in raster across all bands')
    for idx in range(1, ds.RasterCount + 1):
        band = ds.GetRasterBand(idx)
        band.Fill(nodata)
        band.SetNoDataValue(nodata)

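# Usage sketch for set_nodata (not part of the original code); it creates a
# small in-memory dataset so the call has no side effects on disk.
def _example_set_nodata():
    from osgeo import gdal
    ds = gdal.GetDriverByName('MEM').Create('', 64, 64, 3, gdal.GDT_Int16)
    set_nodata(ds, -32768)
    assert ds.GetRasterBand(2).GetNoDataValue() == -32768
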
def save_prj_file(cls, output_path: str, ds: gdal.Dataset) -> bool:
    src_srs = osr.SpatialReference()
    src_srs.ImportFromWkt(ds.GetProjection())
    src_srs.MorphToESRI()
    src_wkt = src_srs.ExportToWkt()
    with open(os.path.splitext(output_path)[0] + '.prj', 'wt') as prj_file:
        prj_file.write(src_wkt)
    return True

def load_from_dataset_and_clip(self, image_dataset: gdal.Dataset, extent: GeoPolygon) -> Image:
    geo_transform = self._load_geotransform(image_dataset)
    pixel_polygon = extent.to_pixel(geo_transform)
    bounds = [int(bound) for bound in pixel_polygon.polygon.bounds]
    pixels = image_dataset.ReadAsArray(bounds[0], bounds[1],
                                       bounds[2] - bounds[0],
                                       bounds[3] - bounds[1])
    subset_geo_transform = geo_transform.subset(x=bounds[0], y=bounds[1])
    pixel_polygon = extent.to_pixel(subset_geo_transform)
    if pixels.ndim > 2:
        pixels = pixels.transpose(1, 2, 0)
    return Image(pixels, subset_geo_transform, image_dataset.GetProjection()) \
        .clip_with(pixel_polygon, mask_value=0)

def world_to_pixel(image_dataset: gdal.Dataset, longitude: float, latitude: float) -> (int, int):
    geotransform = image_dataset.GetGeoTransform()
    ulx, uly = geotransform[0], geotransform[3]
    x_dist = geotransform[1]
    # Assumes a north-up raster with square pixels (no rotation terms).
    x = int(np.round((longitude - ulx) / x_dist))
    y = int(np.round((uly - latitude) / x_dist))
    return x, y

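# Worked example for world_to_pixel (not part of the original code); it builds
# a small in-memory raster with a known geotransform instead of reading a file.
def _example_world_to_pixel():
    from osgeo import gdal
    ds = gdal.GetDriverByName('MEM').Create('', 100, 100, 1, gdal.GDT_Byte)
    # Upper-left corner at (30.0, 10.0), square 0.01-degree pixels, north-up.
    ds.SetGeoTransform((30.0, 0.01, 0.0, 10.0, 0.0, -0.01))
    # (30.25 - 30.0) / 0.01 = 25 and (10.0 - 9.5) / 0.01 = 50
    x, y = world_to_pixel(ds, longitude=30.25, latitude=9.5)
    assert (x, y) == (25, 50)
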
def _prepare_bound_checker(self, grib_tmp_700: gdal.Dataset):
    """ Prepare the boundary checker. """
    if not self.bound_checker:
        logger.info('Creating bound checker.')
        padf_transform = get_dataset_geometry(grib_tmp_700)
        crs = CRS.from_string(grib_tmp_700.GetProjection())
        # Create a transformer to go from whatever the raster is, to geographic coordinates.
        raster_to_geo_transformer = get_transformer(crs, NAD83_CRS)
        self.bound_checker = BoundingBoxChecker(padf_transform, raster_to_geo_transformer)
    else:
        logger.info('Re-using bound checker.')

def get_surrounding_grid(
        band: gdal.Dataset, x_index: int, y_index: int) -> Tuple[List[int], List[float]]:
    """ Get the grid and values surrounding a given station
    NOTE: Order of the points is super important! Vertices are ordered clockwise,
    values are also ordered clockwise.
    """
    # Read scanlines of the raster, build up the four points and corresponding values:
    scanline_one = band.ReadRaster(xoff=x_index, yoff=y_index, xsize=2, ysize=1,
                                   buf_xsize=2, buf_ysize=1, buf_type=gdal.GDT_Float32)
    row_one = struct.unpack('f' * 2, scanline_one)
    values = []
    values.extend(row_one)
    scanline_two = band.ReadRaster(xoff=x_index, yoff=y_index + 1, xsize=2, ysize=1,
                                   buf_xsize=2, buf_ysize=1, buf_type=gdal.GDT_Float32)
    row_two = struct.unpack('f' * 2, scanline_two)
    values.append(row_two[1])
    values.append(row_two[0])
    points = [[x_index, y_index], [x_index + 1, y_index],
              [x_index + 1, y_index + 1], [x_index, y_index + 1]]
    return points, values

def save_array(raster: gdal.Dataset, result: List[int], offset: int = 0):
    '''
    Store the array into a raster file

    Parameters:
        raster: raster file to save data into
        result: array of pixels to store
        offset: location to begin storing array
    '''
    logging.info('saving raster results')
    band = raster.GetRasterBand(1)
    band.WriteArray(result, 0, offset)
    band = None

def _create_blank_raster(
    in_data_set: gdal.Dataset,
    out_raster_path: Path,
    nr_bands: int = 1,
    no_data: float = np.nan,
    e_type: int = 6,
):
    """Takes the input data set and creates a new raster, copying the input
    data set's size, projection and geo info."""
    gtiff_driver = gdal.GetDriverByName("GTiff")
    band = in_data_set.GetRasterBand(1)
    x_size = band.XSize  # number of columns
    y_size = band.YSize  # number of rows
    out_ds = gtiff_driver.Create(out_raster_path.as_posix(), xsize=x_size, ysize=y_size,
                                 bands=nr_bands, eType=e_type,
                                 options=["BIGTIFF=IF_NEEDED"])
    out_ds.SetProjection(in_data_set.GetProjection())
    out_ds.SetGeoTransform(in_data_set.GetGeoTransform())
    out_ds.GetRasterBand(1).SetNoDataValue(no_data)
    out_ds.FlushCache()
    out_ds = None

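# Usage sketch for _create_blank_raster (not part of the original code);
# 'dem.tif' and 'slope.tif' are hypothetical paths.
def _example_create_blank_raster():
    from pathlib import Path
    from osgeo import gdal
    src = gdal.Open('dem.tif')  # hypothetical template raster
    # The default e_type=6 corresponds to gdal.GDT_Float32.
    _create_blank_raster(src, Path('slope.tif'), nr_bands=1, e_type=gdal.GDT_Float32)
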
def gdal_to_json(ds: gdal.Dataset):
    gt = ds.GetGeoTransform(can_return_null=True)
    xsize = ds.RasterXSize
    ysize = ds.RasterYSize
    srs = get_srs(ds)
    srs = srs.ExportToProj4()
    minx = gt[0] + gt[1] * 0 + gt[2] * 0
    miny = gt[3] + gt[4] * 0 + gt[5] * 0
    maxx = gt[0] + gt[1] * xsize + gt[2] * ysize
    maxy = gt[3] + gt[4] * xsize + gt[5] * ysize
    bbox = miny, minx, maxy, maxx
    band_list = range(1, ds.RasterCount + 1)
    data = [ds.ReadAsArray(band_list=[bnd]).ravel().tolist() for bnd in band_list]
    ndv = [ds.GetRasterBand(i).GetNoDataValue() for i in band_list]
    result = dict(bbox=bbox, gt=gt, srs=srs, size=(xsize, ysize), data=data, ndv=ndv)
    return result

def czml_gdaldem_crop_and_color(ds: gdal.Dataset,
                                out_filename: str = None,
                                output_format: str = None,
                                czml_output_filename: str = None,
                                extent: Optional[GeoRectangle] = None,
                                cutline: Optional[Union[str, List[str]]] = None,
                                color_palette: ColorPalette = None,
                                discrete_mode=DiscreteMode.interp,
                                process_palette=None,
                                common_options: dict = None):
    do_color = color_palette is not None
    output_format_crop = 'MEM' if do_color else output_format
    out_filename_crop = '' if do_color else out_filename
    ds = gdalos_crop(ds, out_filename=out_filename_crop,
                     output_format=output_format_crop,
                     extent=extent, cutline=cutline,
                     common_options=common_options)
    min_max = (gdalos_util.get_raster_min_max(ds)
               if process_palette and color_palette.has_percents else None)
    if do_color:
        ds = gdalos_raster_color(ds, color_palette=color_palette,
                                 out_filename=out_filename,
                                 output_format=output_format,
                                 discrete_mode=discrete_mode)
        if ds is None:
            raise Exception('failed to color')
    if czml_output_filename is not None:
        if min_max and None not in min_max:
            color_palette_copy = copy.deepcopy(color_palette)
            color_palette_copy.apply_percent(*min_max)
        else:
            color_palette_copy = color_palette
        meta = gdal_to_czml.make_czml_description(color_palette_copy, process_palette)
        ds.SetMetadataItem(gdal_to_czml.czml_metadata_name, meta)
        gdal_to_czml.gdal_to_czml(ds, name=czml_output_filename,
                                  out_filename=czml_output_filename)
    return ds

def __init__(self, raster: gdal.Dataset, width: float, distance: float,
             inverse: bool = False, modify: bool = False, average: int = None):
    self.raster = raster
    self.width = width
    self.average = average
    self.inverse = inverse
    self.modify = modify
    self.distance = distance
    self.no_data_value = raster.GetRasterBand(1).GetNoDataValue()
    if modify and not distance:
        logger.warning('Warning: modify option used with zero distance.')

def __calculate(self, ds: gdal.Dataset, out: gdal.Dataset):
    '''Calculates resulting raster'''
    # chunk rasters for memory efficiency
    chunk = math.floor(self.yres / self.const['chunk'])
    for o in range(0, self.yres, chunk):
        arr = []
        for idx in range(1, ds.RasterCount + 1):
            band = ds.GetRasterBand(idx)
            arr.append(read_band(band, False, o, chunk))
        # calculate chunk
        result = self.calc(self.const, arr, self.region, self.chart == 'cpmed')
        result = set_mask(self.const, result, arr, self.need_dummy)
        save_array(out, result, o)
        result = None

def write(ds: gdal.Dataset, data: np.ndarray, col_off: int = 0, row_off: int = 0,
          band: int = 1) -> int:
    """
    Write a chip of data to the given data set and band.

    Args:
        ds: gdal data set to write to
        data: data to write
        col_off: column offset to start writing data
        row_off: row offset to start writing data
        band: which band if it is a tiff-stack

    Returns:
        0 if successful
    """
    return ds.GetRasterBand(band).WriteArray(data, col_off, row_off)

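# Usage sketch for write (not part of the original code); it writes a small
# chip into an in-memory dataset at a given column/row offset.
def _example_write_chip():
    import numpy as np
    from osgeo import gdal
    ds = gdal.GetDriverByName('MEM').Create('', 256, 256, 1, gdal.GDT_Float32)
    chip = np.ones((64, 64), dtype=np.float32)
    err = write(ds, chip, col_off=128, row_off=64, band=1)
    assert err == 0  # gdal.CE_None
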
def set_median_ct_colours(const: dict, ds: gdal.Dataset):
    '''Set the Median Concentration colours'''
    band = ds.GetRasterBand(1)
    # set colors
    logging.info('setting median ct colors')
    colors = gdal.ColorTable()
    colors.SetColorEntry(const['water'], (150, 200, 255))
    colors.CreateColorRamp(1, (140, 255, 160), 3, (140, 255, 160))
    colors.CreateColorRamp(4, (255, 255, 0), 6, (255, 255, 0))
    colors.CreateColorRamp(7, (255, 125, 7), 8, (255, 125, 7))
    colors.CreateColorRamp(9, (255, 0, 0), 10, (255, 0, 0))
    colors.SetColorEntry(11, (150, 150, 150))
    colors.SetColorEntry(const['land'], (211, 181, 141))
    colors.SetColorEntry(const['nodata'], (255, 255, 255))
    band.SetRasterColorTable(colors)
    band.SetRasterColorInterpretation(gdal.GCI_PaletteIndex)
    ds = None

def set_frequency_colours(const: dict, ds: gdal.Dataset):
    '''Set the Frequency colours'''
    band = ds.GetRasterBand(1)
    # set colors
    logging.info('setting frequency colors')
    colors = gdal.ColorTable()
    colors.SetColorEntry(const['water'], (150, 200, 255))
    colors.CreateColorRamp(1, (255, 242, 0), 15, (255, 242, 0))
    colors.CreateColorRamp(16, (255, 200, 0), 33, (255, 200, 0))
    colors.CreateColorRamp(34, (255, 125, 3), 50, (255, 125, 3))
    colors.CreateColorRamp(51, (255, 0, 112), 66, (255, 0, 112))
    colors.CreateColorRamp(67, (204, 0, 184), 84, (204, 0, 184))
    colors.CreateColorRamp(85, (0, 0, 255), 99, (0, 0, 255))
    colors.SetColorEntry(100, (75, 75, 75))
    colors.SetColorEntry(const['land'], (211, 181, 141))
    colors.SetColorEntry(const['nodata'], (255, 255, 255))
    band.SetRasterColorTable(colors)
    band.SetRasterColorInterpretation(gdal.GCI_PaletteIndex)
    ds = None

def set_median_predom_colours(const: dict, ds: gdal.Dataset):
    '''Set the Median Predominance colours'''
    band = ds.GetRasterBand(1)
    # set colors
    logging.info('setting median predominance colors')
    colors = gdal.ColorTable()
    colors.SetColorEntry(const['water'], (150, 200, 255))
    colors.SetColorEntry(1, (240, 210, 250))
    colors.SetColorEntry(4, (135, 60, 215))
    colors.SetColorEntry(5, (220, 80, 215))
    colors.SetColorEntry(6, (255, 255, 0))
    colors.SetColorEntry(7, (155, 210, 0))
    colors.SetColorEntry(10, (0, 200, 20))
    colors.SetColorEntry(11, (0, 120, 0))
    colors.SetColorEntry(12, (180, 100, 50))
    colors.SetColorEntry(const['land'], (211, 181, 141))
    colors.SetColorEntry(const['nodata'], (255, 255, 255))
    band.SetRasterColorTable(colors)
    band.SetRasterColorInterpretation(gdal.GCI_PaletteIndex)
    ds = None

def format_field_names(dataset: gdal.Dataset, fields: list):
    """
    dataset: source data source, usually a local file or S3 URL
    fields: a list of predefined field names

    If a list of new field names is given, rename the fields accordingly;
    otherwise, convert all field names to lower case with spaces replaced
    by underscores.
    """
    assert dataset, "dataset: gdal.Dataset shouldn't be None"
    layer = dataset.GetLayer(0)
    layerDefn = layer.GetLayerDefn()
    if len(fields) == 0:
        for i in range(layerDefn.GetFieldCount()):
            fieldDefn = layerDefn.GetFieldDefn(i)
            fieldName = fieldDefn.GetName()
            fieldDefn.SetName(fieldName.replace(" ", "_").lower())
    else:
        for i in range(len(fields)):
            fieldDefn = layerDefn.GetFieldDefn(i)
            fieldDefn.SetName(fields[i])
    return dataset

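# Usage sketch for format_field_names (not part of the original code); the
# shapefile path and replacement names are hypothetical. Note the renaming is
# applied to the layer definition of the returned dataset.
def _example_format_field_names():
    from osgeo import gdal
    ds = gdal.OpenEx('parcels.shp', gdal.OF_VECTOR)  # hypothetical vector source
    # Pass an empty list to lower-case/underscore the existing names,
    # or a list of new names to rename the first len(fields) fields.
    ds = format_field_names(ds, [])
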
def get_raster_bands(ds: gdal.Dataset) -> Iterator[gdal.Band]:
    return (ds.GetRasterBand(i + 1) for i in range(ds.RasterCount))

def get_geotransform_and_size(ds: gdal.Dataset) -> Tuple[GeoTransform, Tuple[int, int]]:
    return ds.GetGeoTransform(), (ds.RasterXSize, ds.RasterYSize)

def test_dataset_format_is_geotiff(gdal_dataset: gdal.Dataset):
    assert gdal_dataset.GetDriver().LongName == "GeoTIFF"

def test_projection_is_wgs84(gdal_dataset: gdal.Dataset):
    assert gdal_dataset.GetProjection()[8:14] == 'WGS 84'

def read_band(dataset: gdal.Dataset, bnd_ndx: int = 1) -> Tuple[dict, 'np.array']:
    """
    Read data and metadata of a raster band based on GDAL.

    :param dataset: the source raster dataset
    :type dataset: gdal.Dataset
    :param bnd_ndx: the index of the band (starts from 1)
    :type bnd_ndx: int
    :return: the band parameters and the data values
    :rtype: dict of data parameters and values as a numpy.array
    :raises: RasterIOException

    Examples:
    """
    band = dataset.GetRasterBand(bnd_ndx)
    data_type = gdal.GetDataTypeName(band.DataType)
    unit_type = band.GetUnitType()
    stats = band.GetStatistics(False, False)
    if stats is None:
        dStats = dict(min=None, max=None, mean=None, std_dev=None)
    else:
        dStats = dict(min=stats[0], max=stats[1], mean=stats[2], std_dev=stats[3])
    noDataVal = band.GetNoDataValue()
    nOverviews = band.GetOverviewCount()
    colorTable = band.GetRasterColorTable()
    if colorTable:
        nColTableEntries = colorTable.GetCount()
    else:
        nColTableEntries = 0

    # read data from band
    grid_values = band.ReadAsArray()
    if grid_values is None:
        raise RasterIOException("Unable to read data from rasters")

    # transform data into numpy array
    data = np.asarray(grid_values)

    # if a nodata value exists, set null values to NaN in the numpy array
    if noDataVal is not None and np.isfinite(noDataVal):
        data = np.where(abs(data - noDataVal) > 1e-10, data, np.nan)

    band_params = dict(dataType=data_type, unitType=unit_type, stats=dStats,
                       noData=noDataVal, numOverviews=nOverviews,
                       numColorTableEntries=nColTableEntries)

    return band_params, data

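# Usage sketch for read_band (not part of the original code); 'elevation.tif'
# is a hypothetical raster path.
def _example_read_band():
    from osgeo import gdal
    ds = gdal.Open('elevation.tif')  # hypothetical single-band raster
    params, data = read_band(ds, bnd_ndx=1)
    # NoData cells have already been replaced with NaN in the returned array.
    print(params['dataType'], params['noData'], data.shape)
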