math.ceil(abs(extent.xMaximum() - extent.xMinimum()) / cell_size)) raster_height = int( math.ceil(abs(extent.yMaximum() - extent.yMinimum()) / cell_size)) # ============================================================================= # 3. rasterize temporary raterdataset from vector layer, extent, cellsize # ============================================================================= temporary_path = getTempFilename('tif') driver = gdal.GetDriverByName('GTiff') rasterizedDS = driver.Create(temporary_path, raster_width, raster_height, 1, gdal.GDT_Byte) rasterizedDS.GetRasterBand(1).SetNoDataValue(nodata) rasterizedDS.SetGeoTransform(transform) rasterizedDS.SetProjection(srs.ExportToWkt()) rasterizedDS.GetRasterBand(1).Fill(nodata) gdal.RasterizeLayer(rasterizedDS, [1], ogr_vector_layer, burn_values=[1]) # ============================================================================= # 4. compute proximity from rasterized dataset # ============================================================================= options = [] if Max_Distance > 0: options.append('MAXDIST=' + str(Max_Distance)) proximityDs = driver.Create(Output, raster_width, raster_height, 1, gdal.GetDataTypeByName(TYPES[Raster_Type])) proximityDs.GetRasterBand(1).SetNoDataValue(nodata) proximityDs.SetGeoTransform(transform) proximityDs.SetProjection(srs.ExportToWkt()) proximityDs.GetRasterBand(1).Fill(nodata) gdal.ComputeProximity(rasterizedDS.GetRasterBand(1),
def karst_detection(raster, shp):
    """Flag whether any karst cell lies inside the basin polygon.

    :param raster: Raster class object built from karst raster.
    :param shp: SHP class object from entire basin.
    :return: 1 if the basin-masked karst window contains any value > 0,
        otherwise 0.
    """
    r_data = raster.data
    r_band = r_data.GetRasterBand(1)
    r_geotransform = raster.gt()
    v_data = shp.shp
    v_feature = v_data.GetLayer(0)
    nodata_value = r_band.GetNoDataValue()

    # Reproject the (single-feature) vector layer to the raster CRS when the
    # two projections differ, using an in-memory OGR layer.
    sourceprj = v_feature.GetSpatialRef()
    targetprj = osr.SpatialReference(wkt=r_data.GetProjection())
    if sourceprj.ExportToProj4() != targetprj.ExportToProj4():
        to_fill = ogr.GetDriverByName('Memory')
        ds = to_fill.CreateDataSource("project")
        outlayer = ds.CreateLayer('poly', targetprj, ogr.wkbPolygon)
        feature = v_feature.GetFeature(0)
        transform = osr.CoordinateTransformation(sourceprj, targetprj)
        transformed = feature.GetGeometryRef()
        transformed.Transform(transform)
        geom = ogr.CreateGeometryFromWkb(transformed.ExportToWkb())
        defn = outlayer.GetLayerDefn()
        feat = ogr.Feature(defn)
        feat.SetGeometry(geom)
        outlayer.CreateFeature(feat.Clone())
        feat = None
        v_feature = outlayer

    # Read only the raster window covering the polygon's bounding box and
    # build the matching geotransform for that window.
    src_offset = bbox_to_pixel_offsets(r_geotransform, v_feature.GetExtent())
    src_array = r_band.ReadAsArray(*src_offset)
    new_gt = (
        (r_geotransform[0] + (src_offset[0] * r_geotransform[1])),
        r_geotransform[1],
        0.0,
        (r_geotransform[3] + (src_offset[1] * r_geotransform[5])),
        0.0,
        r_geotransform[5]
    )

    # Burn the polygon into an in-memory byte raster so it can act as a mask.
    driver = gdal.GetDriverByName('MEM')
    v_to_r = driver.Create('', src_offset[2], src_offset[3], 1, gdal.GDT_Byte)
    v_to_r.SetGeoTransform(new_gt)
    gdal.RasterizeLayer(v_to_r, [1], v_feature, burn_values=[1])
    v_to_r_array = v_to_r.ReadAsArray()

    # Mask out nodata cells and cells outside the polygon.
    masked = np.ma.MaskedArray(
        src_array,
        mask=np.logical_or(
            src_array == nodata_value,
            np.logical_not(v_to_r_array)
        ),
        fill_value=np.nan
    )
    return 1 if masked.max() > 0 else 0
def twi_bins(raster, shp, nbins=30):
    """Bin the TWI raster values falling inside the basin polygon.

    :param raster: Raster class object built from a TWI raster.
    :param shp: SHP class object for the basin polygon.
    :param nbins: number of output bins (default 30).
    :return: pandas DataFrame indexed by ``bin`` with columns ``twi``
        (bin-centre TWI value) and ``proportion`` (fraction of cells).
    """
    r_data = raster.data
    r_band = r_data.GetRasterBand(1)
    r_geotransform = raster.gt()
    v_data = shp.shp
    v_feature = v_data.GetLayer(0)

    # Reproject the vector layer to the raster CRS when the projections differ.
    sourceprj = v_feature.GetSpatialRef()
    targetprj = osr.SpatialReference(wkt=r_data.GetProjection())
    if sourceprj.ExportToProj4() != targetprj.ExportToProj4():
        to_fill = ogr.GetDriverByName('Memory')
        ds = to_fill.CreateDataSource("project")
        outlayer = ds.CreateLayer('poly', targetprj, ogr.wkbPolygon)
        feature = v_feature.GetFeature(0)
        transform = osr.CoordinateTransformation(sourceprj, targetprj)
        transformed = feature.GetGeometryRef()
        transformed.Transform(transform)
        geom = ogr.CreateGeometryFromWkb(transformed.ExportToWkb())
        defn = outlayer.GetLayerDefn()
        feat = ogr.Feature(defn)
        feat.SetGeometry(geom)
        outlayer.CreateFeature(feat.Clone())
        feat = None
        v_feature = outlayer

    # Read the raster window covering the polygon's bounding box.
    src_offset = bbox_to_pixel_offsets(r_geotransform, v_feature.GetExtent())
    src_array = r_band.ReadAsArray(*src_offset)
    new_gt = (
        (r_geotransform[0] + (src_offset[0] * r_geotransform[1])),
        r_geotransform[1],
        0.0,
        (r_geotransform[3] + (src_offset[1] * r_geotransform[5])),
        0.0,
        r_geotransform[5]
    )

    # Rasterize the polygon onto the same window to use as a mask.
    driver = gdal.GetDriverByName('MEM')
    v_to_r = driver.Create('', src_offset[2], src_offset[3], 1, gdal.GDT_Byte)
    v_to_r.SetGeoTransform(new_gt)
    gdal.RasterizeLayer(v_to_r, [1], v_feature, burn_values=[1])

    src_array = np.array(src_array, dtype=float)
    # Single read of the rasterized mask (the original read it twice and
    # discarded the first result).
    v_to_r_array = np.array(v_to_r.ReadAsArray(), dtype=float)

    # Mask cells outside the polygon and negative (invalid) TWI values.
    masked = np.ma.MaskedArray(
        src_array,
        mask=np.logical_or(
            np.logical_not(v_to_r_array),
            src_array < 0)
    )

    mx = masked.max()
    mn = masked.min()
    intvl = (mx - mn) / (nbins + 1)
    edges = np.arange(mn, mx, intvl)
    # NOTE(review): np.histogram does not honour the MaskedArray mask; it
    # operates on the underlying data. Kept as-is to preserve behaviour —
    # TODO confirm this is intended.
    histo = np.histogram(masked, bins=edges)

    # Build one row per bin: (bin number, bin-centre TWI, cell proportion).
    rows = []
    for i in range(nbins):
        bin_number = i + 1
        if i == 0:
            twi_val = histo[1][i] / 2
        else:
            twi_val = (histo[1][i] + histo[1][i - 1]) / 2
        proportion = histo[0][i] / np.sum(histo[0])
        rows.append([bin_number, twi_val, proportion])

    df = pd.DataFrame(rows, columns=['bin', 'twi', 'proportion'])
    df.set_index('bin', inplace=True)
    return df
vx_min, vx_max, vy_min, vy_max = source_layer.GetExtent() # this is extent of Australia # Create the destination extent yt,xt = ndviMean.sel(year=2010).shape # set up mask image including projection target_ds = gdal.GetDriverByName('MEM').Create('', xt, yt, gdal.GDT_Byte) target_ds.SetGeoTransform(geotransform) albers = osr.SpatialReference() albers.ImportFromEPSG(3577) target_ds.SetProjection(albers.ExportToWkt()) band = target_ds.GetRasterBand(1) band.SetNoDataValue(noDataVal) # Rasterise gdal.RasterizeLayer(target_ds, [1], source_layer, burn_values=[1]) # Read as array the GMW mask gmwMaskArr = band.ReadAsArray() print("Apply the GMW Mask to the NDVI values") mangroveNDVIMean = ndviMean.where(gmwMaskArr == 1) print("Apply thresholds to NDVI to find total mangrove area (in pixels) and closed canopy mangrove area.") mangroveAreaPxlC = mangroveNDVIMean>ndviThresLow clMangroveAreaPxlC = mangroveNDVIMean>ndviThresHigh mangroveAreaPxlC.attrs['affine'] = affine mangroveAreaPxlC.attrs['crs'] = crswkt clMangroveAreaPxlC.attrs['affine'] = affine clMangroveAreaPxlC.attrs['crs'] = crswkt
def geoJsonToPASCALVOC2012(xmlFileName, geoJson, rasterImageName, im_id='',
                           dataset='SpaceNet', folder_name='spacenet',
                           annotationStyle='PASCAL VOC2012',
                           segment=True,
                           bufferSizePix=2.5,
                           convertTo8Bit=True,
                           outputPixType='Byte',
                           outputFormat='GTiff',
                           bboxResize=1.0):
    """Write a PASCAL VOC2012 annotation (and optional segmentation rasters)
    for one image/geojson pair.

    :param xmlFileName: output annotation XML path.
    :param geoJson: GeoJSON with building footprints (WGS84).
    :param rasterImageName: source raster image path.
    :param im_id: unused image id (kept for interface compatibility).
    :param dataset: dataset name written into the XML header.
    :param folder_name: unused (kept for interface compatibility).
    :param annotationStyle: annotation style label written into the XML.
    :param segment: when True, also write segcls/segobj GTiff + PNG rasters.
    :param bufferSizePix: buffer (in pixels) applied outward/inward to each
        footprint when building the segmentation classes.
    :param convertTo8Bit: when True, run gdal_translate to an 8-bit copy.
    :param outputPixType: gdal_translate output pixel type.
    :param outputFormat: gdal_translate output format ('GTiff' or 'JPEG').
    :param bboxResize: scale factor applied to each bounding box about its
        center.
    :return: dict describing the produced raster/annotation files.
    """
    print("creating {}".format(xmlFileName))
    buildingList = gT.convert_wgs84geojson_to_pixgeojson(
        geoJson, rasterImageName, image_id=[], pixelgeojson=[],
        only_polygons=True, breakMultiPolygonGeo=True, pixPrecision=2)
    # Each entry: {'ImageId', 'BuildingId', 'polyGeo', 'polyPix'}.

    srcRaster = gdal.Open(rasterImageName)
    outputRaster = rasterImageName

    if convertTo8Bit:
        # BUG FIX: the creation option was '"PHOTOMETRIC=rgb"' — with a
        # subprocess argument list the embedded quotes are passed literally
        # to gdal_translate, producing an invalid option.
        cmd = [
            'gdal_translate', '-ot', outputPixType, '-of', outputFormat,
            '-co', 'PHOTOMETRIC=rgb'
        ]
        for bandId in range(srcRaster.RasterCount):
            bandId = bandId + 1
            band = srcRaster.GetRasterBand(bandId)
            # Renamed from min/max to avoid shadowing the builtins.
            band_min = band.GetMinimum()
            band_max = band.GetMaximum()
            # if not exist minimum and maximum values
            if band_min is None or band_max is None:
                (band_min, band_max) = band.ComputeRasterMinMax(1)
            # Scale each band from [0, band_max] to [0, 255].
            cmd.append('-scale_{}'.format(bandId))
            cmd.append('{}'.format(0))
            cmd.append('{}'.format(band_max))
            cmd.append('{}'.format(0))
            cmd.append('{}'.format(255))
        cmd.append(rasterImageName)

        if outputFormat == 'JPEG':
            outputRaster = xmlFileName.replace('.xml', '.jpg')
        else:
            outputRaster = xmlFileName.replace('.xml', '.tif')
        outputRaster = outputRaster.replace('_img', '_8bit_img')
        cmd.append(outputRaster)
        print(cmd)
        subprocess.call(cmd)

    if segment:
        segmented = 1  # 1=True, 0 = False
    else:
        segmented = 0

    # Write the XML header.
    top = Element('annotation')
    childFolder = SubElement(top, 'folder')
    childFolder.text = dataset
    childFilename = SubElement(top, 'filename')
    childFilename.text = rasterImageName

    # Write the source block.
    childSource = SubElement(top, 'source')
    SubElement(childSource, 'database').text = dataset
    SubElement(childSource, 'annotation').text = annotationStyle

    # Write the size block.
    childSize = SubElement(top, 'size')
    SubElement(childSize, 'width').text = str(srcRaster.RasterXSize)
    SubElement(childSize, 'height').text = str(srcRaster.RasterYSize)
    SubElement(childSize, 'depth').text = str(srcRaster.RasterCount)
    SubElement(top, 'segmented').text = str(segmented)

    # One <object> block per building footprint.
    for building in buildingList:
        objectType = 'building'
        objectPose = 'Left'
        objectTruncated = 0  # 1=True, 0 = False
        objectDifficulty = 0  # 0 Easy - 3 Hard
        # GetEnvelope returns a tuple (minX, maxX, minY, maxY).
        env = building['polyPix'].GetEnvelope()
        xmin = env[0]
        ymin = env[2]
        xmax = env[1]
        ymax = env[3]

        if bboxResize != 1.0:
            xCenter = (xmin + xmax) / 2
            yCenter = (ymin + ymax) / 2
            bboxNewHalfHeight = ((ymax - ymin) / 2) * bboxResize
            # BUG FIX: half-width was computed from (ymax - ymin), i.e. the
            # box height; it must use the box width.
            bboxNewHalfWidth = ((xmax - xmin) / 2) * bboxResize
            xmin = xCenter - bboxNewHalfWidth
            xmax = xCenter + bboxNewHalfWidth
            ymin = yCenter - bboxNewHalfHeight
            ymax = yCenter + bboxNewHalfHeight

        childObject = SubElement(top, 'object')
        SubElement(childObject, 'name').text = objectType
        SubElement(childObject, 'pose').text = objectPose
        SubElement(childObject, 'truncated').text = str(objectTruncated)
        SubElement(childObject, 'difficult').text = str(objectDifficulty)
        # Write the bounding box.
        childBoundBox = SubElement(childObject, 'bndbox')
        SubElement(childBoundBox, 'xmin').text = str(int(round(xmin)))
        SubElement(childBoundBox, 'ymin').text = str(int(round(ymin)))
        SubElement(childBoundBox, 'xmax').text = str(int(round(xmax)))
        SubElement(childBoundBox, 'ymax').text = str(int(round(ymax)))

    with open(xmlFileName, 'w') as f:
        f.write(prettify(top))

    print('creating segmentation')
    if segment:
        NoData_value = -9999
        source_ds = ogr.Open(geoJson)
        source_layer = source_ds.GetLayer()
        srs = source_layer.GetSpatialRef()

        # In-memory buffered copies of each footprint: an outward buffer
        # (class boundary) and an inward buffer (object core).
        memDriver = ogr.GetDriverByName('MEMORY')
        outerBuffer = memDriver.CreateDataSource('outer')
        outerBufferLayer = outerBuffer.CreateLayer(
            "test", srs, geom_type=ogr.wkbPolygon)
        innerBuffer = memDriver.CreateDataSource('inner')
        innerBufferLayer = innerBuffer.CreateLayer(
            "test2", srs, geom_type=ogr.wkbPolygon)
        idField = ogr.FieldDefn("objid", ogr.OFTInteger)
        innerBufferLayer.CreateField(idField)
        featureDefn = innerBufferLayer.GetLayerDefn()

        # Buffer distance in georeferenced units (pixel size * pixels).
        bufferDist = srcRaster.GetGeoTransform()[1] * bufferSizePix
        for idx, feature in enumerate(source_layer):
            ingeom = feature.GetGeometryRef()
            geomBufferOut = ingeom.Buffer(bufferDist)
            geomBufferIn = ingeom.Buffer(-bufferDist)
            print(geomBufferIn.ExportToWkt())
            print(geomBufferIn.IsEmpty())
            print(geomBufferIn.IsSimple())
            # Skip footprints that vanish under the negative buffer.
            if geomBufferIn.GetArea() > 0.0:
                outBufFeature = ogr.Feature(featureDefn)
                outBufFeature.SetGeometry(geomBufferOut)
                outerBufferLayer.CreateFeature(outBufFeature)
                inBufFeature = ogr.Feature(featureDefn)
                inBufFeature.SetGeometry(geomBufferIn)
                inBufFeature.SetField('objid', idx)
                innerBufferLayer.CreateFeature(inBufFeature)
                outBufFeature = None
                inBufFeature = None

        # Class segmentation raster: 255 = boundary band, 100 = interior.
        print('writing GTIFF sgcls')
        print('rasterToWrite = {}'.format(
            xmlFileName.replace('.xml', 'segcls.tif')))
        target_ds = gdal.GetDriverByName('GTiff').Create(
            xmlFileName.replace('.xml', 'segcls.tif'),
            srcRaster.RasterXSize, srcRaster.RasterYSize, 1, gdal.GDT_Byte)
        print('setTransform')
        target_ds.SetGeoTransform(srcRaster.GetGeoTransform())
        print('setProjection')
        target_ds.SetProjection(srcRaster.GetProjection())
        print('getBand')
        band = target_ds.GetRasterBand(1)
        print('setnodata')
        band.SetNoDataValue(NoData_value)

        print('rasterize outer buffer')
        gdal.RasterizeLayer(target_ds, [1], outerBufferLayer,
                            burn_values=[255])
        print('rasterize inner buffer')
        gdal.RasterizeLayer(target_ds, [1], innerBufferLayer,
                            burn_values=[100])

        print('writing png sgcls')
        imageArray = np.array(target_ds.GetRasterBand(1).ReadAsArray())
        im = Image.fromarray(imageArray)
        im.save(xmlFileName.replace('.xml', 'segcls.png'))

        # Object segmentation raster: inner buffers burned with their objid.
        print('writing GTIFF sgobj')
        target_ds = gdal.GetDriverByName('GTiff').Create(
            xmlFileName.replace('.xml', 'segobj.tif'),
            srcRaster.RasterXSize, srcRaster.RasterYSize, 1, gdal.GDT_Byte)
        target_ds.SetGeoTransform(srcRaster.GetGeoTransform())
        target_ds.SetProjection(srcRaster.GetProjection())
        band = target_ds.GetRasterBand(1)
        band.SetNoDataValue(NoData_value)

        gdal.RasterizeLayer(target_ds, [1], outerBufferLayer,
                            burn_values=[255])
        gdal.RasterizeLayer(target_ds, [1], innerBufferLayer,
                            burn_values=[100], options=['ATTRIBUTE=objid'])

        print('writing png sgobj')
        imageArray = np.array(target_ds.GetRasterBand(1).ReadAsArray())
        im = Image.fromarray(imageArray)
        im.save(xmlFileName.replace('.xml', 'segobj.png'))

    entry = {
        'rasterFileName': outputRaster,
        'geoJsonFileName': geoJson,
        'annotationName': xmlFileName,
        'width': srcRaster.RasterXSize,
        'height': srcRaster.RasterYSize,
        'depth': srcRaster.RasterCount,
        'basename': os.path.splitext(os.path.basename(rasterImageName))[0]
    }
    return entry
def zonal_stats(vectors, raster, layer_num=0, band_num=1, nodata_value=None,
                global_src_extent=False, categorical=False, stats=None,
                copy_properties=False, all_touched=False, transform=None,
                add_stats=None, raster_out=False):
    """Summary statistics of a raster, broken out by vector geometries.

    Attributes
    ----------
    vectors : path to an OGR vector source or list of geo_interface or WKT str
    raster : ndarray or path to a GDAL raster source
        If ndarray is passed, the `transform` kwarg is required.
    layer_num : int, optional
        If `vectors` is a path to an OGR source, the vector layer to use
        (counting from 0). defaults to 0.
    band_num : int, optional
        If `raster` is a GDAL source, the band number to use
        (counting from 1). defaults to 1.
    nodata_value : float, optional
        If `raster` is a GDAL source, this value overrides any NODATA value
        specified in the file's metadata. If `None`, the file's metadata's
        NODATA value (if any) will be used. `ndarray`s don't support
        `nodata_value`. defaults to `None`.
    global_src_extent : bool, optional
        Pre-allocate entire raster before iterating over vector features.
        Use `True` if limited by disk IO or indexing into raster; requires
        sufficient RAM to store array in memory. Use `False` with fast disks
        and a well-indexed raster, or when memory-constrained. Ignored when
        `raster` is an ndarray, because it is already completely in memory.
        defaults to `False`.
    categorical : bool, optional
    stats : list of str, or space-delimited str, optional
        Which statistics to calculate for each zone. All possible choices
        are listed in `VALID_STATS`. defaults to `DEFAULT_STATS`, a subset
        of these.
    copy_properties : bool, optional
        Include feature properties alongside the returned stats.
        defaults to `False`
    all_touched : bool, optional
        Whether to include every raster cell touched by a geometry, or only
        those having a center point within the polygon. defaults to `False`
    transform : list of float, optional
        GDAL-style geotransform coordinates when `raster` is an ndarray.
        Required when `raster` is an ndarray, otherwise ignored.
    add_stats : Dictionary with names and functions of additional statistics
        to compute, optional
    raster_out : Include the masked numpy array for each feature, optional
        Each feature dictionary will have the following additional keys:
        clipped raster (`mini_raster`)
        Geo-transform (`mini_raster_GT`)
        No Data Value (`mini_raster_NDV`)

    Returns
    -------
    list of dicts
        Each dict represents one vector geometry. Its keys include `__fid__`
        (the geometry feature id) and each of the `stats` requested.
    """
    # Normalize `stats` to a list of stat names.
    if not stats:
        if not categorical:
            stats = DEFAULT_STATS
        else:
            stats = []
    else:
        if isinstance(stats, str):
            if stats in ['*', 'ALL']:
                stats = VALID_STATS
            else:
                stats = stats.split()
    # Validate each requested stat up front.
    for x in stats:
        if x.startswith("percentile_"):
            try:
                get_percentile(x)
            except ValueError:
                # BUG FIX: the message contained a %s placeholder but was
                # never interpolated with the offending stat name.
                raise RasterStatsError(
                    "Stat `%s` is not valid; must use"
                    " `percentile_` followed by a float >= 0 or <= 100" % x)
        elif x not in VALID_STATS:
            raise RasterStatsError(
                "Stat `%s` not valid; "
                "must be one of \n %r" % (x, VALID_STATS))

    run_count = False
    if categorical or 'majority' in stats or 'minority' in stats or \
            'unique' in stats:
        # run the counter once, only if needed
        run_count = True

    if isinstance(raster, np.ndarray):
        raster_type = 'ndarray'
        # must have transform arg
        if not transform:
            raise RasterStatsError("Must provide the 'transform' kwarg when "
                                   "using ndarrays as src raster")
        rgt = transform
        rsize = (raster.shape[1], raster.shape[0])
        # global_src_extent is implicitly turned on, array is already in memory
        if not global_src_extent:
            global_src_extent = True
        if nodata_value:
            raise NotImplementedError("ndarrays don't support 'nodata_value'")
    else:
        raster_type = 'gdal'
        rds = gdal.Open(raster, GA_ReadOnly)
        if not rds:
            raise RasterStatsError("Cannot open %r as GDAL raster" % raster)
        rb = rds.GetRasterBand(band_num)
        rgt = rds.GetGeoTransform()
        rsize = (rds.RasterXSize, rds.RasterYSize)
        if nodata_value is not None:
            nodata_value = float(nodata_value)
            rb.SetNoDataValue(nodata_value)
        else:
            nodata_value = rb.GetNoDataValue()

    features_iter, strategy, spatial_ref = get_features(vectors, layer_num)

    if global_src_extent and raster_type == 'gdal':
        # create an in-memory numpy array of the source raster data
        # covering the whole extent of the vector layer
        if strategy != "ogr":
            raise RasterStatsError("global_src_extent requires OGR vector")
        # find extent of ALL features
        ds = ogr.Open(vectors)
        layer = ds.GetLayer(layer_num)
        ex = layer.GetExtent()
        # transform from OGR extent to xmin, ymin, xmax, ymax
        layer_extent = (ex[0], ex[2], ex[1], ex[3])
        global_src_offset = bbox_to_pixel_offsets(rgt, layer_extent, rsize)
        global_src_array = rb.ReadAsArray(*global_src_offset)
    elif global_src_extent and raster_type == 'ndarray':
        # NOTE(review): offsets are (x_off, y_off, x_size, y_size); shape[0]
        # is rows — sizes look swapped here but only indices [0]/[1] are used
        # below, so behaviour is unaffected. TODO confirm.
        global_src_offset = (0, 0, raster.shape[0], raster.shape[1])
        global_src_array = raster

    mem_drv = ogr.GetDriverByName('Memory')
    driver = gdal.GetDriverByName('MEM')

    results = []

    for i, feat in enumerate(features_iter):
        if feat['type'] == "Feature":
            geom = shape(feat['geometry'])
        else:  # it's just a geometry
            geom = shape(feat)

        # Point and MultiPoint don't play well with GDALRasterize
        # convert them into box polygons the size of a raster cell
        buff = rgt[1] / 2.0
        if geom.type == "MultiPoint":
            geom = MultiPolygon([box(*(pt.buffer(buff).bounds))
                                 for pt in geom.geoms])
        elif geom.type == 'Point':
            geom = box(*(geom.buffer(buff).bounds))

        ogr_geom_type = shapely_to_ogr_type(geom.type)

        geom_bounds = list(geom.bounds)

        # calculate new pixel coordinates of the feature subset
        src_offset = bbox_to_pixel_offsets(rgt, geom_bounds, rsize)

        new_gt = (
            (rgt[0] + (src_offset[0] * rgt[1])),
            rgt[1],
            0.0,
            (rgt[3] + (src_offset[1] * rgt[5])),
            0.0,
            rgt[5]
        )

        if src_offset[2] <= 0 or src_offset[3] <= 0:
            # we're off the raster completely, no overlap at all
            # so there's no need to even bother trying to calculate
            feature_stats = dict([(s, None) for s in stats])
        else:
            if not global_src_extent:
                # use feature's source extent and read directly from source
                # fastest option when you have fast disks and fast raster
                # advantage: each feature uses the smallest raster chunk
                # disadvantage: lots of disk reads on the source raster
                src_array = rb.ReadAsArray(*src_offset)
            else:
                # derive array from global source extent array
                # useful *only* when disk IO or raster format inefficiencies
                # are your limiting factor
                # advantage: reads raster data in one pass before loop
                # disadvantage: large vector extents combined with big rasters
                # require lotsa memory
                xa = src_offset[0] - global_src_offset[0]
                ya = src_offset[1] - global_src_offset[1]
                xb = xa + src_offset[2]
                yb = ya + src_offset[3]
                src_array = global_src_array[ya:yb, xa:xb]

            # Create a temporary vector layer in memory
            mem_ds = mem_drv.CreateDataSource('out')
            mem_layer = mem_ds.CreateLayer('out', spatial_ref, ogr_geom_type)
            ogr_feature = ogr.Feature(feature_def=mem_layer.GetLayerDefn())
            ogr_geom = ogr.CreateGeometryFromWkt(geom.wkt)
            ogr_feature.SetGeometryDirectly(ogr_geom)
            mem_layer.CreateFeature(ogr_feature)

            # Rasterize it
            rvds = driver.Create('rvds', src_offset[2], src_offset[3],
                                 1, gdal.GDT_Byte)
            rvds.SetGeoTransform(new_gt)
            if all_touched:
                gdal.RasterizeLayer(rvds, [1], mem_layer, None, None,
                                    burn_values=[1],
                                    options=['ALL_TOUCHED=True'])
            else:
                gdal.RasterizeLayer(rvds, [1], mem_layer, None, None,
                                    burn_values=[1],
                                    options=['ALL_TOUCHED=False'])
            rv_array = rvds.ReadAsArray()

            # Mask the source data array with our current feature
            # we take the logical_not to flip 0<->1 for the correct mask effect
            # we also mask out nodata values explicitly
            masked = np.ma.MaskedArray(
                src_array,
                mask=np.logical_or(
                    src_array == nodata_value,
                    np.logical_not(rv_array)
                )
            )

            if run_count:
                pixel_count = Counter(masked.compressed())

            if categorical:
                feature_stats = dict(pixel_count)
            else:
                feature_stats = {}

            if 'min' in stats:
                feature_stats['min'] = float(masked.min())
            if 'max' in stats:
                feature_stats['max'] = float(masked.max())
            if 'mean' in stats:
                feature_stats['mean'] = float(masked.mean())
            if 'count' in stats:
                feature_stats['count'] = int(masked.count())
            # optional
            if 'sum' in stats:
                feature_stats['sum'] = float(masked.sum())
            if 'std' in stats:
                feature_stats['std'] = float(masked.std())
            if 'median' in stats:
                feature_stats['median'] = float(
                    np.median(masked.compressed()))
            # BUG FIX: a stray `cd` token followed this condition in the
            # original, which is a syntax error.
            if 'majority' in stats:
                try:
                    feature_stats['majority'] = \
                        pixel_count.most_common(1)[0][0]
                except IndexError:
                    feature_stats['majority'] = None
            if 'minority' in stats:
                try:
                    feature_stats['minority'] = \
                        pixel_count.most_common()[-1][0]
                except IndexError:
                    feature_stats['minority'] = None
            if 'unique' in stats:
                feature_stats['unique'] = len(list(pixel_count.keys()))
            if 'range' in stats:
                try:
                    rmin = feature_stats['min']
                except KeyError:
                    rmin = float(masked.min())
                try:
                    rmax = feature_stats['max']
                except KeyError:
                    rmax = float(masked.max())
                feature_stats['range'] = rmax - rmin

            for pctile in [s for s in stats if s.startswith('percentile_')]:
                q = get_percentile(pctile)
                pctarr = masked.compressed()
                if pctarr.size == 0:
                    feature_stats[pctile] = None
                else:
                    feature_stats[pctile] = np.percentile(pctarr, q)

            if add_stats is not None:
                for stat_name, stat_func in add_stats.items():
                    feature_stats[stat_name] = stat_func(masked)

            if raster_out:
                masked.fill_value = nodata_value
                masked.data[masked.mask] = nodata_value
                feature_stats['mini_raster'] = masked
                feature_stats['mini_raster_GT'] = new_gt
                feature_stats['mini_raster_NDV'] = nodata_value

        # Use the enumerated id as __fid__
        feature_stats['__fid__'] = i

        if 'properties' in feat and copy_properties:
            for key, val in list(feat['properties'].items()):
                feature_stats[key] = val

        results.append(feature_stats)

    # BUG FIX: the function previously fell off the end and implicitly
    # returned None, contradicting the documented list-of-dicts return.
    return results
def shp_to_raster(shp, inSource, cellsize, nodata, outRaster, epsg=None,
                  rst_template=None, api='gdal', snap=None):
    """
    Feature Class to Raster

    cellsize will be ignored if rst_template is defined

    * API's Available:
    - gdal;
    - arcpy;
    - pygrass;
    - grass;
    """
    if api == 'gdal':
        from osgeo import gdal, ogr
        from gasp.prop.ff import drv_name

        if not epsg:
            from gasp.prop.prj import get_shp_sref
            srs = get_shp_sref(shp).ExportToWkt()
        else:
            from gasp.prop.prj import epsg_to_wkt
            srs = epsg_to_wkt(epsg)

        # Get Extent
        dtShp = ogr.GetDriverByName(
            drv_name(shp)).Open(shp, 0)
        lyr = dtShp.GetLayer()
        if not rst_template:
            x_min, x_max, y_min, y_max = lyr.GetExtent()
            x_res = int((x_max - x_min) / cellsize)
            y_res = int((y_max - y_min) / cellsize)
        else:
            from gasp.fm.rst import rst_to_array
            img_temp = gdal.Open(rst_template)
            geo_transform = img_temp.GetGeoTransform()
            y_res, x_res = rst_to_array(rst_template).shape

        # Create output
        # BUG FIX: the band count was omitted, so gdal.GDT_Byte (== 1) was
        # silently interpreted as bands=1 with the default data type. Make
        # both arguments explicit.
        dtRst = gdal.GetDriverByName(drv_name(outRaster)).Create(
            outRaster, x_res, y_res, 1, gdal.GDT_Byte
        )
        if not rst_template:
            dtRst.SetGeoTransform((x_min, cellsize, 0, y_max, 0, -cellsize))
        else:
            dtRst.SetGeoTransform(geo_transform)
        dtRst.SetProjection(str(srs))
        bnd = dtRst.GetRasterBand(1)
        bnd.SetNoDataValue(nodata)
        gdal.RasterizeLayer(dtRst, [1], lyr, burn_values=[1])
        del lyr
        dtShp.Destroy()

    elif api == 'arcpy':
        import arcpy
        if rst_template:
            tempEnvironment0 = arcpy.env.extent
            # BUG FIX: referenced an undefined name `template`.
            arcpy.env.extent = rst_template
        if snap:
            tempSnap = arcpy.env.snapRaster
            arcpy.env.snapRaster = snap
        obj_describe = arcpy.Describe(shp)
        geom = obj_describe.ShapeType
        if geom == u'Polygon':
            # BUG FIX: referenced an undefined name `inField`; the burn
            # field is the `inSource` parameter.
            arcpy.PolygonToRaster_conversion(
                shp, inSource, outRaster, "CELL_CENTER", "NONE", cellsize
            )
        elif geom == u'Polyline':
            arcpy.PolylineToRaster_conversion(
                shp, inSource, outRaster, "MAXIMUM_LENGTH", "NONE", cellsize
            )
        if rst_template:
            arcpy.env.extent = tempEnvironment0
        if snap:
            arcpy.env.snapRaster = tempSnap

    elif api == 'grass' or api == 'pygrass':
        """
        Vectorial geometry to raster

        If source is None, the convertion will be based on the cat field.

        If source is a string, the convertion will be based on the field
        with a name equal to the given string.

        If source is a numeric value, all cells of the output raster will
        have that same value.
        """
        # BUG FIX: the original used `unicode` (NameError on Python 3) and
        # `not inSource`, which misrouted a numeric 0 to the "cat" mode.
        if inSource is None:
            __USE = "cat"
        elif isinstance(inSource, str):
            __USE = "attr"
        elif isinstance(inSource, (int, float)):
            __USE = "val"
        else:
            __USE = None
        if not __USE:
            raise ValueError('\'source\' parameter value is not valid')

        if api == 'pygrass':
            from grass.pygrass.modules import Module
            m = Module(
                "v.to.rast", input=shp, output=outRaster, use=__USE,
                attribute_column=inSource if __USE == "attr" else None,
                value=inSource if __USE == "val" else None,
                overwrite=True, run_=False, quiet=True
            )
            m()
        else:
            from gasp import exec_cmd
            rcmd = exec_cmd((
                "v.to.rast input={} output={} use={}{} "
                "--overwrite --quiet"
            ).format(
                shp, outRaster, __USE,
                "" if __USE == "cat" else " attribute_column={}".format(
                    inSource) if __USE == "attr" else " val={}".format(
                        inSource)
            ))

    else:
        raise ValueError('API {} is not available'.format(api))

    return outRaster
def perform(self):
    """Run zonal statistics for every feature of the input layer and write
    the results (geometry + computed attributes) through a VectorWriter.

    Reads windows from self.raster_ds/self.raster_band, rasterizes each
    (intersected) feature geometry in memory as a mask, and delegates the
    per-feature statistics to self.__zonal_stats. Progress is reported via a
    Qt progress dialog and self.logger.
    """
    geo_transform = self.raster_ds.GetGeoTransform()
    raster_b_box = self.__get_raster_b_box(geo_transform)
    raster_geom = QgsGeometry.fromRect(raster_b_box)
    crs = self.__create_spatial_reference()
    if self.use_global_extent:
        # One read covering all features; per-feature windows derived later.
        src_offset, src_array, new_geo_transform = self.__get_global_extent(
            raster_b_box, geo_transform)
    else:
        # Per-feature windows are computed inside the loop below.
        src_offset = None
        src_array = None
        new_geo_transform = None
    mem_vector_driver = ogr.GetDriverByName('Memory')
    mem_raster_driver = gdal.GetDriverByName('MEM')
    self.__populate_fields_operations()
    writer = VectorWriter(self.destination, None, self.fields.toList(),
                          self.layer.wkbType(), self.layer.crs(), None)
    out_feat = QgsFeature()
    out_feat.initAttributes(len(self.fields))
    out_feat.setFields(self.fields)
    features = vector.features(self.layer)
    last_progress = 0
    total = 100.0 / len(features) if len(features) > 0 else 1
    dlg, bar = QtHandler.progress_dialog(
        label='Fill grid with {}...'.format(self.basename_raster))
    start_time = datetime.now()
    str_start_time = start_time.strftime(self.pt_br_format)
    self.logger.info('Running zonal stats to "{}" at {}...'.format(
        self.basename_raster, str_start_time))
    for current, f in enumerate(features):
        geom = f.geometry()
        # Clip the feature to the raster footprint before rasterizing.
        intersected_geom = raster_geom.intersection(geom)
        ogr_geom = ogr.CreateGeometryFromWkt(
            intersected_geom.exportToWkt())
        if not self.use_global_extent:
            # Window the raster to the clipped feature's bounding box.
            bbox = intersected_geom.boundingBox()
            x_min = bbox.xMinimum()
            x_max = bbox.xMaximum()
            y_min = bbox.yMinimum()
            y_max = bbox.yMaximum()
            (startColumn, startRow) = mapToPixel(x_min, y_max, geo_transform)
            (endColumn, endRow) = mapToPixel(x_max, y_min, geo_transform)
            width = endColumn - startColumn
            height = endRow - startRow
            # Degenerate (zero-size) windows are skipped entirely.
            if width == 0 or height == 0:
                continue
            src_offset = (startColumn, startRow, width, height)
            src_array = self.raster_band.ReadAsArray(*src_offset)
            # Apply band scale/offset to the raw values.
            src_array = src_array * self.scale + self.offset
            new_geo_transform = (
                geo_transform[0] + src_offset[0] * geo_transform[1],
                geo_transform[1],
                0.0,
                geo_transform[3] + src_offset[1] * geo_transform[5],
                0.0,
                geo_transform[5],
            )
        # Create a temporary vector layer in memory
        mem_vds = mem_vector_driver.CreateDataSource('out')
        mem_layer = mem_vds.CreateLayer('poly', crs, ogr.wkbPolygon)
        ft = ogr.Feature(mem_layer.GetLayerDefn())
        ft.SetGeometry(ogr_geom)
        mem_layer.CreateFeature(ft)
        ft.Destroy()
        # Rasterize it
        rasterized_ds = mem_raster_driver.Create('', src_offset[2],
                                                 src_offset[3], 1,
                                                 gdal.GDT_Byte)
        rasterized_ds.SetGeoTransform(new_geo_transform)
        gdal.RasterizeLayer(rasterized_ds, [1], mem_layer, burn_values=[1])
        rasterized_array = rasterized_ds.ReadAsArray()
        out_feat.setGeometry(geom)
        # Mask out nodata cells and cells outside the feature polygon.
        masked = numpy.ma.MaskedArray(
            src_array,
            mask=numpy.logical_or(src_array == self.no_data,
                                  numpy.logical_not(rasterized_array)))
        attrs = self.__zonal_stats(f, masked)
        out_feat.setAttributes(attrs)
        writer.addFeature(out_feat)
        # Release in-memory datasets before the next feature.
        del mem_vds
        del rasterized_ds
        progress = int(current * total)
        # Only report/update every 10%.
        if progress != last_progress and progress % 10 == 0:
            self.logger.debug('{}%'.format(str(progress)))
            bar.setValue(progress)
            last_progress = progress
    if last_progress != 100:
        bar.setValue(100)
    bar.close()
    dlg.close()
    del dlg
    del writer
    # NOTE(review): this removes the raster_ds attribute from the instance,
    # so perform() cannot be called twice on the same object.
    del self.raster_ds
    end_time = datetime.now()
    time_elapsed = end_time - start_time
    str_end_time = end_time.strftime(self.pt_br_format)
    self.logger.info(
        'Summing up, done at {}! Time elapsed {}(hh:mm:ss.ms)'.format(
            str_end_time, time_elapsed))
def create(self):
    """
    | Generate the output raster dataset

    Builds (or accepts) a fishnet grid, intersects it with the input
    GeoJSON features, flags cells whose covered-area fraction exceeds
    ``self.area_threshold`` (inverted when ``self.invert``), and rasterises
    the flags to a GTiff in EPSG:27700. Errors are logged, not raised;
    temporary shapefile parts are always cleaned up.
    """
    gdal.UseExceptions()
    temp_shp = '{}/{}.shp'.format(Config.get('DATA_DIRECTORY'),
                                  uuid.uuid4().hex)
    try:
        # Read the supplied GeoJSON data into a DataFrame
        self.logger.info('Creating GeoDataFrame from input...')
        if isinstance(self.geojson_data, str):
            self.logger.info(
                'Input GeoJSON is a string, not a dict => converting...')
            self.geojson_data = loads(self.geojson_data)
        input_data = GeoDataFrame.from_features(self.geojson_data)
        self.logger.debug(input_data.head(10))
        self.logger.info('Done')

        # Create the fishnet if necessary
        if self.bounding_box is not None:
            # Use the supplied British National Grid bounding box
            self.logger.info(
                'Generate fishnet GeoDataFrame from supplied bounding box...'
            )
            fishnet_geojson = FishNet(bbox=self.bounding_box,
                                      netsize=self.resolution).create()
        elif self.fishnet is not None:
            # Use a supplied fishnet output
            self.logger.info(
                'Generate fishnet GeoDataFrame from supplied GeoJSON...')
            if isinstance(self.fishnet, str):
                self.logger.info(
                    'Input fishnet GeoJSON is a string, not a dict => converting...'
                )
                self.fishnet = loads(self.fishnet)
            fishnet_geojson = self.fishnet
        elif len(self.area_codes) > 0:
            # Use the LAD codes
            self.logger.info(
                'Generate fishnet GeoDataFrame from supplied LAD codes...')
            fishnet_geojson = FishNet(lad=self.area_codes,
                                      netsize=self.resolution).create()
        else:
            raise ValueError(
                'No boundary information supplied - please supply fishnet GeoJSON, bounding box, or list of LAD codes'
            )
        fishnet = GeoDataFrame.from_features(fishnet_geojson)
        x_min, y_min, x_max, y_max = fishnet.total_bounds
        self.logger.debug(fishnet.head(10))
        self.logger.info('Done')

        # Overlay intersection
        self.logger.info('Overlay data on fishnet using intersection...')
        intersection = overlay(fishnet, input_data, how='intersection')
        self.logger.info('Done')

        # Write area attribute into frame
        self.logger.info('Computing areas...')
        intersection['area'] = intersection.geometry.area
        self.logger.info('Done')

        # Create grid to rasterize via merge and assign an 'include' field
        # based on the threshold (inverted when self.invert is set)
        self.logger.info('Doing merge...')
        self.logger.debug(intersection.head(10))
        int_merge = fishnet.merge(
            intersection.groupby(['FID']).area.sum() / 100.0, on='FID')
        for i, row in int_merge.iterrows():
            self.logger.debug('{} has area {}'.format(i, row['area']))
            if row['area'] > self.area_threshold:
                int_merge.at[i, 'include_me'] = int(
                    0) if self.invert else int(1)
            else:
                int_merge.at[i, 'include_me'] = int(
                    1) if self.invert else int(0)
        self.logger.info('Done')

        self.logger.info('Compute bounds of dataset...')
        xdim = int((x_max - x_min) / self.resolution)
        ydim = int((y_max - y_min) / self.resolution)
        self.logger.info(
            'xmin = {}, ymin = {}, xmax = {}, ymax = {}'.format(
                x_min, y_min, x_max, y_max))

        # Save as temporary shapefile (TO DO - understand what information
        # is gained by doing this that is not present in GeoJSON)
        self.logger.info('Write out temporary shapefile...')
        int_merge.to_file(temp_shp)
        self.logger.info('Written to {}'.format(temp_shp))

        # Open OGR dataset
        ogr_source = ogr.Open(temp_shp)
        output_file = '{}/{}'.format(Config.get('DATA_DIRECTORY'),
                                     self.output_filename)
        self.logger.info(
            'Will write output raster to {}'.format(output_file))

        # Create raster dataset and set projection (EPSG:27700)
        driver = gdal.GetDriverByName('GTiff')
        rasterised = driver.Create(output_file, xdim, ydim, 1,
                                   gdal.GDT_Byte)
        rasterised.SetGeoTransform(
            (x_min, self.resolution, 0, y_max, 0, -self.resolution))
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(27700)
        rasterised.SetProjection(srs.ExportToWkt())

        # Pre-fill with the nodata value; cells covered by features will be
        # overwritten with their include_me attribute below.
        band = rasterised.GetRasterBand(1)
        band.Fill(self.nodata)

        # Do rasterisation
        self.logger.info(
            'Set transform and projection, about to rasterise layer...')
        gdal.RasterizeLayer(rasterised, [1], ogr_source.GetLayer(0),
                            options=["ATTRIBUTE=include_me"])
        self.logger.info('Done')
        rasterised.FlushCache()
        rasterised = None
        ogr_source = None
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. The best-effort log-and-continue behaviour for
        # ordinary errors is preserved.
        self.logger.warning(traceback.format_exc())
    finally:
        # Remove every part of the temporary shapefile (.shp/.shx/.dbf/...).
        self.logger.info('Removing temporary files...')
        filestem = Path(temp_shp).stem
        for shpf in Path(Config.get('DATA_DIRECTORY')).glob(
                '{}.*'.format(filestem)):
            self.logger.info('Cleaning up {}'.format(shpf))
            shpf.unlink()
def rasterize_geoseries(geoseries, bbox, projection, height, width, values=None): """Transform a geoseries to a raster, optionally. :param geoseries: GeoSeries or None :param bbox: tuple of floats, (x1, y1, x2, y2) :param projection: wkt projection string :param height: int :param width: int :param values: Series or None, object containing values (int, float, bool) that will be burned into the raster. If None, the result will be a boolean array indicating geometries. :returns: dictionary containing - values: a 3D numpy array (time, y, x) of int32 / float64 / bool type - no_data_value It is assumed that all geometries intersect the requested bbox. If geoseries is None or empty, an array full of nodata will be returned. """ # determine the dtype based on `values` if values is None or values.dtype == bool: dtype = np.uint8 # we cast to bool later as GDAL does not know bools no_data_value = 0 # False ogr_dtype = None if values is not None and geoseries is not None: geoseries = geoseries[values] # values is a boolean mask values = None # discard values elif str(values.dtype) == "category": # transform pandas Categorical dtype to normal dtype values = pd.Series(np.asarray(values), index=values.index) if values is not None: if np.issubdtype(values.dtype, np.floating): dtype = np.float64 # OGR only knows float64 no_data_value = get_dtype_max(dtype) ogr_dtype = ogr.OFTReal if geoseries is not None: # filter out the inf and NaN values mask = np.isfinite(values) geoseries = geoseries[mask] values = values[mask] elif np.issubdtype(values.dtype, np.integer): dtype = np.int32 # OGR knows int32 and int64, but GDAL only int32 no_data_value = get_dtype_max(dtype) ogr_dtype = ogr.OFTInteger else: raise TypeError( "Unsupported values dtype to rasterize: '{}'".format( values.dtype)) # initialize the array array = np.full((1, height, width), no_data_value, dtype=dtype) # if there are no features, return directly if geoseries is None or len(geoseries) == 0: return 
_finalize_rasterize_result(array, no_data_value) # drop empty geometries mask = ~geoseries.isnull() geoseries = geoseries[mask] values = values[mask] if values is not None else None # be strict about the bbox, it may lead to segfaults else x1, y1, x2, y2 = bbox if not ((x2 == x1 and y2 == y1) or (x1 < x2 and y1 < y2)): raise ValueError("Invalid bbox ({})".format(bbox)) # if the request is a point, we find the intersecting polygon and # "rasterize" it if x2 == x1 and y2 == y1: mask = geoseries.intersects(Point(x1, y1)) if not mask.any(): pass elif values is not None: array[:] = values[mask].iloc[-1] # take the last one else: array[:] = True return _finalize_rasterize_result(array, no_data_value) # create an output datasource in memory driver = ogr.GetDriverByName(str("Memory")) burn_attr = str("BURN_IT") sr = get_sr(projection) # prepare in-memory ogr layer ds_ogr = driver.CreateDataSource(str("")) layer = ds_ogr.CreateLayer(str(""), sr) layer_definition = layer.GetLayerDefn() if ogr_dtype is not None: field_definition = ogr.FieldDefn(burn_attr, ogr_dtype) layer.CreateField(field_definition) iterable = (zip(geoseries, values) if values is not None else zip( geoseries, repeat(True))) for geometry, value in iterable: feature = ogr.Feature(layer_definition) feature.SetGeometry(ogr.CreateGeometryFromWkb(geometry.wkb)) if ogr_dtype is not None: feature[burn_attr] = value layer.CreateFeature(feature) geo_transform = GeoTransform.from_bbox(bbox, height, width) dataset_kwargs = { "no_data_value": no_data_value, "projection": sr.ExportToWkt(), "geo_transform": geo_transform, } # ATTRIBUTE=BURN_ATTR burns the BURN_ATTR value of each feature if dtype == np.uint8: # this is our boolean dtype options = [] else: options = [str("ATTRIBUTE=") + burn_attr] with Dataset(array, **dataset_kwargs) as dataset: gdal.RasterizeLayer(dataset, (1, ), layer, options=options) return _finalize_rasterize_result(array, no_data_value)
def zonal_stats(feat, input_zone_polygon, input_value_raster, band):
    """Compute zonal statistics of one raster band inside a polygon feature.

    :param feat: OGR feature whose POLYGON/MULTIPOLYGON geometry defines
        the zone of interest.
    :param input_zone_polygon: path to the vector dataset; its first layer
        is rasterized as the zone mask.
    :param input_value_raster: path to the raster holding the values.
    :param band: 1-based index of the raster band to sample.
    :returns: tuple ``(mean, std)`` of the pixels inside the zone, each
        rounded and cast to int.
    """
    # Open data
    raster = gdal.Open(input_value_raster)
    shp = ogr.Open(input_zone_polygon)
    lyr = shp.GetLayer()

    # Get raster georeference info
    transform = raster.GetGeoTransform()
    xOrigin = transform[0]
    yOrigin = transform[3]
    pixelWidth = transform[1]
    pixelHeight = transform[5]

    # Collect all vertex coordinates of the feature to get its extent
    geom = feat.GetGeometryRef()
    if (geom.GetGeometryName() == 'MULTIPOLYGON'):
        count = 0
        pointsX = []
        pointsY = []
        for polygon in geom:
            geomInner = geom.GetGeometryRef(count)
            ring = geomInner.GetGeometryRef(0)  # outer ring only
            numpoints = ring.GetPointCount()
            for p in range(numpoints):
                lon, lat, z = ring.GetPoint(p)
                pointsX.append(lon)
                pointsY.append(lat)
            count += 1
    elif (geom.GetGeometryName() == 'POLYGON'):
        ring = geom.GetGeometryRef(0)  # outer ring only
        numpoints = ring.GetPointCount()
        pointsX = []
        pointsY = []
        for p in range(numpoints):
            lon, lat, z = ring.GetPoint(p)
            pointsX.append(lon)
            pointsY.append(lat)
    else:
        sys.exit("ERROR: Geometry needs to be either Polygon or Multipolygon")

    xmin = min(pointsX)
    xmax = max(pointsX)
    ymin = min(pointsY)
    ymax = max(pointsY)

    # Specify offset and rows and columns to read.
    # NOTE(review): the y offset/count use pixelWidth, i.e. square pixels
    # are assumed — confirm, or switch to abs(pixelHeight) for non-square
    # rasters.
    xoff = int((xmin - xOrigin) / pixelWidth)
    yoff = int((yOrigin - ymax) / pixelWidth)
    xcount = int(
        (xmax - xmin) /
        pixelWidth)  # +1 would add a pixel to the right side
    ycount = int(
        (ymax - ymin) /
        pixelWidth)  # +1 would add a pixel to the bottom side

    # Create memory target raster aligned to the feature's bounding box
    target_ds = gdal.GetDriverByName('MEM').Create('', xcount, ycount, 1,
                                                   gdal.GDT_Byte)
    target_ds.SetGeoTransform((
        xmin,
        pixelWidth,
        0,
        ymax,
        0,
        pixelHeight,
    ))

    # Create for target raster the same projection as for the value raster
    raster_srs = osr.SpatialReference()
    raster_srs.ImportFromWkt(raster.GetProjectionRef())
    target_ds.SetProjection(raster_srs.ExportToWkt())

    # Rasterize zone polygon to raster (1 inside the zone, 0 elsewhere)
    gdal.RasterizeLayer(target_ds, [1], lyr, burn_values=[1])

    # Read raster as arrays.
    # FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented drop-in replacement.
    dataBandRaster = raster.GetRasterBand(band)
    data = dataBandRaster.ReadAsArray(xoff, yoff, xcount,
                                      ycount).astype(float)
    bandmask = target_ds.GetRasterBand(1)
    datamask = bandmask.ReadAsArray(0, 0, xcount, ycount).astype(float)

    # data zone of raster: mask out everything outside the polygon
    dataZone = np.ma.masked_array(data, np.logical_not(datamask))

    # Release datasets (FIX: also release the in-memory mask raster)
    raster_srs = None
    raster = None
    shp = None
    lyr = None
    target_ds = None

    # Calculate statistics of zonal raster
    return int(round(np.mean(dataZone))), int(round(np.std(dataZone)))
def cut_by_geojson(input_file, output_file, shape_geojson):
    """Clip a raster to the footprint of a GeoJSON shape (WGS84).

    The output GeoTIFF covers the shape's bounding box; pixels outside the
    shape are set to 0 and 0 is registered as nodata on every band.
    """
    # Bounding box of the shape in WGS84 coordinates
    lons, lats = zip(*gj.utils.coords(gj.loads(shape_geojson)))
    min_x, max_x, min_y, max_y = min(lons), max(lons), min(lats), max(lats)

    # Source raster (read only) and its georeferencing
    dataset = gdal.Open(input_file, gdal.GA_ReadOnly)
    bands = dataset.RasterCount
    transform = dataset.GetGeoTransform()
    projection = dataset.GetProjection()
    xOrigin, yOrigin = transform[0], transform[3]
    pixelWidth = transform[1]
    pixelHeight = -transform[5]

    # Spatial reference of the input raster
    srs = osr.SpatialReference()
    srs.ImportFromWkt(projection)

    # Transformation from WGS84 into the raster's SRS
    OSR_WGS84_REF = osr.SpatialReference()
    OSR_WGS84_REF.ImportFromEPSG(4326)
    wgs84_to_image_transform = osr.CoordinateTransformation(
        OSR_WGS84_REF, srs)
    XYmin = wgs84_to_image_transform.TransformPoint(min_x, max_y)
    XYmax = wgs84_to_image_transform.TransformPoint(max_x, min_y)

    # Pixel/line coordinates of the clip window corners
    i1 = int((XYmin[0] - xOrigin) / pixelWidth)
    j1 = int((yOrigin - XYmin[1]) / pixelHeight)
    i2 = int((XYmax[0] - xOrigin) / pixelWidth)
    j2 = int((yOrigin - XYmax[1]) / pixelHeight)
    new_cols, new_rows = i2 - i1 + 1, j2 - j1 + 1

    # Geotransform of the clipped window (new upper-left X,Y)
    new_x = xOrigin + i1 * pixelWidth
    new_y = yOrigin - j1 * pixelHeight
    new_transform = (new_x, transform[1], transform[2],
                     new_y, transform[4], transform[5])

    # Reproject the shape itself into the raster SRS
    wkt_geom = ogr.CreateGeometryFromJson(str(shape_geojson))
    wkt_geom.Transform(wgs84_to_image_transform)

    # In-memory byte raster that will hold the shape mask
    target_ds = GDAL_MEMORY_DRIVER.Create('', new_cols, new_rows, 1,
                                          gdal.GDT_Byte)
    target_ds.SetGeoTransform(new_transform)
    target_ds.SetProjection(projection)

    # In-memory vector layer holding the reprojected shape
    ogr_dataset = OGR_MEMORY_DRIVER.CreateDataSource('shapemask')
    ogr_layer = ogr_dataset.CreateLayer('shapemask', srs=srs)
    ogr_feature = ogr.Feature(ogr_layer.GetLayerDefn())
    ogr_feature.SetGeometryDirectly(ogr.Geometry(wkt=wkt_geom.ExportToWkt()))
    ogr_layer.CreateFeature(ogr_feature)

    # Burn 1 into every pixel touched by the shape
    gdal.RasterizeLayer(target_ds, [1], ogr_layer, burn_values=[1],
                        options=["ALL_TOUCHED=TRUE"])

    # Output GeoTIFF with one band per source band
    driver = gdal.GetDriverByName('GTiff')
    outds = driver.Create(output_file, new_cols, new_rows, bands,
                          gdal.GDT_Float32)

    # Read the clip window of every band, then apply the mask band by band
    mask_array = target_ds.GetRasterBand(1).ReadAsArray()
    band_list = [
        dataset.GetRasterBand(k + 1).ReadAsArray(i1, j1, new_cols, new_rows)
        for k in range(bands)
    ]
    for k in range(bands):
        data = np.where(mask_array == 1, band_list[k], mask_array)
        outds.GetRasterBand(k + 1).SetNoDataValue(0)
        outds.GetRasterBand(k + 1).WriteArray(data)

    outds.SetProjection(projection)
    outds.SetGeoTransform(new_transform)

    # Release all datasets
    target_ds = None
    dataset = None
    outds = None
    ogr_dataset = None
off_ulx2, off_uly2 = map( int, gdal.ApplyGeoTransform(inv_gt, xmin, ymax)) off_lrx2, off_lry2 = map( int, gdal.ApplyGeoTransform(inv_gt, xmax, ymin)) rows2, columns2 = (off_lry2 - off_uly2) + 1, (off_lrx2 - off_ulx2) + 1 ras_tmp = gdal.GetDriverByName('MEM').Create( '', columns2, rows2, 1, gdal.GDT_Byte) ras_tmp.SetProjection(ras.GetProjection()) ras_gt = list(gt) ras_gt[0], ras_gt[3] = gdal.ApplyGeoTransform( gt, off_ulx2, off_uly2) ras_tmp.SetGeoTransform(ras_gt) gdal.RasterizeLayer(ras_tmp, [1], vect_tmp_lyr2, burn_values=[1]) mask = ras_tmp.GetRasterBand(1).ReadAsArray() aa = off_uly2 bb = off_lry2 + 1 cc = off_ulx2 dd = off_lrx2 + 1 # zone_ras_init = np.ma.masked_array(result[aa:bb, cc:dd], np.logical_not(mask), fill_value=-999999) zone_ras = np.ma.masked_array( values[aa:bb, cc:dd] ) # np.ma.masked_array(values[aa:bb, cc:dd], np.logical_not(mask), fill_value=-999999) zone_ras_list2 = zone_ras.compressed() #.tolist() if False: # col_list.append(-999999) col_list.append(-999999)
def dump_raster(self, filename, driver='GTiff', attr=None, pixel_size=1., remove=True): """ Output layer to GDAL Rasterfile Parameters ---------- filename : string path to shape-filename driver : string GDAL Raster Driver attr : string attribute to burn into raster pixel_size : float pixel Size in source units remove : bool if True removes existing output file """ layer = self.ds.GetLayer() layer.ResetReading() x_min, x_max, y_min, y_max = layer.GetExtent() cols = int((x_max - x_min) / pixel_size) rows = int((y_max - y_min) / pixel_size) # Todo: at the moment, always writing floats ds_out = io.gdal_create_dataset('MEM', '', cols, rows, 1, gdal_type=gdal.GDT_Float32) ds_out.SetGeoTransform((x_min, pixel_size, 0, y_max, 0, -pixel_size)) proj = layer.GetSpatialRef() if proj is None: proj = self._srs ds_out.SetProjection(proj.ExportToWkt()) band = ds_out.GetRasterBand(1) band.FlushCache() print("Rasterize layers") if attr is not None: gdal.RasterizeLayer( ds_out, [1], layer, burn_values=[0], options=["ATTRIBUTE={0}".format(attr), "ALL_TOUCHED=TRUE"], callback=gdal.TermProgress) else: gdal.RasterizeLayer(ds_out, [1], layer, burn_values=[1], options=["ALL_TOUCHED=TRUE"], callback=gdal.TermProgress) io.write_raster_dataset(filename, ds_out, driver, remove=remove) del ds_out
feature.Destroy() new_id1 += 1 # potential anomalies elif (len(np.where((clipped_intersection.flatten()+clipped_t_t1.flatten())==2)[0])/size_t_t1)<thr_int and size_t_t1>=3: feature = ogr.Feature(layer1.GetLayerDefn()) feature.SetField('value', new_id2) feature.SetGeometry(vectorGeometry1) layer_no_intersection.CreateFeature(feature) feature.Destroy() new_id2 += 1 # we write raster with change polygons from t_t1 that have sufficient intersection ds_intersection = gdal.GetDriverByName('GTiff').Create(path_results + loss_folder_nn_t_t1 + image_name_nn_t_t1 + ".TIF", W, H, 1, gdal.GDT_Byte) ds_intersection.SetGeoTransform(geo) ds_intersection.SetProjection(proj) gdal.RasterizeLayer(ds_intersection, [1], layer_intersection, burn_values=[1]) source_intersection.Destroy() ds_intersection = None intersection_arr_list.append(intersection_arr) # no intersection with t-1 : t+1 # this is the raster with potential anomalies no_intersection_arr = image_array_outliers_nn_t_t1 - intersection_arr intersection_arr_anomaly = np.zeros_like(intersection_arr) ds_anomaly = gdal.GetDriverByName('GTiff').Create(path_results + "Anomaly/" + loss_folder_nn_t_t1 + "Anomaly_" +image_name_nn_t_t1 + ".TIF", W, H, 1, gdal.GDT_Byte) ds_anomaly.SetGeoTransform(geo) ds_anomaly.SetProjection(proj) gdal.RasterizeLayer(ds_anomaly, [1], layer_no_intersection, burn_values=[1]) ds_anomaly = None
ds_res = geolib.get_res(ds_list[0]) valid_area = dz.count() * ds_res[0] * ds_res[1] valid_area_perc = valid_area / glac_area min_valid_area_perc = 0.80 if valid_area_perc < min_valid_area_perc: print( "Not enough valid pixels. %0.1f%% percent of glacier polygon area" % (100 * valid_area_perc)) continue #Rasterize NED source dates if site == 'conus': z1_date_r_ds = iolib.mem_drv.CreateCopy('', ds_list[0]) gdal.RasterizeLayer(z1_date_r_ds, [1], z1_date_shp_lyr, options=["ATTRIBUTE=S_DATE_CLN"]) z1_date = np.ma.array(iolib.ds_getma(z1_date_r_ds), mask=glac_geom_mask) #Filter dz - throw out abs differences >150 m #Compute dz, volume change, mass balance and stats z1_stats = malib.print_stats(z1) z2_stats = malib.print_stats(z2) z2_elev_med = z2_stats[5] z2_elev_p16 = z2_stats[11] z2_elev_p84 = z2_stats[12] #These can be timestamp arrays or datetime objects t1 = z1_date
def processAlgorithm(self, progress):
    """Compute hypsometric data for every boundary feature over the DEM.

    For each feature of the boundary layer that intersects the DEM, the
    DEM window covered by the feature is read, masked to the feature's
    geometry, and handed to ``self.calculateHypsometry`` which writes a
    per-feature CSV into the output directory.

    :param progress: QGIS progress/feedback object (messages + progress bar).
    """
    rasterPath = self.getParameterValue(self.INPUT_DEM)
    layer = dataobjects.getObjectFromUri(
        self.getParameterValue(self.BOUNDARY_LAYER))
    step = self.getParameterValue(self.STEP)
    percentage = self.getParameterValue(self.USE_PERCENTAGE)

    outputPath = self.getOutputValue(self.OUTPUT_DIRECTORY)

    rasterDS = gdal.Open(rasterPath, gdal.GA_ReadOnly)
    geoTransform = rasterDS.GetGeoTransform()
    rasterBand = rasterDS.GetRasterBand(1)
    noData = rasterBand.GetNoDataValue()

    cellXSize = abs(geoTransform[1])
    cellYSize = abs(geoTransform[5])
    rasterXSize = rasterDS.RasterXSize
    rasterYSize = rasterDS.RasterYSize

    # Full DEM extent in map coordinates, used to clip features to the raster
    rasterBBox = QgsRectangle(geoTransform[0],
                              geoTransform[3] - cellYSize * rasterYSize,
                              geoTransform[0] + cellXSize * rasterXSize,
                              geoTransform[3])
    rasterGeom = QgsGeometry.fromRect(rasterBBox)

    # OGR spatial reference built from the boundary layer's CRS
    crs = osr.SpatialReference()
    crs.ImportFromProj4(str(layer.crs().toProj4()))

    memVectorDriver = ogr.GetDriverByName('Memory')
    memRasterDriver = gdal.GetDriverByName('MEM')

    features = vector.features(layer)
    total = 100.0 / len(features) if len(features) > 0 else 1

    for current, f in enumerate(features):
        geom = f.geometry()
        intersectedGeom = rasterGeom.intersection(geom)

        if intersectedGeom.isGeosEmpty():
            progress.setInfo(
                self.tr('Feature %d does not intersect raster or '
                        'entirely located in NODATA area' % f.id()))
            continue

        fName = os.path.join(
            outputPath, 'hystogram_%s_%s.csv' % (layer.name(), f.id()))

        ogrGeom = ogr.CreateGeometryFromWkt(intersectedGeom.exportToWkt())
        bbox = intersectedGeom.boundingBox()
        xMin = bbox.xMinimum()
        xMax = bbox.xMaximum()
        yMin = bbox.yMinimum()
        yMax = bbox.yMaximum()

        # Pixel window of the DEM covered by this feature
        (startColumn, startRow) = raster.mapToPixel(xMin, yMax, geoTransform)
        (endColumn, endRow) = raster.mapToPixel(xMax, yMin, geoTransform)

        width = endColumn - startColumn
        height = endRow - startRow

        srcOffset = (startColumn, startRow, width, height)
        # NOTE(review): the window is read before the zero-size check below;
        # for a degenerate window ReadAsArray may return None — confirm.
        srcArray = rasterBand.ReadAsArray(*srcOffset)

        if srcOffset[2] == 0 or srcOffset[3] == 0:
            progress.setInfo(
                self.tr('Feature %d is smaller than raster '
                        'cell size' % f.id()))
            continue

        # Geotransform of the clipped window (same cell size, shifted origin)
        newGeoTransform = (
            geoTransform[0] + srcOffset[0] * geoTransform[1],
            geoTransform[1],
            0.0,
            geoTransform[3] + srcOffset[1] * geoTransform[5],
            0.0,
            geoTransform[5]
        )

        # Rasterize the feature geometry into a byte mask of the window
        memVDS = memVectorDriver.CreateDataSource('out')
        memLayer = memVDS.CreateLayer('poly', crs, ogr.wkbPolygon)

        ft = ogr.Feature(memLayer.GetLayerDefn())
        ft.SetGeometry(ogrGeom)
        memLayer.CreateFeature(ft)
        ft.Destroy()

        rasterizedDS = memRasterDriver.Create('', srcOffset[2],
                                              srcOffset[3], 1, gdal.GDT_Byte)
        rasterizedDS.SetGeoTransform(newGeoTransform)
        gdal.RasterizeLayer(rasterizedDS, [1], memLayer, burn_values=[1])
        rasterizedArray = rasterizedDS.ReadAsArray()

        # Mask out NODATA pixels and pixels outside the feature geometry
        srcArray = numpy.nan_to_num(srcArray)
        masked = numpy.ma.MaskedArray(srcArray,
                                      mask=numpy.logical_or(srcArray == noData,
                                                            numpy.logical_not(rasterizedArray)))

        self.calculateHypsometry(f.id(), fName, progress, masked,
                                 cellXSize, cellYSize, percentage, step)

        # release per-feature in-memory datasets
        memVDS = None
        rasterizedDS = None

        progress.setPercentage(int(current * total))

    rasterDS = None
def ConvertToRoadSegmentation(tif_file,geojson_file,out_file,isInstance=False): #Read Dataset from geo json file dataset = ogr.Open(geojson_file) if not dataset: print('No Dataset') return -1 layer = dataset.GetLayerByIndex(0) if not layer: print('No Layer') return -1 # First we will open our raster image, to understand how we will want to rasterize our vector raster_ds = gdal.Open(tif_file, gdal.GA_ReadOnly) # Fetch number of rows and columns ncol = raster_ds.RasterXSize nrow = raster_ds.RasterYSize # Fetch projection and extent proj = raster_ds.GetProjectionRef() ext = raster_ds.GetGeoTransform() raster_ds = None # Create the raster dataset memory_driver = gdal.GetDriverByName('GTiff') out_raster_ds = memory_driver.Create(out_file, ncol, nrow, 1, gdal.GDT_Byte) # Set the ROI image's projection and extent to our input raster's projection and extent out_raster_ds.SetProjection(proj) out_raster_ds.SetGeoTransform(ext) # Fill our output band with the 0 blank, no class label, value b = out_raster_ds.GetRasterBand(1) if isInstance: b.Fill(0) # Rasterize the shapefile layer to our new dataset status = gdal.RasterizeLayer(out_raster_ds, # output to our new dataset [1], # output to our new dataset's first band layer, # rasterize this layer None, None, # don't worry about transformations since we're in same projection [0], # burn value 0 ['ALL_TOUCHED=TRUE', # rasterize all pixels touched by polygons 'ATTRIBUTE=road_type'] # put raster values according to the 'id' field values ) else: b.Fill(0) # Rasterize the shapefile layer to our new dataset status = gdal.RasterizeLayer(out_raster_ds, # output to our new dataset [1], # output to our new dataset's first band layer, # rasterize this layer None, None, # don't worry about transformations since we're in same projection [255] # burn value 0 ) # Close dataset out_raster_ds = None return status
def rasterize(src, dst, update=False, prototype=None, nodata=-9999,
              init=-9999, te=None, tr=None, ts=None, sql=None, where=None,
              out_format='GTIFF', out_type='Byte', out_srs=None, co=None,
              working_format='MEM', **kwargs):
    """Rasterize a vector datasource into a new or existing raster.

    :param src: path of the OGR vector datasource to rasterize.
    :param dst: output raster filename ('' is allowed for MEM output).
    :param update: if True, burn into the existing raster ``dst``.
    :param prototype: raster whose grid (size/geotransform/projection) is
        copied for the output.
    :param nodata: nodata value of the created raster.
    :param init: initial fill value of the created raster.
    :param te: output extent [xmin, ymin, xmax, ymax].
    :param tr: output resolution [xres, yres].
    :param ts: output size [cols, rows].
    :param sql: SQL statement selecting the layer to rasterize.
    :param where: attribute filter applied to the layer.
    :param out_format: GDAL driver name of the final output.
    :param out_type: GDAL data type name, e.g. 'Byte' or 'Int16'.
    :param out_srs: output SRS; defaults to the layer SRS, falling back
        to an arbitrary LOCAL_CS.
    :param co: list of driver creation options, e.g. ['TARGET=10'].
    :param working_format: intermediate working format, 'MEM' by default.
    :param kwargs: forwarded to gdal.RasterizeLayer, e.g.
        burn_values=[37] or options=["ATTRIBUTE=test"].
    :returns: the output GDAL dataset.

    Examples::

        dstds = rasterize(src, dst, ts=[500, 500], out_type='Int16', burn_values=[37])
        rasterize(src, dst, tr=[0.01, 0.01], options=["ATTRIBUTE=test"])
        rasterize(src, dst, where='test=1', update=True, burn_values=[10])
        rasterize(src, dst, tr=[0.01, 0.01], te=[143.5, -43.5, 148.5, -39.5],
                  out_type='Int16', options=["ATTRIBUTE=test"])
    """
    # FIX: `co=[]` was a shared mutable default argument
    if co is None:
        co = []

    # Temporary files (the MEM working format needs none)
    if working_format == 'MEM':
        tmpfd, tmpfile = [None, '']
    else:
        tmpfd, tmpfile = tempfile.mkstemp()

    # Open the vector layer to rasterize from
    src_ds = ogr.Open(src)
    if not src_ds:
        # FIX: py2-only `raise E, msg` syntax replaced by the call form,
        # which is valid in both Python 2 and 3 (applied throughout).
        raise RuntimeError('\'%s\' does not exist in the file system.' % src)
    if sql:
        # FIX: was `ds.ExecuteSQL(sql)` — `ds` is undefined in this scope
        src_lyr = src_ds.ExecuteSQL(sql)
    else:
        src_lyr = src_ds.GetLayer()
    if where:
        src_lyr.SetAttributeFilter(where)
    xmin, xmax, ymin, ymax = src_lyr.GetExtent()
    src_ext = xmin, ymin, xmax, ymax
    try:
        src_wkt = src_lyr.GetSpatialRef().ExportToWkt()
        if not out_srs:
            out_srs = src_wkt
    except:
        # layer has no usable SRS: fall back to an arbitrary local CS
        out_srs = 'LOCAL_CS["arbitrary"]'

    # From http://svn.osgeo.org/gdal/trunk/autotest/alg/rasterize.py
    datatype = gdal.GetDataTypeByName(out_type)

    # Get a GDAL Dataset to rasterize into
    dst_driver = None
    if update:
        # Can we update an existing raster
        dstds = gdal.Open(dst, gdalconst.GA_Update)
    else:
        gdal.ErrorReset()
        if prototype:
            # copy the prototype raster's grid
            protds = gdal.Open(prototype)
            dst_driver = protds.GetDriver()
            dstds = create_raster(protds.RasterXSize, protds.RasterYSize,
                                  working_format, tmpfile,
                                  protds.GetGeoTransform(),
                                  protds.GetProjection(),
                                  datatype=datatype, nodata=nodata,
                                  init=init)
            del protds
        elif te and tr:
            # extent + resolution -> derive the grid size
            out_gt = (te[0], tr[0], 0, te[3], 0, -tr[1])
            out_cols = int(math.ceil((te[2] - te[0]) / tr[0]))
            out_rows = int(math.ceil((te[3] - te[1]) / tr[1]))
            dstds = create_raster(out_cols, out_rows, working_format,
                                  tmpfile, out_gt, out_srs,
                                  datatype=datatype, nodata=nodata,
                                  init=init)
        elif te and ts:
            # extent + size -> derive the resolution
            xres = (te[2] - te[0]) / float(ts[0])
            yres = (te[3] - te[1]) / float(ts[1])
            out_gt = (te[0], xres, 0, te[3], 0, -yres)
            dstds = create_raster(ts[0], ts[1], working_format, tmpfile,
                                  out_gt, out_srs, datatype=datatype,
                                  nodata=nodata, init=init)
        elif ts:
            # size only -> use the layer extent
            xres = (src_ext[2] - src_ext[0]) / float(ts[0])
            yres = (src_ext[3] - src_ext[1]) / float(ts[1])
            out_gt = (src_ext[0], xres, 0, src_ext[3], 0, -yres)
            dstds = create_raster(ts[0], ts[1], working_format, tmpfile,
                                  out_gt, out_srs, datatype=datatype,
                                  nodata=nodata, init=init)
        elif tr:
            # resolution only -> use the layer extent
            out_cols = int(math.ceil((src_ext[2] - src_ext[0]) / float(tr[0])))
            out_rows = int(math.ceil((src_ext[3] - src_ext[1]) / float(tr[1])))
            out_gt = (src_ext[0], tr[0], 0, src_ext[3], 0, -tr[1])
            dstds = create_raster(out_cols, out_rows, working_format,
                                  tmpfile, out_gt, out_srs,
                                  datatype=datatype, nodata=nodata,
                                  init=init)
        else:
            raise RuntimeError('%s does not exist and neither a prototype '
                               'raster nor appropriate options were '
                               'specified!' % dst)

    if dst_driver is None:
        try:
            dst_driver = gdal.GetDriverByName(out_format)
        except:
            raise RuntimeError('Format driver %s not found, pick a '
                               'supported driver.' % out_format)

    err = gdal.RasterizeLayer(dstds, [1], src_lyr, **kwargs)
    if err != 0:
        raise RuntimeError("error rasterizing layer: %s" % err)

    if not update:
        dstds = dst_driver.CreateCopy(dst, dstds, 1, co)
        try:
            # tmpfd is None for the MEM working format; cleanup failure is
            # deliberately ignored (best-effort temp-file removal).
            os.close(tmpfd)
            os.unlink(tmpfile)
        except:
            pass

    dstds.FlushCache()
    return dstds
def processAlgorithm(self, context, feedback):
    """Compute zonal statistics of a raster band for every vector feature.

    For each feature the raster window covered by it (or the whole raster
    when GLOBAL_EXTENT is set) is read, scaled, masked to the feature's
    geometry, and min/max/sum/count/mean/std/unique/range/var/median
    (plus mode when SciPy is available) are written as new attributes of
    the output layer.

    Based on code by Matthew Perry
    https://gist.github.com/perrygeo/5667173

    :param context: QGIS processing context.
    :param feedback: QGIS feedback object (progress bar).
    """
    layer = QgsProcessingUtils.mapLayerFromString(
        self.getParameterValue(self.INPUT_VECTOR), context)
    rasterPath = str(self.getParameterValue(self.INPUT_RASTER))

    bandNumber = self.getParameterValue(self.RASTER_BAND)
    columnPrefix = self.getParameterValue(self.COLUMN_PREFIX)
    useGlobalExtent = self.getParameterValue(self.GLOBAL_EXTENT)

    rasterDS = gdal.Open(rasterPath, gdal.GA_ReadOnly)
    geoTransform = rasterDS.GetGeoTransform()
    rasterBand = rasterDS.GetRasterBand(bandNumber)
    noData = rasterBand.GetNoDataValue()

    cellXSize = abs(geoTransform[1])
    cellYSize = abs(geoTransform[5])
    rasterXSize = rasterDS.RasterXSize
    rasterYSize = rasterDS.RasterYSize

    # Full raster extent in map coordinates
    rasterBBox = QgsRectangle(geoTransform[0],
                              geoTransform[3] - cellYSize * rasterYSize,
                              geoTransform[0] + cellXSize * rasterXSize,
                              geoTransform[3])
    rasterGeom = QgsGeometry.fromRect(rasterBBox)

    # OGR spatial reference built from the vector layer's CRS
    crs = osr.SpatialReference()
    crs.ImportFromProj4(str(layer.crs().toProj4()))

    if useGlobalExtent:
        # read the whole raster once; the same window is reused per feature
        xMin = rasterBBox.xMinimum()
        xMax = rasterBBox.xMaximum()
        yMin = rasterBBox.yMinimum()
        yMax = rasterBBox.yMaximum()

        (startColumn, startRow) = mapToPixel(xMin, yMax, geoTransform)
        (endColumn, endRow) = mapToPixel(xMax, yMin, geoTransform)

        width = endColumn - startColumn
        height = endRow - startRow

        srcOffset = (startColumn, startRow, width, height)
        srcArray = rasterBand.ReadAsArray(*srcOffset)
        # apply the band's scale/offset to get physical values
        srcArray = srcArray * rasterBand.GetScale() + rasterBand.GetOffset(
        )

        newGeoTransform = (
            geoTransform[0] + srcOffset[0] * geoTransform[1],
            geoTransform[1],
            0.0,
            geoTransform[3] + srcOffset[1] * geoTransform[5],
            0.0,
            geoTransform[5],
        )

    memVectorDriver = ogr.GetDriverByName('Memory')
    memRasterDriver = gdal.GetDriverByName('MEM')

    # Ensure one output field per statistic exists; keep each field's index
    fields = layer.fields()
    (idxMin, fields) = vector.findOrCreateField(layer, fields,
                                                columnPrefix + 'min', 21, 6)
    (idxMax, fields) = vector.findOrCreateField(layer, fields,
                                                columnPrefix + 'max', 21,
                                                6)
    (idxSum, fields) = vector.findOrCreateField(layer, fields,
                                                columnPrefix + 'sum', 21, 6)
    (idxCount, fields) = vector.findOrCreateField(layer, fields,
                                                  columnPrefix + 'count', 21,
                                                  6)
    (idxMean, fields) = vector.findOrCreateField(layer, fields,
                                                 columnPrefix + 'mean', 21,
                                                 6)
    (idxStd, fields) = vector.findOrCreateField(layer, fields,
                                                columnPrefix + 'std', 21, 6)
    (idxUnique, fields) = vector.findOrCreateField(layer, fields,
                                                   columnPrefix + 'unique',
                                                   21, 6)
    (idxRange, fields) = vector.findOrCreateField(layer, fields,
                                                  columnPrefix + 'range', 21,
                                                  6)
    (idxVar, fields) = vector.findOrCreateField(layer, fields,
                                                columnPrefix + 'var', 21, 6)
    (idxMedian, fields) = vector.findOrCreateField(layer, fields,
                                                   columnPrefix + 'median',
                                                   21, 6)
    if hasSciPy:
        # mode requires scipy.stats
        (idxMode, fields) = vector.findOrCreateField(layer, fields,
                                                     columnPrefix + 'mode',
                                                     21, 6)

    writer = self.getOutputFromName(self.OUTPUT_LAYER).getVectorWriter(
        fields, layer.wkbType(), layer.crs(), context)

    outFeat = QgsFeature()
    outFeat.initAttributes(len(fields))
    outFeat.setFields(fields)

    features = QgsProcessingUtils.getFeatures(layer, context)
    total = 100.0 / QgsProcessingUtils.featureCount(layer, context)
    for current, f in enumerate(features):
        geom = f.geometry()
        intersectedGeom = rasterGeom.intersection(geom)
        ogrGeom = ogr.CreateGeometryFromWkt(intersectedGeom.exportToWkt())

        if not useGlobalExtent:
            # per-feature window: read only the raster covered by the feature
            bbox = intersectedGeom.boundingBox()

            xMin = bbox.xMinimum()
            xMax = bbox.xMaximum()
            yMin = bbox.yMinimum()
            yMax = bbox.yMaximum()

            (startColumn, startRow) = mapToPixel(xMin, yMax, geoTransform)
            (endColumn, endRow) = mapToPixel(xMax, yMin, geoTransform)

            width = endColumn - startColumn
            height = endRow - startRow

            # feature smaller than one raster cell: skip it
            if width == 0 or height == 0:
                continue

            srcOffset = (startColumn, startRow, width, height)
            srcArray = rasterBand.ReadAsArray(*srcOffset)
            # apply the band's scale/offset to get physical values
            srcArray = srcArray * rasterBand.GetScale(
            ) + rasterBand.GetOffset()

            newGeoTransform = (
                geoTransform[0] + srcOffset[0] * geoTransform[1],
                geoTransform[1],
                0.0,
                geoTransform[3] + srcOffset[1] * geoTransform[5],
                0.0,
                geoTransform[5],
            )

        # Create a temporary vector layer in memory
        memVDS = memVectorDriver.CreateDataSource('out')
        memLayer = memVDS.CreateLayer('poly', crs, ogr.wkbPolygon)

        ft = ogr.Feature(memLayer.GetLayerDefn())
        ft.SetGeometry(ogrGeom)
        memLayer.CreateFeature(ft)
        ft.Destroy()

        # Rasterize it
        rasterizedDS = memRasterDriver.Create('', srcOffset[2],
                                              srcOffset[3], 1, gdal.GDT_Byte)
        rasterizedDS.SetGeoTransform(newGeoTransform)
        gdal.RasterizeLayer(rasterizedDS, [1], memLayer, burn_values=[1])
        rasterizedArray = rasterizedDS.ReadAsArray()

        # Mask out NODATA pixels and pixels outside the feature geometry.
        # NOTE(review): nan_to_num turns NaNs into 0 before the noData
        # comparison — confirm this is intended for rasters with NaN nodata.
        srcArray = numpy.nan_to_num(srcArray)
        masked = numpy.ma.MaskedArray(
            srcArray,
            mask=numpy.logical_or(srcArray == noData,
                                  numpy.logical_not(rasterizedArray)))

        # Insert the statistics as attributes; NaN results become NULL
        outFeat.setGeometry(geom)
        attrs = f.attributes()
        v = float(masked.min())
        attrs.insert(idxMin, None if numpy.isnan(v) else v)
        v = float(masked.max())
        attrs.insert(idxMax, None if numpy.isnan(v) else v)
        v = float(masked.sum())
        attrs.insert(idxSum, None if numpy.isnan(v) else v)
        attrs.insert(idxCount, int(masked.count()))
        v = float(masked.mean())
        attrs.insert(idxMean, None if numpy.isnan(v) else v)
        v = float(masked.std())
        attrs.insert(idxStd, None if numpy.isnan(v) else v)
        attrs.insert(idxUnique, numpy.unique(masked.compressed()).size)
        v = float(masked.max()) - float(masked.min())
        attrs.insert(idxRange, None if numpy.isnan(v) else v)
        v = float(masked.var())
        attrs.insert(idxVar, None if numpy.isnan(v) else v)
        v = float(numpy.ma.median(masked))
        attrs.insert(idxMedian, None if numpy.isnan(v) else v)
        if hasSciPy:
            attrs.insert(idxMode, float(mode(masked, axis=None)[0][0]))
        outFeat.setAttributes(attrs)
        writer.addFeature(outFeat)

        # release per-feature in-memory datasets
        memVDS = None
        rasterizedDS = None

        feedback.setProgress(int(current * total))

    rasterDS = None
    del writer
def calcZonalBandStatsTestPolyPts(veclyr, valsimg, imgbandidx, minthres, maxthres, minfield=None, maxfield=None,
                                  meanfield=None, stddevfield=None, sumfield=None, countfield=None, modefield=None,
                                  medianfield=None):
    """
    A function which calculates zonal statistics for a particular image band.
    If unsure then use this function. This function tests whether 1 or more pixels
    has been found within the polygon and if not then the centroid is used to find
    a value for the polygon. If you are unsure as to whether the pixels are small
    enough to be contained within all the polygons then use this function.

    :param veclyr: OGR vector layer object containing the geometries being processed
                   and to which the stats will be written.
    :param valsimg: the values image
    :param imgbandidx: the index (starting at 1) of the image band for which the stats
                       will be calculated. If defined the no data value of the band
                       will be ignored.
    :param minthres: a lower threshold for values which will be included in the stats calculation.
    :param maxthres: a upper threshold for values which will be included in the stats calculation.
    :param minfield: the name of the field for the min value (None or not specified to be ignored).
    :param maxfield: the name of the field for the max value (None or not specified to be ignored).
    :param meanfield: the name of the field for the mean value (None or not specified to be ignored).
    :param stddevfield: the name of the field for the standard deviation value (None or not
                        specified to be ignored).
    :param sumfield: the name of the field for the sum value (None or not specified to be ignored).
    :param countfield: the name of the field for the count (of number of pixels) value
                       (None or not specified to be ignored).
    :param modefield: the name of the field for the mode value (None or not specified to be ignored).
    :param medianfield: the name of the field for the median value (None or not specified to be ignored).
    """
    if modefield is not None:
        # scipy only needed when the mode is requested; keep the import lazy.
        import scipy.stats.mstats
    gdal.UseExceptions()
    try:
        if veclyr is None:
            raise Exception("The inputted vector layer was None")
        if (minfield is None) and (maxfield is None) and (meanfield is None) and (stddevfield is None) and \
                (sumfield is None) and (countfield is None) and (modefield is None) and (medianfield is None):
            raise Exception("At least one field needs to be specified for there to be an output.")

        imgDS = gdal.OpenEx(valsimg, gdal.GA_ReadOnly)
        if imgDS is None:
            raise Exception("Could not open '{}'".format(valsimg))
        imgband = imgDS.GetRasterBand(imgbandidx)
        if imgband is None:
            raise Exception("Could not find image band '{}'".format(imgbandidx))
        imgGeoTrans = imgDS.GetGeoTransform()
        img_wkt_str = imgDS.GetProjection()
        img_spatial_ref = osr.SpatialReference()
        img_spatial_ref.ImportFromWkt(img_wkt_str)
        pixel_width = imgGeoTrans[1]
        pixel_height = imgGeoTrans[5]
        imgSizeX = imgDS.RasterXSize
        imgSizeY = imgDS.RasterYSize
        imgNoDataVal = imgband.GetNoDataValue()

        veclyr_spatial_ref = veclyr.GetSpatialRef()
        if not img_spatial_ref.IsSame(veclyr_spatial_ref):
            imgDS = None
            # BUG FIX: the original message referenced an undefined name 'vecfile',
            # raising a NameError instead of the intended Exception.
            raise Exception("Inputted raster and vector layers have different projections: "
                            "('{0}' '{1}') ".format(veclyr.GetName(), valsimg))

        # Create any requested output fields that do not yet exist (case-insensitive match).
        veclyrDefn = veclyr.GetLayerDefn()
        outFieldAtts = [minfield, maxfield, meanfield, stddevfield, sumfield, countfield, modefield, medianfield]
        for outattname in outFieldAtts:
            if outattname is not None:
                found = False
                for i in range(veclyrDefn.GetFieldCount()):
                    if veclyrDefn.GetFieldDefn(i).GetName().lower() == outattname.lower():
                        found = True
                        break
                if not found:
                    veclyr.CreateField(ogr.FieldDefn(outattname.lower(), ogr.OFTReal))

        # Resolve the field indices once, up front.
        fieldAttIdxs = dict()
        for outattname in outFieldAtts:
            if outattname is not None:
                fieldAttIdxs[outattname] = veclyr.FindFieldIndex(outattname.lower(), True)

        vec_mem_drv = ogr.GetDriverByName('Memory')
        img_mem_drv = gdal.GetDriverByName('MEM')

        # Iterate through features, committing in batches of transactionStep.
        openTransaction = False
        transactionStep = 20000
        nextTransaction = transactionStep
        nFeats = veclyr.GetFeatureCount(True)
        step = math.floor(nFeats / 10)
        feedback = 10
        feedback_next = step
        counter = 0
        print("Started .0.", end='', flush=True)
        veclyr.ResetReading()
        feat = veclyr.GetNextFeature()
        while feat is not None:
            if (nFeats > 10) and (counter == feedback_next):
                print(".{}.".format(feedback), end='', flush=True)
                feedback_next = feedback_next + step
                feedback = feedback + 10
            if not openTransaction:
                veclyr.StartTransaction()
                openTransaction = True
            if feat is not None:
                feat_geom = feat.geometry()
                if feat_geom is not None:
                    feat_bbox = feat_geom.GetEnvelope()
                    havepxls = True
                    # Convert the feature envelope to pixel offsets, padded by one
                    # pixel and clamped to the image extent.
                    x1Sp = float(feat_bbox[0] - imgGeoTrans[0])
                    x2Sp = float(feat_bbox[1] - imgGeoTrans[0])
                    y1Sp = float(feat_bbox[3] - imgGeoTrans[3])
                    y2Sp = float(feat_bbox[2] - imgGeoTrans[3])
                    if x1Sp == 0.0:
                        x1 = 0
                    else:
                        x1 = int(x1Sp / pixel_width) - 1
                    if x2Sp == 0.0:
                        x2 = 0
                    else:
                        x2 = int(x2Sp / pixel_width) + 1
                    if y1Sp == 0.0:
                        y1 = 0
                    else:
                        y1 = int(y1Sp / pixel_height) - 1
                    if y2Sp == 0.0:
                        y2 = 0
                    else:
                        y2 = int(y2Sp / pixel_height) + 1
                    if x1 < 0:
                        x1 = 0
                    elif x1 >= imgSizeX:
                        x1 = imgSizeX - 1
                    if x2 < 0:
                        x2 = 0
                    elif x2 >= imgSizeX:
                        x2 = imgSizeX - 1
                    if y1 < 0:
                        y1 = 0
                    elif y1 >= imgSizeY:
                        y1 = imgSizeY - 1
                    if y2 < 0:
                        y2 = 0
                    elif y2 >= imgSizeY:
                        y2 = imgSizeY - 1
                    xsize = x2 - x1
                    ysize = y2 - y1
                    if (xsize == 0) or (ysize == 0):
                        havepxls = False
                    # Define the image ROI for the feature.
                    src_offset = (x1, y1, xsize, ysize)
                    if havepxls:
                        # Read the band array.
                        src_array = imgband.ReadAsArray(*src_offset)
                    else:
                        src_array = None
                    if (src_array is not None) and havepxls:
                        # Geotransform of the feature subset.
                        subGeoTrans = ((imgGeoTrans[0] + (src_offset[0] * imgGeoTrans[1])), imgGeoTrans[1], 0.0,
                                       (imgGeoTrans[3] + (src_offset[1] * imgGeoTrans[5])), 0.0, imgGeoTrans[5])
                        # Create a temporary vector layer in memory holding this feature.
                        vec_mem_ds = vec_mem_drv.CreateDataSource('out')
                        vec_mem_lyr = vec_mem_ds.CreateLayer('poly', veclyr_spatial_ref, ogr.wkbPolygon)
                        vec_mem_lyr.CreateFeature(feat.Clone())
                        # Rasterize the feature.
                        img_tmp_ds = img_mem_drv.Create('', src_offset[2], src_offset[3], 1, gdal.GDT_Byte)
                        img_tmp_ds.SetGeoTransform(subGeoTrans)
                        img_tmp_ds.SetProjection(img_wkt_str)
                        gdal.RasterizeLayer(img_tmp_ds, [1], vec_mem_lyr, burn_values=[1])
                        rv_array = img_tmp_ds.ReadAsArray()
                        # Mask to the feature (logical_not flips 0<->1), the no data value
                        # and pixels outside the [minthres, maxthres) range.
                        # BUG FIX: the original called numpy.logical_or(a, b, c) which treats
                        # the third positional argument as the ufunc 'out=' parameter, so the
                        # threshold term was silently discarded; it was also written so that
                        # in-range pixels would be masked OUT rather than kept.
                        out_of_range = numpy.logical_not(numpy.logical_and(src_array >= minthres,
                                                                           src_array < maxthres))
                        if imgNoDataVal is not None:
                            invalid = numpy.logical_or(src_array == imgNoDataVal,
                                                       numpy.logical_not(rv_array))
                        else:
                            invalid = numpy.logical_not(rv_array)
                        masked = numpy.ma.MaskedArray(src_array,
                                                      mask=numpy.logical_or(invalid, out_of_range))
                        if float(masked.count()) > 0:
                            # One or more pixels found within the polygon: full statistics.
                            if minfield is not None:
                                min_val = float(masked.min())
                                feat.SetField(fieldAttIdxs[minfield], min_val)
                            if maxfield is not None:
                                max_val = float(masked.max())
                                feat.SetField(fieldAttIdxs[maxfield], max_val)
                            if meanfield is not None:
                                mean_val = float(masked.mean())
                                feat.SetField(fieldAttIdxs[meanfield], mean_val)
                            if stddevfield is not None:
                                stddev_val = float(masked.std())
                                feat.SetField(fieldAttIdxs[stddevfield], stddev_val)
                            if sumfield is not None:
                                sum_val = float(masked.sum())
                                feat.SetField(fieldAttIdxs[sumfield], sum_val)
                            if countfield is not None:
                                count_val = float(masked.count())
                                feat.SetField(fieldAttIdxs[countfield], count_val)
                            if modefield is not None:
                                mode_val, mode_count = scipy.stats.mstats.mode(masked.flatten())
                                mode_val = float(mode_val)
                                feat.SetField(fieldAttIdxs[modefield], mode_val)
                            if medianfield is not None:
                                median_val = float(numpy.ma.median(masked))
                                feat.SetField(fieldAttIdxs[medianfield], median_val)
                        else:
                            # No pixels within the polygon: sample the single pixel under
                            # the polygon centroid instead (thresholds are not applied here,
                            # matching the original behaviour).
                            subTLX = (imgGeoTrans[0] + (src_offset[0] * imgGeoTrans[1]))
                            subTLY = (imgGeoTrans[3] + (src_offset[1] * imgGeoTrans[5]))
                            resX = imgGeoTrans[1]
                            resY = imgGeoTrans[5]
                            ptx, pty, ptz = feat.GetGeometryRef().Centroid().GetPoint()
                            xOff = math.floor((ptx - subTLX) / resX)
                            yOff = math.floor((pty - subTLY) / resY)
                            if xOff < 0:
                                xOff = 0
                            if xOff >= xsize:
                                xOff = xsize - 1
                            if yOff < 0:
                                yOff = 0
                            if yOff >= ysize:
                                yOff = ysize - 1
                            out_val = float(src_array[yOff, xOff])
                            if minfield is not None:
                                feat.SetField(fieldAttIdxs[minfield], out_val)
                            if maxfield is not None:
                                feat.SetField(fieldAttIdxs[maxfield], out_val)
                            if meanfield is not None:
                                feat.SetField(fieldAttIdxs[meanfield], out_val)
                            if stddevfield is not None:
                                feat.SetField(fieldAttIdxs[stddevfield], 0.0)
                            if sumfield is not None:
                                feat.SetField(fieldAttIdxs[sumfield], out_val)
                            if countfield is not None:
                                feat.SetField(fieldAttIdxs[countfield], 1.0)
                            if modefield is not None:
                                feat.SetField(fieldAttIdxs[modefield], out_val)
                            if medianfield is not None:
                                feat.SetField(fieldAttIdxs[medianfield], out_val)
                        # Write the updated feature to the vector layer.
                        veclyr.SetFeature(feat)
                        vec_mem_ds = None
                        img_tmp_ds = None
            if (counter == nextTransaction) and openTransaction:
                veclyr.CommitTransaction()
                openTransaction = False
                nextTransaction = nextTransaction + transactionStep
            feat = veclyr.GetNextFeature()
            counter = counter + 1
        if openTransaction:
            veclyr.CommitTransaction()
            openTransaction = False
        print(" Completed")
        imgDS = None
    except Exception as e:
        print("Error Image File: {}".format(valsimg), file=sys.stderr)
        raise e
def build_mask(points, georef_fn): # This function is based loosely off of Frank's tests for # gdal.RasterizeLayer. # https://svn.osgeo.org/gdal/trunk/autotest/alg/rasterize.py # open the reference # we use this to find the size, projection, # spatial reference, and geotransform to # project the subcatchment to ds = gdal.Open(georef_fn) pszProjection = ds.GetProjectionRef() if pszProjection is not None: srs = osr.SpatialReference() if srs.ImportFromWkt(pszProjection) == gdal.CE_None: pszPrettyWkt = srs.ExportToPrettyWkt(False) geoTransform = ds.GetGeoTransform() # initialize a new raster in memory driver = gdal.GetDriverByName('MEM') target_ds = driver.Create('', ds.RasterXSize, ds.RasterYSize, 1, gdal.GDT_Byte) target_ds.SetGeoTransform(geoTransform) target_ds.SetProjection(pszProjection) # close the reference ds = None # Create a memory layer to rasterize from. rast_ogr_ds = ogr.GetDriverByName('Memory') \ .CreateDataSource('wrk') rast_mem_lyr = rast_ogr_ds.CreateLayer('poly', srs=srs) # Add a polygon. coords = ','.join(['%f %f' % (lng, lat) for lng, lat in points]) wkt_geom = 'POLYGON((%s))' % coords feat = ogr.Feature(rast_mem_lyr.GetLayerDefn()) feat.SetGeometryDirectly(ogr.Geometry(wkt=wkt_geom)) rast_mem_lyr.CreateFeature(feat) # Run the rasterization algorithm err = gdal.RasterizeLayer(target_ds, [1], rast_mem_lyr, burn_values=[255]) rast_ogr_ds = None rast_mem_lyr = None band = target_ds.GetRasterBand(1) data = band.ReadAsArray().T # find nonzero indices and return mask = -1 * (data / 255.0) + 1 m, n = mask.shape for i in range(1, m - 1): for j in range(1, n - 1): cnt = np.sum(mask[i - 1:1 + 1, j - 1:j + 1]) if cnt > 6: mask[i, j] = 1 return mask
def createDistanceTransformByFeatureIndex(feature_index, rasterSrc, vectorSrc, npDistFileName='', units='pixels'):
    """Compute a signed distance transform for a single vector feature.

    The feature at `feature_index` is rasterized onto the grid of `rasterSrc`,
    then two proximity rasters are computed (distance to the feature and
    distance to the background) and differenced, giving positive values inside
    the feature and negative values outside.

    :param feature_index: index of the feature within the vector source's layer.
    :param rasterSrc: path to the raster whose grid/projection are emulated.
    :param vectorSrc: path to the vector source containing the truth data.
    :param npDistFileName: if non-empty, the result array is also saved to this
        path with numpy.save.
    :param units: accepted for interface compatibility; not used in this
        function (metersIndex is fixed at 1, so distances are in pixels).
    :return: 2D float numpy array of signed distances (inside minus outside).
    """
    # Open the source vector file containing the truth data.
    source_ds = ogr.Open(vectorSrc)
    source_layer = source_ds.GetLayer()
    # Select the single feature to process.
    my_feature = source_layer[feature_index]
    # Spatial reference of the vector layer.
    srs = source_layer.GetSpatialRef()
    # Create an in-memory layer holding just that feature.
    outDriver = ogr.GetDriverByName('MEMORY')
    outDataSource = outDriver.CreateDataSource('memData')
    Feature_Layer = outDataSource.CreateLayer("this_feature", srs, geom_type=ogr.wkbPolygon)
    # Add the feature to the layer.
    Feature_Layer.CreateFeature(my_feature)
    # Open the raster file whose grid is to be emulated.
    srcRas_ds = gdal.Open(rasterSrc)
    cols = srcRas_ds.RasterXSize
    rows = srcRas_ds.RasterYSize
    noDataValue = 0
    metersIndex = 1
    # Create the first in-memory raster and burn the feature into it (255).
    memdrv = gdal.GetDriverByName('MEM')
    dst_ds = memdrv.Create('', cols, rows, 1, gdal.GDT_Byte)
    dst_ds.SetGeoTransform(srcRas_ds.GetGeoTransform())
    dst_ds.SetProjection(srcRas_ds.GetProjection())
    band = dst_ds.GetRasterBand(1)
    band.SetNoDataValue(noDataValue)
    gdal.RasterizeLayer(dst_ds, [1], Feature_Layer, burn_values=[255])
    srcBand = dst_ds.GetRasterBand(1)
    # Proximity raster 1: distance from each pixel to the nearest non-zero
    # (feature) pixel.
    memdrv2 = gdal.GetDriverByName('MEM')
    prox_ds = memdrv2.Create('', cols, rows, 1, gdal.GDT_Int16)
    prox_ds.SetGeoTransform(srcRas_ds.GetGeoTransform())
    prox_ds.SetProjection(srcRas_ds.GetProjection())
    proxBand = prox_ds.GetRasterBand(1)
    proxBand.SetNoDataValue(noDataValue)
    options = ['NODATA=0']
    gdal.ComputeProximity(srcBand, proxBand, options)
    # Proximity raster 2: distance from each pixel to the nearest zero
    # (background) pixel (VALUES=0).
    memdrv3 = gdal.GetDriverByName('MEM')
    proxIn_ds = memdrv3.Create('', cols, rows, 1, gdal.GDT_Int16)
    proxIn_ds.SetGeoTransform(srcRas_ds.GetGeoTransform())
    proxIn_ds.SetProjection(srcRas_ds.GetProjection())
    proxInBand = proxIn_ds.GetRasterBand(1)
    proxInBand.SetNoDataValue(noDataValue)
    options = ['NODATA=0', 'VALUES=0']
    gdal.ComputeProximity(srcBand, proxInBand, options)
    proxIn = gdalnumeric.BandReadAsArray(proxInBand)
    proxOut = gdalnumeric.BandReadAsArray(proxBand)
    # Signed distance: distance to background minus distance to feature,
    # so values are positive inside the feature and negative outside.
    proxTotal = proxIn.astype(float) - proxOut.astype(float)
    proxTotal = proxTotal * metersIndex
    if npDistFileName != '':
        np.save(npDistFileName, proxTotal)
    return proxTotal
def zonal_stats(vector_path, raster_path, nodata_value):
    """Compute per-feature zonal statistics of a raster over a vector layer.

    Each feature is rasterized over the matching raster window and the masked
    window is passed to getStatsCounty() for the actual statistics.

    :param vector_path: path to the vector (zone) data source.
    :param raster_path: path to the raster to summarize.
    :param nodata_value: raster nodata value (may be 0); None to use none.
    :return: list of per-feature stats dicts as produced by getStatsCounty().
    """
    # Open raster layer.
    rds = gdal.Open(raster_path, GA_ReadOnly)
    assert (rds)
    rb = rds.GetRasterBand(1)
    rgt = rds.GetGeoTransform()

    # Set raster nodata value.
    # BUG FIX: 'if nodata_value:' silently skipped a legitimate nodata of 0.
    if nodata_value is not None:
        nodata_value = float(nodata_value)
        rb.SetNoDataValue(nodata_value)

    # Open vector layer.
    vds = ogr.Open(vector_path, GA_ReadOnly)
    assert (vds)
    vlyr = vds.GetLayer(0)

    # Compare EPSG values of vector and raster and change projection if necessary.
    sourceSR = vlyr.GetSpatialRef()
    sourceSR.AutoIdentifyEPSG()
    EPSG_sourceSR = sourceSR.GetAuthorityCode(None)
    targetSR = osr.SpatialReference(wkt=rds.GetProjection())
    targetSR.AutoIdentifyEPSG()
    EPSG_targetSR = targetSR.GetAuthorityCode(None)
    # BUG FIX: the original compared EPSG_sourceSR to itself, so the
    # reprojection branch was unreachable.
    if EPSG_sourceSR != EPSG_targetSR:
        # Reproject vector geometry to same projection as raster.
        # BUG FIX: py2 'print' statements replaced with py3 calls used
        # elsewhere in this file.
        print('unequal projections')
        sourceSR = vlyr.GetSpatialRef()
        targetSR = osr.SpatialReference()
        targetSR.ImportFromWkt(rds.GetProjectionRef())
        # NOTE(review): coordTrans is created but never applied to any
        # geometry below — features are NOT actually reprojected; confirm
        # whether a per-feature Transform() call is missing.
        coordTrans = osr.CreateCoordinateTransformation(sourceSR, targetSR)

    """do the work"""
    global_src_extent = None
    mem_drv = ogr.GetDriverByName('Memory')
    driver = gdal.GetDriverByName('MEM')

    # Loop through vectors.
    stats = []
    feat = vlyr.GetNextFeature()
    while feat is not None:
        # Progress print every 500 features.
        fid = int(feat.GetFID())
        if fid % 500 == 0:
            print("finished first %s features" % (fid))
        if not global_src_extent:
            # Use local source extent: fastest option with fast disks and a
            # well-indexed raster (e.g. tiled GeoTIFF).
            # Advantage: each feature reads the smallest raster chunk.
            # Disadvantage: lots of reads on the source raster.
            src_offset = bbox_to_pixel_offsets(rgt, feat.geometry().GetEnvelope())
            src_array = rb.ReadAsArray(*src_offset)
            # Calculate new geotransform of the feature subset.
            new_gt = ((rgt[0] + (src_offset[0] * rgt[1])), rgt[1], 0.0,
                      (rgt[3] + (src_offset[1] * rgt[5])), 0.0, rgt[5])

        # Create a temporary vector layer in memory holding this feature.
        mem_ds = mem_drv.CreateDataSource('out')
        mem_layer = mem_ds.CreateLayer('poly', None, ogr.wkbPolygon)
        mem_layer.CreateFeature(feat.Clone())

        # Rasterize it.
        rvds = driver.Create('', src_offset[2], src_offset[3], 1, gdal.GDT_Byte)
        rvds.SetGeoTransform(new_gt)
        gdal.RasterizeLayer(rvds, [1], mem_layer, burn_values=[1])
        rv_array = rvds.ReadAsArray()

        # Mask the source data array with our current feature:
        # logical_not flips 0<->1 to get the correct mask effect, and nodata
        # values are masked out explicitly.
        try:
            masked = np.ma.MaskedArray(src_array,
                                       mask=np.logical_or(
                                           src_array == nodata_value,
                                           np.logical_not(rv_array)))
            # Get statistics for each county.
            county_stats = getStatsCounty(cnty_array=masked, feat=feat)
            stats.append(county_stats)
            rvds = None
            mem_ds = None
            feat = vlyr.GetNextFeature()
        except np.ma.MaskError:
            # Ignore features with no valid corresponding raster data (e.g. a
            # feature not fully within the raster extent) and continue.
            print('feature ID: ', fid, ' maskError, ignore county and lets continue')
            rvds = None
            mem_ds = None
            feat = vlyr.GetNextFeature()
    vds = None
    rds = None
    return stats  # , src_array, rv_array, masked
def createDistanceTransform(rasterSrc, vectorSrc, npDistFileName='', units='pixels'):
    """Compute a signed distance transform for all features of a vector source.

    The vector layer is rasterized onto the grid of `rasterSrc`, then two
    proximity rasters (distance to features, distance to background) are
    differenced, giving positive values inside features and negative outside.

    :param rasterSrc: path to the raster whose grid/projection are emulated.
    :param vectorSrc: path to the vector source containing the truth data.
    :param npDistFileName: if non-empty, the result is also saved via numpy.save.
    :param units: 'meters' scales pixel distances by the ground length of one
        pixel (computed via the gT helper's UTM transform); any other value
        leaves distances in pixels.
    :return: 2D float numpy array of signed distances (inside minus outside).
    """
    # Open source vector file containing the truth data.
    source_ds = ogr.Open(vectorSrc)
    source_layer = source_ds.GetLayer()
    # Open the raster file whose grid is to be emulated.
    srcRas_ds = gdal.Open(rasterSrc)
    cols = srcRas_ds.RasterXSize
    rows = srcRas_ds.RasterYSize
    noDataValue = 0
    if units == 'meters':
        # Measure the ground length of one pixel by projecting a one-pixel
        # line into UTM (assumes source coordinates are WGS84 — see
        # gT.createUTMTransform usage; TODO confirm).
        geoTrans, poly, ulX, ulY, lrX, lrY = gT.getRasterExtent(srcRas_ds)
        transform_WGS84_To_UTM, transform_UTM_To_WGS84, utm_cs = gT.createUTMTransform(
            poly)
        line = ogr.Geometry(ogr.wkbLineString)
        line.AddPoint(geoTrans[0], geoTrans[3])
        line.AddPoint(geoTrans[0] + geoTrans[1], geoTrans[3])
        line.Transform(transform_WGS84_To_UTM)
        metersIndex = line.Length()
    else:
        metersIndex = 1
    # Create the first in-memory raster and burn the vector layer into it (255).
    memdrv = gdal.GetDriverByName('MEM')
    dst_ds = memdrv.Create('', cols, rows, 1, gdal.GDT_Byte)
    dst_ds.SetGeoTransform(srcRas_ds.GetGeoTransform())
    dst_ds.SetProjection(srcRas_ds.GetProjection())
    band = dst_ds.GetRasterBand(1)
    band.SetNoDataValue(noDataValue)
    gdal.RasterizeLayer(dst_ds, [1], source_layer, burn_values=[255])
    srcBand = dst_ds.GetRasterBand(1)
    # Proximity raster 1: distance to the nearest non-zero (feature) pixel.
    memdrv2 = gdal.GetDriverByName('MEM')
    prox_ds = memdrv2.Create('', cols, rows, 1, gdal.GDT_Int16)
    prox_ds.SetGeoTransform(srcRas_ds.GetGeoTransform())
    prox_ds.SetProjection(srcRas_ds.GetProjection())
    proxBand = prox_ds.GetRasterBand(1)
    proxBand.SetNoDataValue(noDataValue)
    options = ['NODATA=0']
    # Compute distance to non-zero pixel values in srcBand, store in proxBand.
    gdal.ComputeProximity(srcBand, proxBand, options)
    # Proximity raster 2: distance to the nearest zero (background) pixel.
    memdrv3 = gdal.GetDriverByName('MEM')
    proxIn_ds = memdrv3.Create('', cols, rows, 1, gdal.GDT_Int16)
    proxIn_ds.SetGeoTransform(srcRas_ds.GetGeoTransform())
    proxIn_ds.SetProjection(srcRas_ds.GetProjection())
    proxInBand = proxIn_ds.GetRasterBand(1)
    proxInBand.SetNoDataValue(noDataValue)
    options = ['NODATA=0', 'VALUES=0']
    # Compute distance to zero pixel values in srcBand, store in proxInBand.
    gdal.ComputeProximity(srcBand, proxInBand, options)
    proxIn = gdalnumeric.BandReadAsArray(proxInBand)
    proxOut = gdalnumeric.BandReadAsArray(proxBand)
    # Distance transform: distance to zero pixels minus distance to non-zero
    # pixels (positive inside features, negative outside).
    proxTotal = proxIn.astype(float) - proxOut.astype(float)
    proxTotal = proxTotal * metersIndex
    if npDistFileName != '':
        np.save(npDistFileName, proxTotal)
    return proxTotal
def calcMangNDVIMangPxlFromCube(startYear, endYear, minLat, maxLat, minLon, maxLon, mangShpMask, ccThresholds,
                                outStatsFile, outImgMask, outImgTypeMask):
    """Derive annual mangrove extent masks and stats from a datacube query.

    Loads annual fractional-cover (PV 10th percentile) and WOfS summaries for
    the given period/extent, masks to the supplied mangrove shapefile, applies
    canopy-cover thresholds, and writes per-year pixel counts to a CSV plus
    two multi-band GeoTIFF masks.

    :param startYear: first year of the epoch (inclusive).
    :param endYear: last year of the epoch (inclusive).
    :param minLat: southern latitude bound (EPSG:4326).
    :param maxLat: northern latitude bound (EPSG:4326).
    :param minLon: western longitude bound (EPSG:4326).
    :param maxLon: eastern longitude bound (EPSG:4326).
    :param mangShpMask: shapefile used as the mangrove area mask.
    :param ccThresholds: list of canopy-cover thresholds; the first defines
        total extent, all define the cover-type classes (class = index + 1).
    :param outStatsFile: output CSV of per-year pixel counts.
    :param outImgMask: output GeoTIFF, one band per year, total extent mask.
    :param outImgTypeMask: output GeoTIFF, one band per year, cover-type class.
    :raises Exception: if the number of requested years does not match the
        number of annual layers returned by the datacube.
    """
    dc = datacube.Datacube(app='CalcAnnualMangroveExtent')
    start_of_epoch = str(startYear)+'-01-01'
    end_of_epoch = str(endYear)+'-12-31'

    query = {'time': (start_of_epoch, end_of_epoch),}
    query['x'] = (minLon, maxLon)
    query['y'] = (maxLat, minLat)
    query['crs'] = 'EPSG:4326'

    annualFC = dc.load(product='fc_percentile_albers_annual', group_by='solar_day',
                       measurements=['PV_PC_10'], **query)
    annualWOFS = dc.load(product='wofs_annual_summary_temp', measurements=['frequency'],
                         group_by='solar_day', **query)

    crswkt = annualFC.crs.wkt
    affine = annualFC.affine

    # Sort both series chronologically and carry the georeferencing attrs.
    annualPV10th = annualFC.PV_PC_10
    annualWetFreq = annualWOFS.frequency
    time_sorted = annualPV10th.time.argsort()
    annualPV10th = annualPV10th.isel(time=time_sorted)
    annualPV10th.attrs['affine'] = affine
    annualPV10th.attrs['crs'] = crswkt
    time_sorted = annualWetFreq.time.argsort()
    annualWetFreq = annualWetFreq.isel(time=time_sorted)
    annualWetFreq.attrs['affine'] = affine
    annualWetFreq.attrs['crs'] = crswkt

    # Define pixel size and NoData value of new raster.
    xres = affine[0]
    yres = affine[4]
    noDataVal = 0

    # Set the geotransform properties (origin at outer corner of first pixel).
    xcoord = annualFC.coords['x'].min()
    ycoord = annualFC.coords['y'].max()
    geotransform = (xcoord - (xres*0.5), xres, 0, ycoord + (yres*0.5), 0, yres)

    # Open the mask data source and read in the extent.
    source_ds = ogr.Open(mangShpMask)
    source_layer = source_ds.GetLayer()
    source_srs = source_layer.GetSpatialRef()

    # Create the destination extent.
    yt, xt = annualPV10th[0].shape

    # Set up 'in-memory' gdal image to rasterize the shapefile to.
    # BUG FIX: the original called Create('', xt, yt, gdal.GDT_Byte), passing the
    # data type where the band count belongs; it only worked because
    # gdal.GDT_Byte == 1. Both arguments are now explicit.
    target_ds = gdal.GetDriverByName('MEM').Create('', xt, yt, 1, gdal.GDT_Byte)
    target_ds.SetGeoTransform(geotransform)
    albers = osr.SpatialReference()
    albers.ImportFromEPSG(3577)
    target_ds.SetProjection(albers.ExportToWkt())
    band = target_ds.GetRasterBand(1)
    band.SetNoDataValue(noDataVal)

    # Rasterize the mangrove mask.
    gdal.RasterizeLayer(target_ds, [1], source_layer, burn_values=[1])

    # Read the GMW mask as an array.
    gmwMaskArr = band.ReadAsArray()

    # Remove frequently-wet pixels and NaNs, then restrict to the GMW mask.
    annualPV10th = annualPV10th.where(annualWetFreq < 0.5)
    annualPV10th.data[numpy.isnan(annualPV10th.data)] = 0
    mangAnnualFC = annualPV10th.where(gmwMaskArr == 1)
    mangAnnualFC.data[numpy.isnan(mangAnnualFC.data)] = 0
    mangAnnualFC.attrs['affine'] = affine
    mangAnnualFC.attrs['crs'] = crswkt

    years = numpy.arange(startYear, endYear+1, 1)
    if len(years) != annualPV10th.shape[0]:
        raise Exception("The list of years specified is not equal to the number of annual layers within the datacube dataset read.")

    # Total-extent mask (first threshold) and per-class cover-type layers.
    mangroveAreaPxlT = mangAnnualFC > ccThresholds[0]
    mangroveAreaPxlC = mangAnnualFC.copy(True)
    numThresVals = len(ccThresholds)
    for i in range(len(years)):
        mangroveAreaPxlC.data[i] = 0
        for j in range(numThresVals):
            # Higher thresholds overwrite lower ones, so class = highest passed.
            mangroveAreaPxlC.data[i][mangAnnualFC.data[i] > ccThresholds[j]] = j+1
    mangroveAreaPxlC.attrs['affine'] = affine
    mangroveAreaPxlC.attrs['crs'] = crswkt
    mangroveAreaPxlT.attrs['affine'] = affine
    mangroveAreaPxlT.attrs['crs'] = crswkt

    # Create the two output GeoTIFFs, one band per year.
    albers = osr.SpatialReference()
    albers.ImportFromEPSG(3577)
    targetImgDS = gdal.GetDriverByName('GTIFF').Create(outImgMask, xt, yt, len(years), gdal.GDT_Byte,
                                                       options=["TILED=YES", "COMPRESS=DEFLATE"])
    targetImgDS.SetGeoTransform(geotransform)
    targetImgDS.SetProjection(albers.ExportToWkt())
    targetTypeImgDS = gdal.GetDriverByName('GTIFF').Create(outImgTypeMask, xt, yt, len(years), gdal.GDT_Byte,
                                                           options=["TILED=YES", "COMPRESS=DEFLATE"])
    targetTypeImgDS.SetGeoTransform(geotransform)
    targetTypeImgDS.SetProjection(albers.ExportToWkt())

    # Write the stats CSV header.
    f = open(outStatsFile, 'w')
    f.write('Year, TotalPxlCount')
    for i in range(numThresVals):
        f.write(', PxlCountThres'+str(i+1))
    f.write('\n')

    idx = 0
    for yearVal in years:
        # Per-year pixel counts: total extent, then one count per threshold class.
        pxlCount = numpy.sum(mangroveAreaPxlT.data[idx])
        f.write(str(yearVal)+', '+str(pxlCount))
        for i in range(numThresVals):
            pxlCount = numpy.sum((mangroveAreaPxlC.data[idx] == i+1))
            f.write(', ' + str(pxlCount))
        f.write('\n')
        # Export the Total Mangrove Area image.
        band = targetImgDS.GetRasterBand(idx+1)
        band.SetNoDataValue(noDataVal)
        band.WriteArray(mangroveAreaPxlT.data[idx])
        band.SetDescription(str(yearVal))
        # Export the Mangrove Cover Type Area image.
        band = targetTypeImgDS.GetRasterBand(idx+1)
        band.SetNoDataValue(noDataVal)
        band.WriteArray(mangroveAreaPxlC.data[idx])
        band.SetDescription(str(yearVal))
        idx = idx + 1
    f.write('\n')
    f.flush()
    f.close()
    targetImgDS = None
    targetTypeImgDS = None
def zonal_area(raster, shp):
    """
    Converts a shp file into a raster mask. Masks off a polygon and counts the
    valid raster pixels within the mask.

    Currently this only works with a shp file with one feature, however, it's
    written so that it could be adjusted to handle multiple features.

    :param raster: Raster class object.
    :param shp: Shp class object.
    :return: float, the number of unmasked (valid, in-polygon) pixels
        multiplied by 100.
    """
    r_data = raster.data
    r_band = r_data.GetRasterBand(1)
    r_geotransform = raster.gt()
    v_data = shp.shp
    v_feature = v_data.GetLayer(0)
    nodata_value = r_band.GetNoDataValue()

    # Reproject the (single) vector feature to the raster's CRS if they differ.
    sourceprj = v_feature.GetSpatialRef()
    targetprj = osr.SpatialReference(wkt=r_data.GetProjection())
    if sourceprj.ExportToProj4() != targetprj.ExportToProj4():
        to_fill = ogr.GetDriverByName('Memory')
        ds = to_fill.CreateDataSource("project")
        outlayer = ds.CreateLayer('poly', targetprj, ogr.wkbPolygon)
        feature = v_feature.GetFeature(0)
        transform = osr.CoordinateTransformation(sourceprj, targetprj)
        transformed = feature.GetGeometryRef()
        transformed.Transform(transform)
        geom = ogr.CreateGeometryFromWkb(transformed.ExportToWkb())
        defn = outlayer.GetLayerDefn()
        feat = ogr.Feature(defn)
        feat.SetGeometry(geom)
        outlayer.CreateFeature(feat.Clone())
        feat = None
        v_feature = outlayer

    # Read the raster window covering the layer extent.
    src_offset = bbox_to_pixel_offsets(r_geotransform, v_feature.GetExtent())
    src_array = r_band.ReadAsArray(*src_offset)
    new_gt = (
        (r_geotransform[0] + (src_offset[0] * r_geotransform[1])),
        r_geotransform[1],
        0.0,
        (r_geotransform[3] + (src_offset[1] * r_geotransform[5])),
        0.0,
        r_geotransform[5]
    )

    # Rasterize the polygon over the same window.
    driver = gdal.GetDriverByName('MEM')
    v_to_r = driver.Create('', src_offset[2], src_offset[3], 1, gdal.GDT_Byte)
    v_to_r.SetGeoTransform(new_gt)
    gdal.RasterizeLayer(v_to_r, [1], v_feature, burn_values=[1])

    # BUG FIX: the original read v_to_r.ReadAsArray() twice (discarding the
    # first read) and carried an unused 'stats' list; both removed.
    src_array = np.array(src_array, dtype=float)
    v_to_r_array = np.array(v_to_r.ReadAsArray(), dtype=float)

    # Mask out nodata pixels and pixels outside the polygon
    # (logical_not flips 0<->1 to get the correct mask effect).
    masked = np.ma.MaskedArray(
        src_array,
        mask=np.logical_or(
            src_array == nodata_value,
            np.logical_not(v_to_r_array)
        ),
        fill_value=np.nan
    )
    return float(masked.count() * 100)
def ogr_burn( lyr, clone, burn_value, file_out="", gdal_type=gdal.GDT_Byte, format="MEM", fill_value=255, attribute=None, ): """ ogr_burn burns polygons, points or lines from a geographical source (e.g. shapefile) onto a raster. Inputs: lyr: Shape layer (e.g. read from ogr object) to burn clone: clone file to use to define geotransform burn_value: burn value zlib=False: Set to True (recommended) to internally zip the data fill_value=255 Set the fill value gdal_type= gdal.GDT_Float32: Set the GDAL output data type. format='MEM': File format (if 'MEM' is used, data is only kept in memory) fill_value=255: fill value to use attribute=None: alternative to burn_value, if set to attribute name, this attribute is used for burning instead of burn_value Output: The function returns a GDAL-compatible file (default = in-memory) and the numpy array raster TO-DO add metadata and projection information to GeoTIFF """ # get geotransform ds_src = gdal.Open(clone, gdal.GA_ReadOnly) geotrans = ds_src.GetGeoTransform() xcount = ds_src.RasterXSize ycount = ds_src.RasterYSize # get the projection WktString = ds_src.GetProjection() srs = osr.SpatialReference() srs.ImportFromWkt(WktString) ds_src = None ds = gdal.GetDriverByName(format).Create(file_out, xcount, ycount, 1, gdal_type) ds.SetGeoTransform(geotrans) ds.SetProjection(srs.ExportToWkt()) # create for target raster the same projection as for the value raster raster_srs = osr.SpatialReference() # raster_srs.ImportFromWkt(raster.GetProjectionRef()) # target_ds.SetProjection(raster_srs.ExportToWkt()) # rasterize zone polygon to raster if attribute is None: gdal.RasterizeLayer(ds, [1], lyr, burn_values=[burn_value]) else: gdal.RasterizeLayer(ds, [1], lyr, options=["ATTRIBUTE={:s}".format(attribute)]) band = ds.GetRasterBand(1) band.SetNoDataValue(fill_value) if format == "MEM": return ds else: band = None ds = None
def processAlgorithm(self, parameters, context, feedback):
    """Compute a hypsometry CSV per boundary feature over the input DEM.

    For each boundary feature intersecting the raster, the covered raster
    window is masked to the feature and passed to self.calculateHypsometry,
    which writes 'hystogram_<source>_<fid>.csv' into the output directory.
    """
    raster_layer = self.parameterAsRasterLayer(parameters, self.INPUT_DEM, context)
    target_crs = raster_layer.crs()
    rasterPath = raster_layer.source()
    source = self.parameterAsSource(parameters, self.BOUNDARY_LAYER, context)
    if source is None:
        raise QgsProcessingException(
            self.invalidSourceError(parameters, self.BOUNDARY_LAYER))
    step = self.parameterAsDouble(parameters, self.STEP, context)
    percentage = self.parameterAsBool(parameters, self.USE_PERCENTAGE, context)
    outputPath = self.parameterAsString(parameters, self.OUTPUT_DIRECTORY, context)

    # Open the DEM and gather grid parameters.
    rasterDS = gdal.Open(rasterPath, gdal.GA_ReadOnly)
    geoTransform = rasterDS.GetGeoTransform()
    rasterBand = rasterDS.GetRasterBand(1)
    noData = rasterBand.GetNoDataValue()
    cellXSize = abs(geoTransform[1])
    cellYSize = abs(geoTransform[5])
    rasterXSize = rasterDS.RasterXSize
    rasterYSize = rasterDS.RasterYSize
    rasterBBox = QgsRectangle(geoTransform[0],
                              geoTransform[3] - cellYSize * rasterYSize,
                              geoTransform[0] + cellXSize * rasterXSize,
                              geoTransform[3])
    rasterGeom = QgsGeometry.fromRect(rasterBBox)

    crs = osr.SpatialReference()
    crs.ImportFromProj4(str(target_crs.toProj4()))
    memVectorDriver = ogr.GetDriverByName('Memory')
    memRasterDriver = gdal.GetDriverByName('MEM')

    features = source.getFeatures(QgsFeatureRequest().setDestinationCrs(
        target_crs, context.transformContext()))
    total = 100.0 / source.featureCount() if source.featureCount() else 0

    for current, f in enumerate(features):
        if not f.hasGeometry():
            continue
        if feedback.isCanceled():
            break

        geom = f.geometry()
        intersectedGeom = rasterGeom.intersection(geom)
        if intersectedGeom.isEmpty():
            feedback.pushInfo(
                self.tr('Feature {0} does not intersect raster or '
                        'entirely located in NODATA area').format(f.id()))
            continue

        fName = os.path.join(
            outputPath, 'hystogram_%s_%s.csv' % (source.sourceName(), f.id()))

        ogrGeom = ogr.CreateGeometryFromWkt(intersectedGeom.asWkt())
        bbox = intersectedGeom.boundingBox()
        xMin = bbox.xMinimum()
        xMax = bbox.xMaximum()
        yMin = bbox.yMinimum()
        yMax = bbox.yMaximum()
        (startColumn, startRow) = raster.mapToPixel(xMin, yMax, geoTransform)
        (endColumn, endRow) = raster.mapToPixel(xMax, yMin, geoTransform)
        width = endColumn - startColumn
        height = endRow - startRow
        srcOffset = (startColumn, startRow, width, height)
        # BUG FIX: the zero-size guard now runs BEFORE ReadAsArray — the
        # original read the window first, so the guard could not protect the
        # read from a zero width/height request.
        if srcOffset[2] == 0 or srcOffset[3] == 0:
            feedback.pushInfo(
                self.tr('Feature {0} is smaller than raster '
                        'cell size').format(f.id()))
            continue
        srcArray = rasterBand.ReadAsArray(*srcOffset)

        # Geotransform of the feature subset.
        newGeoTransform = (geoTransform[0] + srcOffset[0] * geoTransform[1],
                           geoTransform[1],
                           0.0,
                           geoTransform[3] + srcOffset[1] * geoTransform[5],
                           0.0,
                           geoTransform[5])

        # Temporary in-memory vector layer holding the intersected geometry.
        memVDS = memVectorDriver.CreateDataSource('out')
        memLayer = memVDS.CreateLayer('poly', crs, ogr.wkbPolygon)
        ft = ogr.Feature(memLayer.GetLayerDefn())
        ft.SetGeometry(ogrGeom)
        memLayer.CreateFeature(ft)
        ft.Destroy()

        # Rasterize it over the same window.
        rasterizedDS = memRasterDriver.Create('', srcOffset[2], srcOffset[3],
                                              1, gdal.GDT_Byte)
        rasterizedDS.SetGeoTransform(newGeoTransform)
        gdal.RasterizeLayer(rasterizedDS, [1], memLayer, burn_values=[1])
        rasterizedArray = rasterizedDS.ReadAsArray()

        srcArray = numpy.nan_to_num(srcArray)
        # Mask out nodata pixels and pixels outside the feature
        # (logical_not flips 0<->1 to get the correct mask effect).
        masked = numpy.ma.MaskedArray(
            srcArray,
            mask=numpy.logical_or(srcArray == noData,
                                  numpy.logical_not(rasterizedArray)))

        self.calculateHypsometry(f.id(), fName, feedback, masked,
                                 cellXSize, cellYSize, percentage, step)

        memVDS = None
        rasterizedDS = None
        feedback.setProgress(int(current * total))

    rasterDS = None

    return {self.OUTPUT_DIRECTORY: outputPath}
def test_rasterize_5(): # Setup working spatial reference sr_wkt = 'LOCAL_CS["arbitrary"]' sr = osr.SpatialReference(sr_wkt) # Create a memory raster to rasterize into. target_ds = gdal.GetDriverByName('MEM').Create('', 100, 100, 3, gdal.GDT_Byte) target_ds.SetGeoTransform((1000, 1, 0, 1100, 0, -1)) target_ds.SetProjection(sr_wkt) # Create a memory layer to rasterize from. rast_ogr_ds = \ ogr.GetDriverByName('Memory').CreateDataSource('wrk') rast_mem_lyr = rast_ogr_ds.CreateLayer('poly', srs=sr) # Add polygons. wkt_geom = 'POLYGON((1020 1030,1020 1045,1050 1045,1050 1030,1020 1030))' feat = ogr.Feature(rast_mem_lyr.GetLayerDefn()) feat.SetGeometryDirectly(ogr.Geometry(wkt=wkt_geom)) rast_mem_lyr.CreateFeature(feat) wkt_geom = 'POLYGON((1045 1050,1055 1050,1055 1020,1045 1020,1045 1050))' feat = ogr.Feature(rast_mem_lyr.GetLayerDefn()) feat.SetGeometryDirectly(ogr.Geometry(wkt=wkt_geom)) rast_mem_lyr.CreateFeature(feat) # Add linestrings. wkt_geom = 'LINESTRING(1000 1000, 1100 1050)' feat = ogr.Feature(rast_mem_lyr.GetLayerDefn()) feat.SetGeometryDirectly(ogr.Geometry(wkt=wkt_geom)) rast_mem_lyr.CreateFeature(feat) wkt_geom = 'LINESTRING(1005 1000, 1000 1050)' feat = ogr.Feature(rast_mem_lyr.GetLayerDefn()) feat.SetGeometryDirectly(ogr.Geometry(wkt=wkt_geom)) rast_mem_lyr.CreateFeature(feat) # Run the algorithm. err = gdal.RasterizeLayer(target_ds, [1, 2, 3], rast_mem_lyr, burn_values=[256, 110, -1], options=["MERGE_ALG=ADD"]) assert err == 0, 'got non-zero result code from RasterizeLayer' # Check results. expected = 13022 checksum = target_ds.GetRasterBand(2).Checksum() if checksum != expected: print(checksum) gdal.GetDriverByName('GTiff').CreateCopy('tmp/rasterize_5.tif', target_ds) pytest.fail('Did not get expected image checksum') _, maxval = target_ds.GetRasterBand(1).ComputeRasterMinMax() assert maxval == 255 minval, _ = target_ds.GetRasterBand(3).ComputeRasterMinMax() assert minval == 0