def test_sieve_mask():
    """Test proper behavior of mask image, if passed into sieve"""
    with rasterio.drivers():
        shape = (20, 20)
        image = numpy.zeros(shape, dtype=rasterio.ubyte)
        image[5:15, 5:15] = 1
        image[1:3, 1:3] = 2

        # Blank mask has no effect, only areas smaller than size will be
        # removed
        mask = numpy.ones(shape, dtype=rasterio.bool_)
        sieved_image = ftrz.sieve(image, 100, mask=mask)
        truth = numpy.zeros_like(image)
        truth[5:15, 5:15] = 1
        assert numpy.array_equal(sieved_image, truth)

        # Only areas within the overlap of the mask and values will be kept
        mask = numpy.ones(shape, dtype=rasterio.bool_)
        mask[7:10, 7:10] = False
        sieved_image = ftrz.sieve(image, 100, mask=mask)
        truth = numpy.zeros_like(image)
        truth[7:10, 7:10] = 1
        assert numpy.array_equal(sieved_image, truth)

        # A mask of a dtype other than rasterio.bool_ should fail
        mask = numpy.zeros(shape, dtype=rasterio.uint8)
        with pytest.raises(ValueError):
            ftrz.sieve(image, 100, mask=mask)
def test_sieve_invalid_mask_dtype(basic_image):
    """A mask that is the wrong dtype should fail."""
    for dtype in ('int8', 'int16', 'int32'):
        with pytest.raises(ValueError):
            sieve(
                basic_image, basic_image.sum(),
                mask=np.ones(basic_image.shape, dtype=dtype)
            )
def test_sieve_invalid_mask_shape(basic_image):
    """A mask that is the wrong shape should fail."""
    with pytest.raises(ValueError):
        sieve(
            basic_image, basic_image.sum(),
            mask=np.ones(
                (basic_image.shape[0] + 10, basic_image.shape[1] + 10),
                dtype=rasterio.bool_
            )
        )
def test_sieve_band(pixelated_image, pixelated_image_file):
    """Sieving a band from a raster file should match sieve of array."""
    truth = sieve(pixelated_image, 9)

    with rasterio.open(pixelated_image_file) as src:
        band = rasterio.band(src, 1)
        assert np.array_equal(truth, sieve(band, 9))

        # Mask band should also work but will be a no-op
        assert np.array_equal(
            pixelated_image,
            sieve(band, 9, mask=band)
        )
def test_sieve_internal_driver_manager(basic_image, pixelated_image):
    """Sieve should work without explicitly calling driver manager"""
    assert np.array_equal(
        basic_image,
        sieve(pixelated_image, basic_image.sum())
    )
def test_sieve_large(basic_image):
    """
    Setting the size larger than the size of the feature should leave us
    an empty image.
    """
    with rasterio.drivers():
        assert not numpy.any(sieve(basic_image, basic_image.sum() + 1))
def test_sieve_large(basic_image):
    """
    Setting the size larger than the size of the feature should leave us
    an empty image.
    """
    with Env():
        assert not np.any(sieve(basic_image, basic_image.sum() + 1))
def test_sieve_out(basic_image):
    """Output array passed in should match the returned array."""
    output = np.zeros_like(basic_image)
    output[1:3, 1:3] = 5
    sieved_image = sieve(basic_image, basic_image.sum(), out=output)
    assert np.array_equal(basic_image, sieved_image)
    assert np.array_equal(output, sieved_image)
def test_sieve_invalid_out(basic_image):
    """Output with different dtype or shape should fail."""
    with pytest.raises(ValueError):
        sieve(
            basic_image, basic_image.sum(),
            out=np.zeros(basic_image.shape, dtype=rasterio.int32)
        )

    with pytest.raises(ValueError):
        sieve(
            basic_image, basic_image.sum(),
            out=np.zeros(
                (basic_image.shape[0] + 10, basic_image.shape[1] + 10),
                dtype=rasterio.ubyte
            )
        )
def test_sieve_blank_mask(basic_image):
    """A blank mask should have no effect."""
    mask = np.ones(basic_image.shape, dtype=rasterio.bool_)
    assert np.array_equal(
        basic_image,
        sieve(basic_image, basic_image.sum(), mask=mask)
    )
def test_sieve():
    """Test sieving a 10x10 feature from an ndarray."""
    image = numpy.zeros((20, 20), dtype=rasterio.ubyte)
    image[5:15, 5:15] = 1

    # An attempt to sieve out features smaller than 100 should not change
    # the image.
    with rasterio.drivers():
        sieved_image = ftrz.sieve(image, 100)
    assert numpy.array_equal(sieved_image, image)

    # Setting the size to 101 should leave us an empty, False image.
    with rasterio.drivers():
        sieved_image = ftrz.sieve(image, 101)
    assert not sieved_image.any()
def test_sieve_connectivity_queen(diagonal_image):
    """Diagonals are connected, so feature is retained"""
    assert np.array_equal(
        diagonal_image,
        sieve(diagonal_image, diagonal_image.sum(), connectivity=8)
    )
def test_sieve_connectivity():
    """Test proper behavior of connectivity"""
    image = numpy.zeros((20, 20), dtype=rasterio.ubyte)
    image[5:15:2, 5:15] = 1
    image[6, 4] = 1
    image[8, 15] = 1
    image[10, 4] = 1
    image[12, 15] = 1

    # Diagonals not connected, all become small features that will be
    # removed
    sieved_image = ftrz.sieve(image, 54, connectivity=4)
    assert not sieved_image.any()

    # Diagonals connected, everything is retained
    sieved_image = ftrz.sieve(image, 54, connectivity=8)
    assert numpy.array_equal(sieved_image, image)
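# A compact, standalone illustration of the rook-vs-queen distinction
# exercised by test_sieve_connectivity above. This is a sketch, not part of
# the original test suite; the 5x5 shape and the threshold are illustrative.
import numpy as np
from rasterio.features import sieve

diag = np.zeros((5, 5), dtype='uint8')
diag[range(5), range(5)] = 1  # a one-pixel-wide diagonal line

# With rook (4-way) connectivity each pixel is its own 1-pixel feature,
# so a threshold of 5 removes them all.
assert not sieve(diag, 5, connectivity=4).any()

# With queen (8-way) connectivity the diagonal is a single 5-pixel feature
# and survives the same threshold.
assert np.array_equal(sieve(diag, 5, connectivity=8), diag)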
def test_sieve():
    """Test sieving a 10x10 feature from an ndarray."""
    image = numpy.zeros((20, 20), dtype=rasterio.ubyte)
    image[5:15, 5:15] = 127

    # There should be some nonzero pixels.
    assert image.any()

    # An attempt to sieve out features smaller than 100 should not change
    # the image.
    with rasterio.drivers():
        sieved_image = ftrz.sieve(image, 100)
    assert (
        list(map(list, numpy.where(sieved_image == 127))) ==
        list(map(list, numpy.where(image == 127))))

    # Setting the size to 101 should leave us an empty, False image.
    with rasterio.drivers():
        sieved_image = ftrz.sieve(image, 101)
    assert not sieved_image.any()
def test_sieve_unsupported_dtypes(basic_image):
    """Unsupported data types should raise exceptions."""
    unsupported_types = (
        ('int8', -127),
        ('uint32', 4294967295),
        ('int64', 20439845334323),
        ('float16', -9343.232),
        ('float32', 1.434532),
        ('float64', -98332.133422114)
    )

    for dtype, test_value in unsupported_types:
        with pytest.raises(ValueError):
            sieve(
                (basic_image).astype(dtype) * test_value,
                basic_image.sum()
            )
def test_sieve_blank_mask(basic_image):
    """A blank mask should have no effect"""
    mask = numpy.ones(basic_image.shape, dtype=rasterio.bool_)
    with rasterio.drivers():
        assert numpy.array_equal(
            basic_image,
            sieve(basic_image, basic_image.sum(), mask=mask)
        )
def test_sieve_out(basic_image):
    """Output array passed in should match the returned array"""
    with rasterio.drivers():
        output = numpy.zeros_like(basic_image)
        output[1:3, 1:3] = 5
        sieved_image = sieve(basic_image, basic_image.sum(), out=output)
        assert numpy.array_equal(basic_image, sieved_image)
        assert numpy.array_equal(output, sieved_image)
def test_sieve_output():
    """Test proper behavior of output image, if passed into sieve"""
    with rasterio.drivers():
        shape = (20, 20)
        image = numpy.zeros(shape, dtype=rasterio.ubyte)
        image[5:15, 5:15] = 1

        # Output should match returned array
        output = numpy.zeros_like(image)
        output[1:3, 1:3] = 5
        sieved_image = ftrz.sieve(image, 100, output=output)
        assert numpy.array_equal(output, sieved_image)

        # Output of different dtype should fail
        output = numpy.zeros(shape, dtype=rasterio.int32)
        with pytest.raises(ValueError):
            ftrz.sieve(image, 100, output)
def test_sieve_small(basic_image, pixelated_image):
    """
    Setting the size smaller than or equal to the size of the feature in
    the image should not change the image.
    """
    assert np.array_equal(
        basic_image,
        sieve(pixelated_image, basic_image.sum())
    )
def test_sieve_small(basic_image, pixelated_image):
    """
    Setting the size smaller than or equal to the size of the feature in
    the image should not change the image.
    """
    with rasterio.drivers():
        assert numpy.array_equal(
            basic_image,
            sieve(pixelated_image, basic_image.sum())
        )
def test_dtypes():
    """Test data type support for sieve"""
    rows = cols = 10
    with rasterio.drivers():
        supported_types = (
            ('int16', -32768),
            ('int32', -2147483648),
            ('uint8', 255),
            ('uint16', 65535)
        )

        for dtype, test_value in supported_types:
            image = numpy.zeros((rows, cols), dtype=dtype)
            image[2:5, 2:5] = test_value

            # Sieve should return the original image
            sieved_image = ftrz.sieve(image, 2)
            assert numpy.array_equal(image, sieved_image)
            assert numpy.dtype(sieved_image.dtype).name == dtype

            # Sieve should return a blank image
            sieved_image = ftrz.sieve(image, 10)
            assert numpy.array_equal(numpy.zeros_like(image), sieved_image)
            assert numpy.dtype(sieved_image.dtype).name == dtype

        # Unsupported types should all raise exceptions
        unsupported_types = (
            ('int8', -127),
            ('uint32', 4294967295),
            ('int64', 20439845334323),
            ('float16', -9343.232),
            ('float32', 1.434532),
            ('float64', -98332.133422114)
        )

        for dtype, test_value in unsupported_types:
            with pytest.raises(ValueError):
                image = numpy.zeros((rows, cols), dtype=dtype)
                image[2:5, 2:5] = test_value
                sieved_image = ftrz.sieve(image, 2)
def test_sieve():
    """Test sieving a 10x10 feature from an ndarray."""
    image = numpy.zeros((20, 20), dtype=rasterio.ubyte)
    image[5:15, 5:15] = 1

    # An attempt to sieve out features smaller than 100 should not change
    # the image.
    with rasterio.drivers():
        sieved_image = ftrz.sieve(image, 100)
    assert numpy.array_equal(sieved_image, image)

    # Setting the size to 101 should leave us an empty, False image.
    with rasterio.drivers():
        sieved_image = ftrz.sieve(image, 101)
    assert not sieved_image.any()

    # Invalid size values should fail
    for invalid_size in (0, 45.1234, image.size + 1):
        with pytest.raises(ValueError):
            sieved_image = ftrz.sieve(image, invalid_size)
def test_sieve_supported_dtypes(basic_image):
    """Supported data types should return valid results."""
    supported_types = (
        ('int16', -32768),
        ('int32', -2147483648),
        ('uint8', 255),
        ('uint16', 65535)
    )

    for dtype, test_value in supported_types:
        truth = (basic_image).astype(dtype) * test_value
        sieved_image = sieve(truth, basic_image.sum())
        assert np.array_equal(truth, sieved_image)
        assert np.dtype(sieved_image.dtype) == np.dtype(dtype)
def test_sieve_mask(basic_image):
    """
    Only areas within the overlap of mask and input will be kept, so long
    as mask is a bool or uint8 dtype.
    """
    mask = np.ones(basic_image.shape, dtype=rasterio.bool_)
    mask[4:5, 4:5] = False
    truth = basic_image * np.invert(mask)

    sieved_image = sieve(basic_image, basic_image.sum(), mask=mask)
    assert sieved_image.sum() > 0

    assert np.array_equal(
        truth,
        sieved_image
    )

    assert np.array_equal(
        truth.astype(rasterio.uint8),
        sieved_image
    )
def test_sieve_shade():
    with rasterio.drivers():
        with rasterio.open('tests/data/shade.tif') as src:
            sieved_image = ftrz.sieve(rasterio.band(src, 1), 42)
            assert sieved_image.shape == (1024, 1024)
def calc(ctx, command, files, output, name, dtype, masked, overwrite, mem_limit, creation_options): """A raster data calculator Evaluates an expression using input datasets and writes the result to a new dataset. Command syntax is lisp-like. An expression consists of an operator or function name and one or more strings, numbers, or expressions enclosed in parentheses. Functions include ``read`` (gets a raster array) and ``asarray`` (makes a 3-D array from 2-D arrays). \b * (read i) evaluates to the i-th input dataset (a 3-D array). * (read i j) evaluates to the j-th band of the i-th dataset (a 2-D array). * (take foo j) evaluates to the j-th band of a dataset named foo (see help on the --name option above). * Standard numpy array operators (+, -, *, /) are available. * When the final result is a list of arrays, a multiple band output file is written. * When the final result is a single array, a single band output file is written. Example: \b $ rio calc "(+ 2 (* 0.95 (read 1)))" tests/data/RGB.byte.tif \\ > /tmp/out.tif The command above produces a 3-band GeoTIFF with all values scaled by 0.95 and incremented by 2. \b $ rio calc "(asarray (+ 125 (read 1)) (read 1) (read 1))" \\ > tests/data/shade.tif /tmp/out.tif The command above produces a 3-band RGB GeoTIFF, with red levels incremented by 125, from the single-band input. The maximum amount of memory used to perform caculations defaults to 64 MB. This number can be increased to improve speed of calculation. """ import numpy as np try: with ctx.obj['env']: output, files = resolve_inout(files=files, output=output, overwrite=overwrite) inputs = ([tuple(n.split('=')) for n in name] + [(None, n) for n in files]) sources = [rasterio.open(path) for name, path in inputs] first = sources[0] kwargs = first.profile kwargs.update(**creation_options) dtype = dtype or first.meta['dtype'] kwargs['dtype'] = dtype # Extend snuggs. snuggs.func_map['read'] = _read_array snuggs.func_map['band'] = lambda d, i: _get_bands( inputs, sources, d, i) snuggs.func_map['bands'] = lambda d: _get_bands(inputs, sources, d) snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args) snuggs.func_map['sieve'] = lambda *args: sieve(*args) dst = None # The windows iterator is initialized with a single sample. # The actual work windows will be added in the second # iteration of the loop. work_windows = [(None, Window(0, 0, 16, 16))] for ij, window in work_windows: ctxkwds = OrderedDict() for i, ((name, path), src) in enumerate(zip(inputs, sources)): # Using the class method instead of instance # method. Latter raises # # TypeError: astype() got an unexpected keyword # argument 'copy' # # possibly something to do with the instance being # a masked array. ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked, window=window) res = snuggs.eval(command, **ctxkwds) if (isinstance(res, np.ma.core.MaskedArray) and (tuple(LooseVersion(np.__version__).version) < (1, 9) or tuple(LooseVersion(np.__version__).version) > (1, 10))): res = res.filled(kwargs['nodata']) if len(res.shape) == 3: results = np.ndarray.astype(res, dtype, copy=False) else: results = np.asanyarray( [np.ndarray.astype(res, dtype, copy=False)]) # The first iteration is only to get sample results and from them # compute some properties of the output dataset. 
if dst is None: kwargs['count'] = results.shape[0] dst = rasterio.open(output, 'w', **kwargs) work_windows.extend( _chunk_output(dst.width, dst.height, dst.count, np.dtype(dst.dtypes[0]).itemsize, mem_limit=mem_limit)) # In subsequent iterations we write results. else: dst.write(results, window=window) except snuggs.ExpressionError as err: click.echo("Expression Error:") click.echo(' %s' % err.text) click.echo(' ' + ' ' * err.offset + "^") click.echo(err) raise click.Abort() finally: if dst: dst.close() for src in sources: src.close()
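# Since sieve is registered in snuggs.func_map in the calc command above, it
# can be invoked from the rio calc expression language. A hedged example
# (file names and the 9-pixel threshold are illustrative, and the expression
# form is assumed from the func_map registration, where (read 1 1) yields
# band 1 of the first input dataset as a 2-D array):
#
#   $ rio calc "(sieve (read 1 1) 9)" tests/data/shade.tif /tmp/sieved.tif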
def segmentation(model=None, params=None, src=None, bands=[1, 2, 3],
                 image=None, mask=None, modal_radius=None, sieve_size=250):
    """Segment the image.

    Segment the image using an algorithm from skimage.segmentation.

    Parameters
    ----------
    model: skimage.segmentation model
        A model from skimage.segmentation (e.g., slic, slic0, felzenszwalb)
    params: skimage.segmentation model parameters
        The unique parameters for the selected segmentation algorithm.
        Will be passed to the model as the kwargs argument.
    src: Rasterio datasource
        A rasterio-style datasource, created using:
        with rasterio.open('path') as src. There must be at least 3 bands
        of image data. If there are more than 3 bands, the first three
        will be used (see 'bands' parameter). This parameter is optional.
        If it is not provided, then image must be supplied.
    bands: array of integers
        The array of 3 bands to read from src as the RGB image for
        segmentation.
    image: numpy.array
        A 3-band (RGB) image used for segmentation. The shape of the
        image must be ordered as follows: (bands, rows, columns). This
        parameter is optional.
    mask: numpy.array
        A 1-band image mask. The shape of the mask must be ordered as
        follows: (rows, columns). This parameter is optional.
    modal_radius: integer
        Integer representing the radius of a raster disk (i.e., circular
        roving window). Optional. If not set, no modal filter will be
        applied.
    sieve_size: integer
        An integer representing the smallest number of pixels that will
        be kept as a unique segment. Segments smaller than this size will
        be merged with the neighboring segment with the most pixels.

    Returns
    -------
    numpy.array
        A numpy array arranged as rasterio would read it
        (bands=1, rows, cols) so it's ready to be written by rasterio
    """
    if src is not None:
        img = bsq_to_bip(src.read(bands, masked=True))
        mask = src.read_masks(1)
        mask[mask > 0] = 1
    else:
        img = bsq_to_bip(image)
        mask[mask > 255] = 1

    output = model(img, **params).astype('int32')
    while np.ndarray.min(output) < 1:
        output += 1

    if modal_radius is not None:
        output = modal(output.astype('int16'), selem=disk(modal_radius),
                       mask=mask)

    # output = features.sieve(output, sieve_size, mask=mask,
    #                         connectivity=8) * mask
    output = features.sieve(output, sieve_size, mask=mask) * mask

    # output = label(output, connectivity=2)
    output = label(output, connectivity=1)

    output = bip_to_bsq(output[:, :, np.newaxis]) * mask

    return output
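# The small-segment cleanup inside segmentation() above reduces to a single
# rasterio.features.sieve call. A minimal standalone sketch of that step
# (the array values, shapes, and the 250-pixel threshold are illustrative,
# mirroring the function's sieve_size default):
import numpy as np
from rasterio import features

# A toy label image: one large segment (value 1) and one 4-pixel segment
# (value 2) in the corner.
segments = np.ones((20, 20), dtype='int32')
segments[0:2, 0:2] = 2

# Pixels where the mask is truthy take part in the sieve; an all-ones mask
# leaves the whole image in play.
mask = np.ones(segments.shape, dtype='uint8')

# Segments smaller than 250 pixels are merged into their largest neighboring
# segment, so the 4-pixel patch becomes part of segment 1.
cleaned = features.sieve(segments, 250, mask=mask)
assert (cleaned == 1).all()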
import subprocess

import numpy
import rasterio
from rasterio.features import sieve, shapes


# Register GDAL and OGR drivers.
with rasterio.drivers():

    # Read a raster to be sieved.
    with rasterio.open('tests/data/shade.tif') as src:
        shade = src.read_band(1)

    # Print the number of shapes in the source raster.
    print("Slope shapes: %d" % len(list(shapes(shade))))

    # Sieve out features 13 pixels or smaller.
    sieved = sieve(shade, 13)

    # Print the number of shapes in the sieved raster.
    print("Sieved (13) shapes: %d" % len(list(shapes(sieved))))

    # Write out the sieved raster.
    with rasterio.open('example-sieved.tif', 'w', **src.meta) as dst:
        dst.write_band(1, sieved)

    # Dump out gdalinfo's report card and open (or "eog") the TIFF.
    print(subprocess.check_output(
        ['gdalinfo', '-stats', 'example-sieved.tif']))
    subprocess.call(['open', 'example-sieved.tif'])
def test_sieve_connectivity_rook(diagonal_image):
    """Diagonals are not connected, so feature is removed."""
    assert not np.any(
        sieve(diagonal_image, diagonal_image.sum(), connectivity=4)
    )
def test_sieve_connectivity_queen(diagonal_image):
    """Diagonals are connected, so feature is retained."""
    assert np.array_equal(
        diagonal_image,
        sieve(diagonal_image, diagonal_image.sum(), connectivity=8)
    )
import os
import subprocess

import numpy
import rasterio
from rasterio.features import sieve, shapes

os.chdir('/projectnb/landsat/projects/Colombia/Mosaics/M2B/')

# Register GDAL and OGR drivers.
with rasterio.drivers():

    # Read a raster to be sieved.
    with rasterio.open('2001-01-01_seq.tif') as src:
        shade = src.read(1)

    # Sieve out features smaller than 4 pixels.
    sieved = sieve(shade, 4, out=numpy.zeros(src.shape, src.dtypes[0]))

    # Print the number of shapes in the sieved raster.
    print("Sieved (4) shapes: %d" % len(list(shapes(sieved))))

    # Write out the sieved raster.
    kwargs = src.meta
    kwargs['transform'] = kwargs.pop('affine')
    with rasterio.open('example-sieved.tif', 'w', **kwargs) as dst:
        dst.write(sieved, indexes=1)

    # Dump out gdalinfo's report card and open (or "eog") the TIFF.
    #print(subprocess.check_output(
    #    ['gdalinfo', '-stats', 'example-sieved.tif']))
    #subprocess.call(['open', 'example-sieved.tif'])
def ClassifyValleyBottomTile(row: int, col: int, params: Parameters, drainage: SwathDrainageDict, **kwargs): """ Classify valley bottom features - tile algorithm """ dem_raster = params.dem.tilename(row=row, col=col, **kwargs) # drainage_raster = params.drainage.tilename(row=row, col=col, **kwargs) height_raster = params.height.tilename(row=row, col=col, **kwargs) axis_raster = params.axis.tilename(row=row, col=col, **kwargs) measure_raster = params.measure.tilename(row=row, col=col, **kwargs) distance_raster = params.distance.tilename(row=row, col=col, **kwargs) if not measure_raster.exists(): return output = params.output.tilename(row=row, col=col, **kwargs) slope = calculate_slope(dem_raster) swaths, measures = calculate_swaths(measure_raster, params.swath_length) # with rio.open(drainage_raster) as ds: # drainage = ds.read(1) with rio.open(axis_raster) as ds: axis = ds.read(1) axis_nodata = ds.nodata with rio.open(distance_raster) as ds: distance = np.abs(ds.read(1)) if isinstance(params.thresholds, Callable): resolve_thresholds = params.thresholds else: resolve_thresholds = make_resolve_thresholds_fun(params.thresholds) with rio.open(height_raster) as ds: height = ds.read(1) out = np.full_like(height, MASK_EXTERIOR, dtype='uint8') for ax in np.unique(axis): if ax == axis_nodata: continue for sw in np.unique(swaths[axis == ax]): if sw == 0 or (sw - 1) >= len(measures): continue sw_measure = measures[sw - 1] sw_mask = (axis == ax) & (swaths == sw) sw_drainage = drainage[ax, sw_measure] sw_height_max = params.height_max thresholds = resolve_thresholds(sw_drainage) out[sw_mask & (distance <= thresholds.distance_max) & (height <= sw_height_max)] = MASK_VALLEY_BOTTOM out[sw_mask & (out == MASK_VALLEY_BOTTOM) & (slope > thresholds.slope_max) & (distance > thresholds.distance_min)] = MASK_FLOOPLAIN_RELIEF out = features.sieve(out, params.patch_min_pixels) speedup.reclass_margin(out, MASK_FLOOPLAIN_RELIEF, MASK_EXTERIOR, MASK_SLOPE) for ax in np.unique(axis): if ax == axis_nodata: continue for sw in np.unique(swaths[axis == ax]): if sw == 0 or (sw - 1) >= len(measures): continue sw_measure = measures[sw - 1] sw_mask = (axis == ax) & (swaths == sw) sw_drainage = drainage[ax, sw_measure] sw_height_max = params.height_max thresholds = resolve_thresholds(sw_drainage) out[sw_mask & (out == MASK_SLOPE) & (height <= thresholds.height_max) & (distance > thresholds.distance_min)] = MASK_FLOOPLAIN_RELIEF out[sw_mask & (out == MASK_VALLEY_BOTTOM) & (height > thresholds.height_max) & (distance > thresholds.distance_min)] = MASK_TERRACE profile = ds.profile.copy() if not params.slope.none: profile.update(nodata=999.0, compress='deflate') with rio.open(params.slope.tilename(row=row, col=col, **kwargs), 'w', **profile) as dst: dst.write(slope, 1) profile.update(dtype='uint8', nodata=0, compress='deflate') with rio.open(output, 'w', **profile) as dst: dst.write(out, 1)
import subprocess

import numpy
import rasterio
from rasterio.features import sieve, shapes


# Register GDAL and OGR drivers.
with rasterio.drivers():

    # Read a raster to be sieved.
    with rasterio.open('rasterio/tests/data/shade.tif') as src:
        shade = src.read_band(1)

    # Print the number of shapes in the source raster.
    print("Slope shapes: %d" % len(list(shapes(shade))))

    # Sieve out features 13 pixels or smaller.
    sieved = sieve(shade, 13)

    # Print the number of shapes in the sieved raster.
    print("Sieved (13) shapes: %d" % len(list(shapes(sieved))))

    # Write out the sieved raster.
    with rasterio.open('example-sieved.tif', 'w', **src.meta) as dst:
        dst.write_band(1, sieved)

    # Dump out gdalinfo's report card and open (or "eog") the TIFF.
    print(subprocess.check_output(['gdalinfo', '-stats', 'example-sieved.tif']))
    subprocess.call(['open', 'example-sieved.tif'])
def ValleyMaskTile(axis, row, col, threshold): tileset = config.tileset() def _tilename(name): return tileset.tilename(name, axis=axis, row=row, col=col) datafile = config.filename('metrics_talweg', axis=axis) hand_raster = _tilename('ax_nearest_height') swath_raster = _tilename('ax_swaths_refaxis') output_mask = _tilename('ax_valley_mask_refined') # output_height = _tilename('ax_nearest_height_refined') if not (os.path.exists(hand_raster) and os.path.exists(swath_raster)): return data = xr.open_dataset(datafile).swap_dims({'measure': 'swath'}) with rio.open(swath_raster) as ds: swaths = ds.read(1) swath_nodata = ds.nodata with rio.open(hand_raster) as ds: hand = ds.read(1) nodata = 255 out = np.full_like(hand, nodata, dtype='uint8') for swid in np.unique(swaths): if swid == swath_nodata: continue try: talheight = data['talweg_height_median'].sel(swath=swid).values except KeyError: talheight = np.nan if np.isnan(talheight): swath_mask = (swaths == swid) out[swath_mask] = 0 else: # TODO threshold = f(swid, bottom width, drainage area) minh = min(-talheight - threshold, -threshold) maxh = max(-talheight + threshold, threshold) swath_mask = (swaths == swid) bottom_mask = (hand >= minh) & (hand < maxh) out[swath_mask] = 1 out[swath_mask & bottom_mask] = 0 out = features.sieve(out, 100) # TODO externalize parameter speedup.reclass_margin(out, 1, 255, 2) profile = ds.profile.copy() profile.update(dtype='uint8', nodata=nodata, compress='deflate') with rio.open(output_mask, 'w', **profile) as dst: dst.write(out, 1)
def test_sieve_connectivity_rook(diagonal_image):
    """Diagonals are not connected, so feature is removed"""
    assert not np.any(
        sieve(diagonal_image, diagonal_image.sum(), connectivity=4)
    )
def FlatMap(row, col, min_drainage, **kwargs): """ Flat areas continuous to drainage network Values : 1: Flat (low topography, continuous to drainage network) 2: Not Flat 3: Slope/Crest Flat 255: No-data """ from scipy.ndimage.morphology import binary_closing dem_raster = config.tileset().tilename('filled', row=row, col=col) flow_raster = config.tileset().tilename('flow', row=row, col=col) acc_raster = config.tileset().tilename('acc', row=row, col=col) output = config.tileset().tilename('flatmap', row=row, col=col) with rio.open(dem_raster) as ds: flow = ta.flowdir(ds.read(1), ds.nodata) flats = np.uint8(flow == 0) del flow # Sieve/Morphological Closing # structure = np.array([[0, 1, 0],[1, 1, 1],[0, 1, 0]], dtype=np.uint8) structure = np.array( [[0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0]], dtype=np.uint8) flats = np.uint8( binary_closing(flats, structure=structure, iterations=2)) flats = sieve(flats, 800) # Continuity with stream network derived from acc method = 1 if method == 1: # Method 1 Watershed max # 1 = flat, 2 = not flat flats = 2 - np.float32(flats) with rio.open(acc_raster) as ds2: mask = (ds2.read(1) >= min_drainage) out = np.zeros_like(mask, dtype=np.float32) out[mask] = 1 # flats[mask] = 1 with rio.open(flow_raster) as ds2: flow = ds2.read(1) ta.watershed_max(flow, out, flats, fill_value=0, feedback=None) out = np.uint8(out) out[mask] = flats[mask] out[out == 2] = 3 out[(flats == 1) & (out == 3)] = 2 out[flow == -1] = 255 elif method == 2: # Method 2 Shortest Max # 1 = stream, 2 = flat, 3 = not flat flats = 3 - np.float32(flats) with rio.open(acc_raster) as ds2: mask = (ds2.read(1) >= min_drainage) out = np.zeros_like(mask, dtype=np.float32) # out[mask] = 1 flats[mask] = 1 ta.shortest_max(flats, 0, 1, out=out, feedback=ta.ConsoleFeedback()) out = np.uint8(out) - 1 out[mask] = flats[mask] - 1 out[out == 2] = 3 out[(flats == 2) & (out == 3)] = 2 # End Method Options speedup.spread_connected(out, 1) profile = ds.profile.copy() profile.update(compress='deflate', dtype=np.uint8, nodata=255) with rio.open(output, 'w', **profile) as dst: dst.write(out, 1)
def calc(ctx, command, files, name, dtype): """A raster data calculator Evaluates an expression using input datasets and writes the result to a new dataset. Command syntax is lisp-like. An expression consists of an operator or function name and one or more strings, numbers, or expressions enclosed in parentheses. Functions include ``read`` (gets a raster array) and ``asarray`` (makes a 3-D array from 2-D arrays). \b * (read i) evaluates to the i-th input dataset (a 3-D array). * (read i j) evaluates to the j-th band of the i-th dataset (a 2-D array). * (take foo j) evaluates to the j-th band of a dataset named foo (see help on the --name option above). * Standard numpy array operators (+, -, *, /) are available. * When the final result is a list of arrays, a multi band output file is written. * When the final result is a single array, a single band output file is written. Example: \b $ rio calc "(+ 2 (* 0.95 (read 1)))" tests/data/RGB.byte.tif \\ > /tmp/out.tif Produces a 3-band GeoTIFF with all values scaled by 0.95 and incremented by 2. \b $ rio calc "(asarray (+ 125 (read 1)) (read 1) (read 1))" \\ > tests/data/shade.tif /tmp/out.tif Produces a 3-band RGB GeoTIFF, with red levels incremented by 125, from the single-band input. """ import numpy as np verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1 logger = logging.getLogger('rio') try: with rasterio.drivers(CPL_DEBUG=verbosity > 2): output = files[-1] inputs = ([tuple(n.split('=')) for n in name] + [(None, n) for n in files[:-1]]) with rasterio.open(inputs[0][1]) as first: kwargs = first.meta kwargs['transform'] = kwargs.pop('affine') dtype = dtype or first.meta['dtype'] kwargs['dtype'] = dtype ctxkwds = {} for i, (name, path) in enumerate(inputs): with rasterio.open(path) as src: # Using the class method instead of instance # method. Latter raises # # TypeError: astype() got an unexpected keyword # argument 'copy' # # possibly something to do with the instance being # a masked array. ctxkwds[name or '_i%d' % (i+1)] = src.read() # Extend snuggs. snuggs.func_map['read'] = read_array snuggs.func_map['band'] = lambda d, i: get_bands(inputs, d, i) snuggs.func_map['bands'] = lambda d: get_bands(inputs, d) snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args) snuggs.func_map['sieve'] = lambda *args: sieve(*args) res = snuggs.eval(command, **ctxkwds) if len(res.shape) == 3: results = np.ndarray.astype(res, dtype, copy=False) else: results = np.asanyarray( [np.ndarray.astype(res, dtype, copy=False)]) kwargs['count'] = results.shape[0] with rasterio.open(output, 'w', **kwargs) as dst: dst.write(results) sys.exit(0) except snuggs.ExpressionError as err: click.echo("Expression Error:") click.echo(' %s' % err.text) click.echo(' ' + ' ' * err.offset + "^") click.echo(err) sys.exit(1) except Exception as err: t, v, tb = sys.exc_info() for line in traceback.format_exception_only(t, v): click.echo(line, nl=False) sys.exit(1)
def calc(ctx, command, files, name, dtype, masked): """A raster data calculator Evaluates an expression using input datasets and writes the result to a new dataset. Command syntax is lisp-like. An expression consists of an operator or function name and one or more strings, numbers, or expressions enclosed in parentheses. Functions include ``read`` (gets a raster array) and ``asarray`` (makes a 3-D array from 2-D arrays). \b * (read i) evaluates to the i-th input dataset (a 3-D array). * (read i j) evaluates to the j-th band of the i-th dataset (a 2-D array). * (take foo j) evaluates to the j-th band of a dataset named foo (see help on the --name option above). * Standard numpy array operators (+, -, *, /) are available. * When the final result is a list of arrays, a multi band output file is written. * When the final result is a single array, a single band output file is written. Example: \b $ rio calc "(+ 2 (* 0.95 (read 1)))" tests/data/RGB.byte.tif \\ > /tmp/out.tif Produces a 3-band GeoTIFF with all values scaled by 0.95 and incremented by 2. \b $ rio calc "(asarray (+ 125 (read 1)) (read 1) (read 1))" \\ > tests/data/shade.tif /tmp/out.tif Produces a 3-band RGB GeoTIFF, with red levels incremented by 125, from the single-band input. """ import numpy as np verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1 logger = logging.getLogger('rio') try: with rasterio.drivers(CPL_DEBUG=verbosity > 2): output = files[-1] inputs = ([tuple(n.split('=')) for n in name] + [(None, n) for n in files[:-1]]) with rasterio.open(inputs[0][1]) as first: kwargs = first.meta kwargs['transform'] = kwargs.pop('affine') dtype = dtype or first.meta['dtype'] kwargs['dtype'] = dtype ctxkwds = {} for i, (name, path) in enumerate(inputs): with rasterio.open(path) as src: # Using the class method instead of instance # method. Latter raises # # TypeError: astype() got an unexpected keyword # argument 'copy' # # possibly something to do with the instance being # a masked array. ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked) # Extend snuggs. snuggs.func_map['read'] = read_array snuggs.func_map['band'] = lambda d, i: get_bands(inputs, d, i) snuggs.func_map['bands'] = lambda d: get_bands(inputs, d) snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args) snuggs.func_map['sieve'] = lambda *args: sieve(*args) res = snuggs.eval(command, **ctxkwds) if len(res.shape) == 3: results = np.ndarray.astype(res, dtype, copy=False) else: results = np.asanyarray( [np.ndarray.astype(res, dtype, copy=False)]) kwargs['count'] = results.shape[0] with rasterio.open(output, 'w', **kwargs) as dst: dst.write(results) sys.exit(0) except snuggs.ExpressionError as err: click.echo("Expression Error:") click.echo(' %s' % err.text) click.echo(' ' + ' ' * err.offset + "^") click.echo(err) sys.exit(1) except Exception as err: t, v, tb = sys.exc_info() for line in traceback.format_exception_only(t, v): click.echo(line, nl=False) sys.exit(1)
def test_dtypes():
    """Test data type support for sieve"""
    rows = cols = 10
    with rasterio.drivers():
        supported_types = (
            ('int16', -32768),
            ('int32', -2147483648),
            ('uint8', 255),
            ('uint16', 65535)
        )

        for dtype, test_value in supported_types:
            image = numpy.zeros((rows, cols), dtype=dtype)
            image[2:5, 2:5] = test_value

            # Sieve should return the original image
            sieved_image = ftrz.sieve(image, 2)
            assert numpy.array_equal(image, sieved_image)
            assert numpy.dtype(sieved_image.dtype).name == dtype

            # Sieve should return a blank image
            sieved_image = ftrz.sieve(image, 10)
            assert numpy.array_equal(numpy.zeros_like(image), sieved_image)
            assert numpy.dtype(sieved_image.dtype).name == dtype

        # Unsupported types should all raise exceptions
        unsupported_types = (
            ('int8', -127),
            ('uint32', 4294967295),
            ('int64', 20439845334323),
            ('float16', -9343.232),
            ('float32', 1.434532),
            ('float64', -98332.133422114)
        )

        for dtype, test_value in unsupported_types:
            with pytest.raises(ValueError):
                image = numpy.zeros((rows, cols), dtype=dtype)
                image[2:5, 2:5] = test_value
                ftrz.sieve(image, 2)

        # Test mask types
        image = numpy.zeros((rows, cols), dtype='uint8')
        image.fill(255)

        supported_mask_types = (
            ('bool', 1),
            ('uint8', 255)
        )
        for dtype, mask_value in supported_mask_types:
            mask = numpy.zeros((rows, cols), dtype=dtype)
            mask[2:5, 2:5] = mask_value
            sieved_image = ftrz.sieve(image, 2, mask=mask)
            assert numpy.array_equal(image, sieved_image)

        unsupported_mask_types = (
            ('int8', -127),
            ('int16', -32768)
        )
        for dtype, mask_value in unsupported_mask_types:
            with pytest.raises(ValueError):
                mask = numpy.zeros((rows, cols), dtype=dtype)
                mask[2:5, 2:5] = mask_value
                ftrz.sieve(image, 2, mask=mask)
def test_sieve_connectivity_invalid(basic_image):
    with pytest.raises(ValueError):
        sieve(basic_image, 54, connectivity=12)
def sieve(
    self,
    size=2,
    mask=None,
    connectivity=4,
    file_path=None,
    driver="GTiff",
    nodata=None,
    dtype=None,
    **kwargs,
):
    """Replace pixels with their largest neighbor

    Thin wrapper around the rasterio.features.sieve method.

    Parameters
    ----------
    size : integer (default 2)
        Minimum number of contiguous pixels to retain

    mask : ndarray (optional, default None)
        Values of False or 0 will be excluded from the sieving process

    connectivity : integer (default 4)
        Use 4 or 8 pixel connectivity for grouping pixels into features.
        Default is 4.

    file_path : str (optional, default None)
        Optional path to save calculated Raster object. If not specified
        then a tempfile is used.

    driver : str (default 'GTiff')
        Name of GDAL-supported driver for file export.

    nodata : any number (optional, default None)
        Nodata value for new dataset. If not specified then a nodata
        value is set based on the minimum permissible value of the
        Raster's data type.

    dtype : str (optional, default None)
        Optionally specify a numpy compatible data type when saving to
        file. If not specified, a data type is set based on the data type
        of the RasterLayer.

    kwargs : opt
        Optional named arguments to pass to the format drivers. For
        example can be `compress="deflate"` to add compression.

    Returns
    -------
    pyspatialml.RasterLayer
        Sieved RasterLayer
    """
    arr = sieve(
        source=self.read(masked=True),
        size=size,
        mask=mask,
        connectivity=connectivity,
    )

    layer = self._write(arr, file_path, driver, dtype, nodata, **kwargs)

    return layer
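# The wrapper above ultimately delegates to rasterio.features.sieve. This is
# a hedged sketch that exercises that underlying call directly, on an
# in-memory array standing in for the layer data the wrapper reads; the
# array values and sizes are illustrative, not taken from the original code.
import numpy as np
from rasterio.features import sieve

data = np.zeros((10, 10), dtype='uint8')
data[2:5, 2:5] = 1  # a 9-pixel feature

# Mirror the wrapper's defaults: keep features of at least 2 contiguous
# pixels, grouped with 4-way (rook) connectivity.
cleaned = sieve(data, 2, connectivity=4)
assert np.array_equal(cleaned, data)  # the 9-pixel feature is retained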
def SlopeContinuity(bassin, zone, workdir, overwrite): """ DCOME """ from rasterio.features import sieve basename = 'SLOPE_CLS_CONTINUOUS.tif' flow_raster = os.path.join(workdir, bassin, zone, 'FLOW.tif') slope_raster = os.path.join(workdir, bassin, zone, 'SLOPE_CLS.tif') stream_network = os.path.join(workdir, bassin, zone, 'StreamNetwork.shp') output = os.path.join(workdir, bassin, zone, basename) if os.path.exists(output) and not overwrite: important('Output already exists : %s' % output) return with rio.open(slope_raster) as ds: slopes = sieve(ds.read(1), 800) streams = RasterizeStream(slopes, ds.transform, ds.nodata, stream_network, 0) # data = np.float32(slopes) + 1 # data[slopes == ds.nodata] = ds.nodata # data[streams == 1] = 0 # feedback = ta.ConsoleFeedback() # distance = np.float32(ta.shortest_distance(data, ds.nodata, 0, feedback=feedback)) # out = np.zeros_like(slopes, dtype=np.float32) # data[distance < 100] = 1 # data[streams == 1] = 0 # feedback = ta.ConsoleFeedback() # ta.shortest_max(data, ds.nodata, 0, out=out, feedback=feedback) # out = np.uint8(out) - 1 # out[distance < 100] = slopes[distance < 100] # out[slopes == ds.nodata] = ds.nodata out = np.zeros_like(slopes, dtype=np.float32) out[streams == 1] = 1 with rio.open(flow_raster) as ds2: flow = ds2.read(1) feedback = ta.ConsoleFeedback() ta.watershed_max(flow, out, np.float32(slopes), fill_value=0, feedback=feedback) out = np.uint8(out) # out[streams == 1] = slopes[streams == 1] out[slopes == ds.nodata] = ds.nodata profile = ds.profile.copy() profile.update(compress='deflate') with rio.open(output, 'w', **profile) as dst: dst.write(out, 1) success('Saved result to %s' % output)
def test_sieve_large(basic_image):
    """
    Setting the size larger than the size of the feature should leave us
    an empty image.
    """
    assert not np.any(sieve(basic_image, basic_image.sum() + 1))
def test_sieve_invalid_size(basic_image):
    for invalid_size in (0, 45.1234, basic_image.size + 1):
        with pytest.raises(ValueError):
            sieve(basic_image, invalid_size)
import subprocess

import numpy as np
import rasterio
from rasterio.features import sieve, shapes


# Register GDAL and OGR drivers.
with rasterio.Env():

    # Read a raster to be sieved.
    with rasterio.open('tests/data/shade.tif') as src:
        shade = src.read(1)

    # Print the number of shapes in the source raster.
    print("Slope shapes: %d" % len(list(shapes(shade))))

    # Sieve out features 13 pixels or smaller.
    sieved = sieve(shade, 13, out=np.zeros(src.shape, src.dtypes[0]))

    # Print the number of shapes in the sieved raster.
    print("Sieved (13) shapes: %d" % len(list(shapes(sieved))))

    # Write out the sieved raster.
    kwargs = src.meta
    kwargs['transform'] = kwargs.pop('affine')
    with rasterio.open('example-sieved.tif', 'w', **kwargs) as dst:
        dst.write(sieved, indexes=1)

    # Dump out gdalinfo's report card and open (or "eog") the TIFF.
    print(subprocess.check_output(['gdalinfo', '-stats', 'example-sieved.tif']))
    subprocess.call(['open', 'example-sieved.tif'])
def test_sieve_blank_mask(basic_image):
    """A blank mask should have no effect."""
    mask = np.ones(basic_image.shape, dtype=rasterio.bool_)
    assert np.array_equal(
        basic_image, sieve(basic_image, basic_image.sum(), mask=mask))
import subprocess

import numpy
import rasterio
from rasterio.features import sieve, shapes


# Register GDAL and OGR drivers.
with rasterio.drivers():

    # Read a raster to be sieved.
    with rasterio.open('tests/data/shade.tif') as src:
        shade = src.read(1)

    # Print the number of shapes in the source raster.
    print("Slope shapes: %d" % len(list(shapes(shade))))

    # Sieve out features 13 pixels or smaller.
    sieved = sieve(shade, 13, out=numpy.zeros(src.shape, src.dtypes[0]))

    # Print the number of shapes in the sieved raster.
    print("Sieved (13) shapes: %d" % len(list(shapes(sieved))))

    # Write out the sieved raster.
    kwargs = src.meta
    kwargs['transform'] = kwargs.pop('affine')
    with rasterio.open('example-sieved.tif', 'w', **kwargs) as dst:
        dst.write(sieved, indexes=1)

    # Dump out gdalinfo's report card and open (or "eog") the TIFF.
    print(subprocess.check_output(
        ['gdalinfo', '-stats', 'example-sieved.tif']))
    subprocess.call(['open', 'example-sieved.tif'])
b1, b2, b3 = src.read() profile = src.profile profile.update( # dtype=rasterio.float32, dtype=rasterio.uint8, count=1, compress='lzw', nodata=0) red_index = numpy.zeros(b1.shape) red_index = (b1.astype(float) - b2.astype(float)) / (b1.astype(float) + b2.astype(float)) mascara_rojo = numpy.zeros(b1.shape) mascara_rojo = (red_index > 0.2) * 1 & (red_index < 0.7) * 1 & (b1 > 60) * 1 mascara_rojo = sieve(mascara_rojo.astype(rasterio.uint8), size=400, connectivity=8) # with rasterio.open('red_index.tif', 'w', **profile) as dst: # dst.write(red_index.astype(rasterio.float32), 1) with rasterio.open( '{0}{1}_mascara.tif'.format(salida, os.path.splitext(f)[0]), 'w', **profile) as dst: dst.nodata = 0 dst.write(mascara_rojo.astype(rasterio.uint8), 1) else: pass
def test_sieve_invalid_size(basic_image):
    with Env():
        for invalid_size in (0, 45.1234, basic_image.size + 1):
            with pytest.raises(ValueError):
                sieve(basic_image, invalid_size)
def calc(ctx, command, files, output, name, dtype, masked, force_overwrite, creation_options): """A raster data calculator Evaluates an expression using input datasets and writes the result to a new dataset. Command syntax is lisp-like. An expression consists of an operator or function name and one or more strings, numbers, or expressions enclosed in parentheses. Functions include ``read`` (gets a raster array) and ``asarray`` (makes a 3-D array from 2-D arrays). \b * (read i) evaluates to the i-th input dataset (a 3-D array). * (read i j) evaluates to the j-th band of the i-th dataset (a 2-D array). * (take foo j) evaluates to the j-th band of a dataset named foo (see help on the --name option above). * Standard numpy array operators (+, -, *, /) are available. * When the final result is a list of arrays, a multi band output file is written. * When the final result is a single array, a single band output file is written. Example: \b $ rio calc "(+ 2 (* 0.95 (read 1)))" tests/data/RGB.byte.tif \\ > /tmp/out.tif Produces a 3-band GeoTIFF with all values scaled by 0.95 and incremented by 2. \b $ rio calc "(asarray (+ 125 (read 1)) (read 1) (read 1))" \\ > tests/data/shade.tif /tmp/out.tif Produces a 3-band RGB GeoTIFF, with red levels incremented by 125, from the single-band input. """ import numpy as np try: with ctx.obj['env']: output, files = resolve_inout(files=files, output=output, force_overwrite=force_overwrite) inputs = ([tuple(n.split('=')) for n in name] + [(None, n) for n in files]) with rasterio.open(inputs[0][1]) as first: kwargs = first.meta kwargs.update(**creation_options) dtype = dtype or first.meta['dtype'] kwargs['dtype'] = dtype ctxkwds = {} for i, (name, path) in enumerate(inputs): with rasterio.open(path) as src: # Using the class method instead of instance # method. Latter raises # # TypeError: astype() got an unexpected keyword # argument 'copy' # # possibly something to do with the instance being # a masked array. ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked) # Extend snuggs. snuggs.func_map['read'] = read_array snuggs.func_map['band'] = lambda d, i: get_bands(inputs, d, i) snuggs.func_map['bands'] = lambda d: get_bands(inputs, d) snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args) snuggs.func_map['sieve'] = lambda *args: sieve(*args) res = snuggs.eval(command, **ctxkwds) if (isinstance(res, np.ma.core.MaskedArray) and (tuple(LooseVersion(np.__version__).version) < (1, 9) or tuple(LooseVersion(np.__version__).version) > (1, 10))): res = res.filled(kwargs['nodata']) if len(res.shape) == 3: results = np.ndarray.astype(res, dtype, copy=False) else: results = np.asanyarray( [np.ndarray.astype(res, dtype, copy=False)]) kwargs['count'] = results.shape[0] with rasterio.open(output, 'w', **kwargs) as dst: dst.write(results) except snuggs.ExpressionError as err: click.echo("Expression Error:") click.echo(' %s' % err.text) click.echo(' ' + ' ' * err.offset + "^") click.echo(err) raise click.Abort()
def main(cadastre_filepath, dst_tif_filepath, dst_shp_filepath, dst_res, num_patches, kernel_radius, urban_threshold, buffer_dist, dst_nodata): logger = logging.getLogger(__name__) logger.info("preparing raster agglomeration LULC from %s", cadastre_filepath) cadastre_arr, cadastre_transform = utils.rasterize_cadastre( cadastre_filepath, dst_res, dst_nodata) logger.info("rasterized cadastre vector LULC dataset to shape %s", str(cadastre_arr.shape)) # get the urban extent mask according to the criteria used in the "Atlas # of Urban Expansion, The 2016 Edition" by Angel, S. et al. uf = ufp.UrbanFootprinter(cadastre_arr, urban_classes=utils.URBAN_CLASSES, res=dst_res) urban_mask = uf.compute_footprint_mask(kernel_radius, urban_threshold, num_patches=num_patches, buffer_dist=buffer_dist) logger.info( "obtained extent of the %d largest urban cluster(s) (%d pixels)", num_patches, np.sum(urban_mask)) # exclude lake # TODO: arguments to customize `LULC_WATER_VAL` and `SIEVE_SIZE` label_arr = ndi.label(cadastre_arr == utils.LULC_WATER_VAL, ndi.generate_binary_structure(2, 2))[0] cluster_label = np.argmax(np.unique(label_arr, return_counts=True)[1][1:]) + 1 largest_cluster = np.array(label_arr == cluster_label, dtype=np.uint8) urban_mask = features.sieve( np.array(urban_mask.astype(bool) & ~largest_cluster.astype(bool), dtype=urban_mask.dtype), SIEVE_SIZE) # get window and transform of valid data points, i.e., the computed extent extent_window = windows.get_data_window(urban_mask, nodata=0) extent_transform = windows.transform(extent_window, cadastre_transform) dst_arr = np.where(urban_mask, cadastre_arr, dst_nodata)[windows.window_index(extent_window)] # dump it # ACHTUNG: use hardcoded CRS string (for the same CRS) to avoid issues with rio.open( dst_tif_filepath, 'w', driver='GTiff', width=extent_window.width, height=extent_window.height, count=1, crs=utils.CRS, # cadastre_gdf.crs transform=extent_transform, dtype=np.uint8, nodata=dst_nodata) as dst: dst.write(dst_arr, 1) logger.info("dumped rasterized dataset to %s", dst_tif_filepath) if dst_shp_filepath: # save the geometry extent # get the urban mask geometry # urban_mask_geom = uf.compute_footprint_mask_shp( # kernel_radius, # urban_threshold, # largest_patch_only=largest_patch_only, # buffer_dist=buffer_dist, # transform=extent_transform) urban_mask_geom = geometry.shape( max([(geom, val) for geom, val in features.shapes( np.array(dst_arr != dst_nodata, dtype=np.uint8), transform=extent_transform) if val == 1], key=lambda geom: len(geom[0]['coordinates']))[0]) # get the window and transform of the lake extent lake_mask = features.sieve(largest_cluster, SIEVE_SIZE) extent_window = windows.get_data_window(lake_mask, nodata=0) extent_transform = windows.transform(extent_window, cadastre_transform) lake_mask = lake_mask[windows.window_index(extent_window)] # get the lake mask geometry lake_mask_geom = geometry.shape( max([(geom, val) for geom, val in features.shapes( lake_mask, transform=extent_transform) if val == 1], key=lambda geom: len(geom[0]['coordinates']))[0]) # ACHTUNG: use hardcoded CRS string (for the same CRS) to avoid issues gpd.GeoSeries([urban_mask_geom, lake_mask_geom], crs=utils.CRS).to_file(dst_shp_filepath) logger.info("dumped extent geometry to %s", dst_shp_filepath)
def test_sieve_internal_driver_manager(basic_image, pixelated_image):
    """Sieve should work without explicitly calling driver manager."""
    assert np.array_equal(
        basic_image,
        sieve(pixelated_image, basic_image.sum())
    )
def DelineateZoneHydro(basin, zone, outlets_shapefile, root, overwrite): """ """ output = os.path.join(root, basin, zone, 'ZONEHYDRO_MNT.shp') flow_raster = os.path.join(root, basin, zone, 'FLOW.tif') if os.path.exists(output) and not overwrite: # click.secho('Output already exists : %s' % output, fg='yellow') return cdzonecnt = itertools.count(1) cdzones = defaultdict(lambda: next(cdzonecnt)) with rio.open(flow_raster) as ds: flow = ds.read(1) watersheds = np.zeros_like(flow, dtype=np.float32) def isdata(i, j): return i >= 0 and i < ds.height and j >= 0 and j < ds.width with fiona.open(outlets_shapefile) as fs: crs = fs.crs driver = fs.driver for feature in fs: cdzone = feature['properties']['CDZONEHYDR'] drainage = feature['properties']['DRAINAGE'] # if drainage > 0 and drainage < 200000: # continue x, y = feature['geometry']['coordinates'] i, j = ds.index(x, y) if isdata(i, j): # print(cdzone) idzone = cdzones[cdzone] watersheds[i, j] = idzone fill_value = 0 feedback = ta.SilentFeedback() ta.watershed(flow, watersheds, fill_value, feedback) profile = ds.profile.copy() profile.update(compress='deflate', nodata=0, dtype=np.float32) with rio.open(os.path.join(root, basin, zone, 'WATERSHEDS.tif'), 'w', **profile) as dst: dst.write(watersheds, 1) watersheds = sieve(np.int32(watersheds), 40) CdToZones = {v: k for k, v in cdzones.items()} schema = { 'geometry': 'Polygon', 'properties': [('CdZoneHydr', 'str:4')] } polygons = shapes(watersheds, (watersheds == cdzones[zone]), connectivity=8, transform=ds.transform) options = dict(driver=driver, crs=crs, schema=schema) with fiona.open(output, 'w', **options) as dst: for polygon, value in polygons: if value > 0: geom = asShape(polygon).buffer(0.0) feature = { 'geometry': geom.__geo_interface__, 'properties': { 'CdZoneHydr': CdToZones[value] } } dst.write(feature)
def ExtractZoneHydro(bassin, zone, root, output, flowdir, epsg, overwrite, overwrite_flow, debug): """ Re-délimitation des zones hydrographiques BDC à la résolution du MNT 1. Rastérise le réseau hydro cartographié en utilisant le même algorithme que celui utilisé dans l'algorithme `StreamToRaster` de la FCT 2. Calcule le plan de drainage en utilisant la variante de l'algorithme Priority Flood de Lindsay 3. Réalise une analyse de bassin versant (Watershed Analysis) 4. Vectorize le polygone correspondant à la zone indiquée """ logger.info('Processing zone %s' % zone) logger.info('Working Directory = %s' % root) raster_template = os.path.join(root, bassin, zone, 'DEM5M.tif') stream_network = os.path.join(root, bassin, zone, 'StreamNetwork.shp') outfilename = os.path.join(root, bassin, zone, output) if os.path.exists(outfilename) and not overwrite: # logger.warning('Output already exists : %s' % outfilename) return feedback = ta.SilentFeedback() ds = rio.open(raster_template) logger.info('Rasterize Stream Network') cdzonecnt = itertools.count(1) cdzones = defaultdict(lambda: next(cdzonecnt)) fill_value = 0 # burn_value = 1 junctions = np.zeros((ds.height, ds.width), dtype=np.uint8) streams = StreamToRaster(raster_template, ds.nodata, stream_network, fill_value, cdzones, junctions) if debug: filename = os.path.join(root, bassin, zone, 'STREAMS.tif') logger.debug('Write %s' % filename) profile = ds.profile.copy() profile.update(compress='deflate') with rio.open(filename, 'w', **profile) as dst: dst.write(streams, 1) filename = os.path.join(root, bassin, zone, 'JUNCTIONS.tif') logger.debug(' Write %s' % filename) profile.update(dtype=np.uint8, nodata=255, compress='deflate') with rio.open(filename, 'w', **profile) as dst: dst.write(junctions, 1) flow_raster = os.path.join(root, bassin, zone, flowdir) if os.path.exists(flow_raster) and not overwrite_flow: logger.info('Read Flow Direction from %s' % flow_raster) with rio.open(flow_raster) as src: flow = src.read(1) else: logger.info('Calculate Flow Direction') elevations = ds.read(1) zdelta = 0.0001 flow = ta.burnfill(elevations, streams, junctions, ds.nodata, zdelta, feedback=feedback) feedback.setProgress(100) logger.info('Save to %s' % flow_raster) profile = ds.profile.copy() profile.update(dtype=np.int16, nodata=-1, compress='deflate') with rio.open(flow_raster, 'w', **profile) as dst: dst.write(flow, 1) logger.info('Calculate Watersheds') watersheds = np.copy(streams) ta.watershed(flow, watersheds, fill_value, feedback) feedback.setProgress(100) if debug: filename = os.path.join(root, bassin, zone, 'WATERSHEDS.tif') logger.debug('Write %s' % filename) profile = ds.profile.copy() profile.update(dtype=np.int32, nodata=0, compress='deflate') with rio.open(filename, 'w', **profile) as dst: dst.write(np.int32(watersheds), 1) logger.info('Vectorize Polygons') watersheds = sieve(np.int32(watersheds), 400) schema = {'geometry': 'Polygon', 'properties': [('CdZoneHydr', 'str:4')]} crs = fiona.crs.from_epsg(epsg) CdToZones = {v: k for k, v in cdzones.items()} polygons = shapes(watersheds, (watersheds == cdzones[zone]), connectivity=8, transform=ds.transform) options = dict(driver='ESRI Shapefile', crs=crs, schema=schema) with fiona.open(outfilename, 'w', **options) as dst: for polygon, value in polygons: if value > 0: geom = asShape(polygon).buffer(0.0) feature = { 'geometry': geom.__geo_interface__, 'properties': { 'CdZoneHydr': CdToZones[value] } } dst.write(feature) logger.info('Everything Ok')