import numpy as np
import pytest
from landlab import RasterModelGrid
from landlab.components import FlowAccumulator
from landlab.utils import get_watershed_mask
from scipy.stats import ks_2samp


def test_route_to_multiple_error_raised_watershed_mask():
    mg = RasterModelGrid((10, 10))
    z = mg.add_zeros("topographic__elevation", at="node")
    z += mg.x_of_node + mg.y_of_node
    fa = FlowAccumulator(mg, flow_director="MFD")
    fa.run_one_step()

    with pytest.raises(NotImplementedError):
        get_watershed_mask(mg, 10)
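# --- Illustrative sketch (not part of the original test suite) ---
# For contrast with the MFD case above, a companion test could confirm that
# get_watershed_mask returns a node mask normally when flow is routed to a
# single receiver (the default FlowAccumulator director). This is a hedged
# sketch under that assumption; the expected dtype/size are assumptions
# about landlab's get_watershed_mask, not guarantees.
def test_watershed_mask_returned_for_route_to_one():
    mg = RasterModelGrid((10, 10))
    z = mg.add_zeros("topographic__elevation", at="node")
    z += mg.x_of_node + mg.y_of_node
    fa = FlowAccumulator(mg)  # default director routes flow to one receiver
    fa.run_one_step()

    mask = get_watershed_mask(mg, 1)
    assert mask.size == mg.number_of_nodes
    assert mask.dtype == bool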
def hypsometric_integral(grid, outlet_id):
    """Calculate the hypsometric integral of the watershed upstream of *outlet_id*.

    The hypsometric integral :math:`I` is defined as

    .. math::

        I = \\frac{\\frac{1}{N} \\sum_{i=1}^{N} \\left( z_i - \\min \\left( z \\right) \\right)}
            {\\max \\left( z \\right) - \\min \\left( z \\right)}

    where :math:`z` is the set of elevation values within the watershed that
    drains to *outlet_id*, and :math:`N` is the number of those values.

    Parameters
    ----------
    grid : Landlab model grid
    outlet_id : int
        Outlet id of the watershed.

    Returns
    -------
    I : float
        The hypsometric integral.

    Examples
    --------
    First an example that only uses the ``hypsometric_integral`` function.

    >>> from landlab import RasterModelGrid
    >>> from landlab.components import FlowAccumulator
    >>> from umami.calculations import hypsometric_integral

    >>> grid = RasterModelGrid((10, 10))
    >>> z = grid.add_zeros("topographic__elevation", at="node")
    >>> z += grid.x_of_node + grid.y_of_node
    >>> fa = FlowAccumulator(grid)
    >>> fa.run_one_step()

    >>> hypsometric_integral(grid, 1)
    0.5

    Next, the same calculations are shown as part of an umami ``Metric``.

    >>> from io import StringIO
    >>> from umami import Metric
    >>> file_like = StringIO('''
    ... hi:
    ...     _func: hypsometric_integral
    ...     outlet_id: 1
    ... ''')

    >>> metric = Metric(grid)
    >>> metric.add_from_file(file_like)
    >>> metric.names
    ['hi']
    >>> metric.calculate()
    >>> metric.values
    [0.5]
    """
    # Get just those elevation values that are within the watershed.
    mask = get_watershed_mask(grid, outlet_id)
    vals = grid.at_node["topographic__elevation"][mask]

    # Get the min and max.
    min_val = np.amin(vals)
    max_val = np.amax(vals)

    # Calculate and return the hypsometric integral.
    return np.mean(vals - min_val) / (max_val - min_val)
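# --- Illustrative sketch (not part of umami) ---
# The docstring formula can be checked by hand: the helper below (a
# hypothetical name, for illustration only) recomputes
# I = mean(z - min(z)) / (max(z) - min(z)) directly with plain NumPy on the
# same tilted-plane grid used in the doctest, and should reproduce the 0.5
# value returned by hypsometric_integral above.
def _check_hypsometric_integral_by_hand():
    grid = RasterModelGrid((10, 10))
    z = grid.add_zeros("topographic__elevation", at="node")
    z += grid.x_of_node + grid.y_of_node
    FlowAccumulator(grid).run_one_step()

    # Mirror the body of hypsometric_integral: mask, then reduce.
    vals = z[get_watershed_mask(grid, 1)]
    return np.mean(vals - vals.min()) / (vals.max() - vals.min())  # -> 0.5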
def kstest_watershed(model_grid, data_grid, field, outlet_id):
    """Calculate a Kolmogorov-Smirnov test for a watershed.

    ``kstest_watershed`` calculates the Kolmogorov-Smirnov test for goodness
    of fit using the function ``ks_2samp`` from ``scipy.stats``.

    Given an *outlet_id*, it identifies a watershed mask for the *data_grid*.
    It then uses that mask on both the *data_grid* and the *model_grid*.

    If the field is "flow__distance", then this performs a KS test of the
    width function.

    Parameters
    ----------
    model_grid : Landlab model grid
    data_grid : Landlab model grid
    field : str
        An at-node Landlab grid field that is present on the model grid.
    outlet_id : int
        Outlet id of the watershed.

    Returns
    -------
    out : float
        The KS test statistic.

    Examples
    --------
    First an example that only uses the ``kstest_watershed`` function.

    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> from landlab.components import FlowAccumulator
    >>> from umami.calculations import kstest_watershed

    >>> np.random.seed(42)

    >>> model = RasterModelGrid((10, 10))
    >>> z_model = model.add_zeros("topographic__elevation", at="node")
    >>> z_model += model.x_of_node + model.y_of_node

    >>> data = RasterModelGrid((10, 10))
    >>> z_data = data.add_zeros("topographic__elevation", at="node")
    >>> z_data += data.x_of_node + data.y_of_node
    >>> z_data[data.core_nodes] += np.random.random(data.core_nodes.shape)

    >>> data_fa = FlowAccumulator(data)
    >>> data_fa.run_one_step()
    >>> model_fa = FlowAccumulator(model)
    >>> model_fa.run_one_step()

    >>> np.round(
    ...     kstest_watershed(
    ...         model,
    ...         data,
    ...         "topographic__elevation",
    ...         outlet_id=1),
    ...     decimals=3)
    0.5

    Next, the same calculations are shown as part of an umami ``Residual``.

    >>> from io import StringIO
    >>> from umami import Residual
    >>> file_like = StringIO('''
    ... ksw:
    ...     _func: kstest_watershed
    ...     outlet_id: 1
    ...     field: topographic__elevation
    ... ''')

    >>> residual = Residual(model, data)
    >>> residual.add_from_file(file_like)
    >>> residual.names
    ['ksw']
    >>> residual.calculate()
    >>> np.round(residual.values, decimals=3)
    array([ 0.5])
    """
    # Identify the watershed on the data grid and apply the same mask to
    # both grids.
    mask = get_watershed_mask(data_grid, outlet_id)
    model_vals = model_grid.at_node[field][mask]
    data_vals = data_grid.at_node[field][mask]

    # Two-sample KS statistic between the model and data distributions.
    d, _ = ks_2samp(model_vals, data_vals)
    return d
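# --- Illustrative sketch (not part of umami) ---
# kstest_watershed compares any at-node field shared by both grids. As an
# example beyond the doctest, the "drainage_area" field created by
# FlowAccumulator can be compared the same way. The helper name below is
# hypothetical; it simply repeats the doctest's grid setup.
def _kstest_on_drainage_area():
    np.random.seed(42)

    grids = []
    for _ in range(2):
        g = RasterModelGrid((10, 10))
        z = g.add_zeros("topographic__elevation", at="node")
        z += g.x_of_node + g.y_of_node
        grids.append(g)
    model, data = grids

    # Perturb the "data" surface, then route flow on both grids.
    data.at_node["topographic__elevation"][data.core_nodes] += np.random.random(
        data.core_nodes.size
    )
    FlowAccumulator(model).run_one_step()
    FlowAccumulator(data).run_one_step()

    # KS statistic between the two drainage-area distributions in the
    # watershed that drains to node 1.
    return kstest_watershed(model, data, "drainage_area", outlet_id=1)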
def watershed_aggregation(grid, field, outlet_id, method, **kwds):
    """Aggregate a field value over a watershed.

    ``watershed_aggregation`` calculates aggregate values on the nodes in a
    watershed that drain to *outlet_id*. It supports all methods in the
    `numpy`_ namespace that reduce an array to a scalar.

    .. _numpy: https://numpy.org

    Parameters
    ----------
    grid : Landlab model grid
    field : str
        An at-node Landlab grid field that is present on the model grid.
    outlet_id : int
        Outlet id of the watershed.
    method : str
        The name of a numpy namespace method.
    **kwds
        Any additional keyword arguments needed by the method.

    Returns
    -------
    out : float
        The aggregate value.

    Examples
    --------
    First an example that only uses the ``watershed_aggregation`` function.

    >>> from landlab import RasterModelGrid
    >>> from landlab.components import FlowAccumulator
    >>> from umami.calculations import watershed_aggregation

    >>> grid = RasterModelGrid((10, 10))
    >>> z = grid.add_zeros("topographic__elevation", at="node")
    >>> z += grid.x_of_node + grid.y_of_node
    >>> fa = FlowAccumulator(grid)
    >>> fa.run_one_step()

    ``watershed_aggregation`` supports all functions in the `numpy`_
    namespace. Here we show `mean`_ and `percentile`_, the latter of which
    takes an additional argument, *q*.

    .. _mean: https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html
    .. _percentile: https://docs.scipy.org/doc/numpy/reference/generated/numpy.percentile.html

    >>> watershed_aggregation(grid, "topographic__elevation", 1, "mean")
    5.0
    >>> watershed_aggregation(
    ...     grid,
    ...     "topographic__elevation",
    ...     1,
    ...     "percentile",
    ...     q=10)
    1.8

    Next, the same calculations are shown as part of an umami ``Metric``.

    >>> from io import StringIO
    >>> from umami import Metric
    >>> file_like = StringIO('''
    ... oid1_mean:
    ...     _func: watershed_aggregation
    ...     outlet_id: 1
    ...     method: mean
    ...     field: topographic__elevation
    ... oid1_10thptile:
    ...     _func: watershed_aggregation
    ...     outlet_id: 1
    ...     method: percentile
    ...     field: topographic__elevation
    ...     q: 10
    ... ''')

    >>> metric = Metric(grid)
    >>> metric.add_from_file(file_like)
    >>> metric.names
    ['oid1_mean', 'oid1_10thptile']
    >>> metric.calculate()
    >>> metric.values
    [5.0, 1.8]
    """
    # Select the field values within the watershed that drains to outlet_id.
    mask = get_watershed_mask(grid, outlet_id)
    vals = grid.at_node[field][mask]

    # Reduce the masked values to a scalar with the requested numpy method.
    return _aggregate(vals, method, **kwds)
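# --- Illustrative sketch (not part of umami) ---
# The private _aggregate helper used above is defined elsewhere in umami and
# is not shown here. As an assumption about the idea (not the actual
# implementation), resolving *method* against the numpy namespace and
# forwarding extra keyword arguments might look like this:
def _aggregate_sketch(vals, method, **kwds):
    func = getattr(np, method)  # e.g. np.mean, np.percentile
    return func(vals, **kwds)   # e.g. np.percentile(vals, q=10)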