Example #1
def get_watershed_info(gpkg_path):
    """Query a BRAT database and get information about
    the watershed being run. Assumes that all watersheds
    except the one being run have been deleted.

    Arguments:
        gpkg_path {str} -- Path to the BRAT SQLite geopackage

    Returns:
        [tuple] -- WatershedID, max drainage area, EcoregionID with which
        the watershed is associated.
    """

    with SQLiteCon(gpkg_path) as database:
        database.curs.execute(
            'SELECT WatershedID, MaxDrainage, EcoregionID FROM Watersheds')
        row = database.curs.fetchone()
        watershed = row['WatershedID']
        max_drainage = row['MaxDrainage']
        ecoregion = row['EcoregionID']

    log = Logger('BRAT Run')

    if not watershed:
        raise Exception(
            'Missing watershed in BRAT database {}'.format(gpkg_path))

    if not max_drainage:
        log.warning('Missing max drainage for watershed {}'.format(watershed))

    if not ecoregion:
        raise Exception('Missing ecoregion for watershed {}'.format(watershed))

    return watershed, max_drainage, ecoregion
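
A minimal usage sketch (the geopackage path below is hypothetical):

    watershed_id, max_drainage, ecoregion_id = get_watershed_info('/path/to/brat.gpkg')
    print(watershed_id, max_drainage, ecoregion_id)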
Example #2
def watershed_parameters(flowlines_path, watershed_id):

    # Find the reach with max drainage
    with SQLiteCon(os.path.dirname(flowlines_path)) as database:
        database.curs.execute(
            'SELECT fid, geom FROM network ORDER BY TotDASqKm DESC LIMIT 1')
        reach_id = database.curs.fetchone()['fid']

    # Load the geometry for this reach
    point = None
    with get_shp_or_gpkg(flowlines_path) as lyr:
        for feature, _counter, _progbar in lyr.iterate_features(
                attribute_filter='"fid" = {}'.format(reach_id)):
            line = loads(feature.GetGeometryRef().ExportToWkb())
            # point = line.interpolate(0.5, normalized=True)
            coord = list(line.coords)

            url = stream_stats_01_watershed.format('CA', coord[0][0],
                                                   coord[0][1])
            response = requests.get(url)
            data = json.loads(response.content)

            url2 = stream_stats_02_basin_chars.format('CA',
                                                      data['workspaceID'])
            response2 = requests.get(url2)
            data2 = json.loads(response2.content)
            print(data2)
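
The two-step pattern above (delineate a watershed, then fetch basin characteristics using the returned workspace ID) can be sketched standalone. The URL templates below are placeholders, since the real stream_stats_* constants are defined elsewhere in the module:

    import json
    import requests

    # Placeholder templates; the real stream_stats_* constants are not shown here
    STEP1_URL = 'https://example.com/watershed?rcode={}&x={}&y={}'
    STEP2_URL = 'https://example.com/basinchars?rcode={}&workspace={}'

    def fetch_basin_characteristics(state_code, lon, lat):
        # First call delineates the watershed and returns a workspace ID
        response = requests.get(STEP1_URL.format(state_code, lon, lat))
        response.raise_for_status()
        data = json.loads(response.content)

        # Second call must parse response2, not the first response
        response2 = requests.get(STEP2_URL.format(state_code, data['workspaceID']))
        response2.raise_for_status()
        return json.loads(response2.content)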
Example #3
def load_lookup(gpkg_path: str, sql: str) -> dict:
    """ Load one of the conservation tables as a dictionary

    Args:
        gpkg_path (str): Path to the BRAT geopackage database
        sql (str): SQL query that returns Name and ID columns

    Returns:
        dict: dictionary mapping each Name to its ID
    """

    with SQLiteCon(gpkg_path) as database:
        database.curs.execute(sql)
        return {row['Name']: row['ID'] for row in database.curs.fetchall()}
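
A minimal usage sketch, assuming a query that aliases its columns to Name and ID (the path and SQL below are hypothetical):

    agencies = load_lookup(
        '/path/to/brat.gpkg',
        'SELECT Name, AgencyID AS ID FROM Agencies')
    # e.g. {'Bureau of Land Management': 1, ...}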
Example #4
def admin_agency(database, reaches, ownership, results):

    log = Logger('Conflict')
    log.info(
        'Calculating land ownership administering agency for {:,} reach(es)'.
        format(len(reaches)))

    # Load the agency lookups
    with SQLiteCon(database) as db_conn:
        db_conn.curs.execute(
            'SELECT AgencyID, Name, Abbreviation FROM Agencies')
        agencies = {
            row['Abbreviation']: {
                'AgencyID': row['AgencyID'],
                'Name': row['Name'],
                'RawGeometries': [],
                'GeometryUnion': None
            }
            for row in db_conn.curs.fetchall()
        }

    with get_shp_or_gpkg(ownership) as ownership_lyr:

        progbar = ProgressBar(len(reaches), 50, "Calc administration agency")
        counter = 0

        # Loop over stream reaches and assign agency
        for reach_id, polyline in reaches.items():
            counter += 1
            progbar.update(counter)

            if reach_id not in results:
                results[reach_id] = {}

            mid_point = polyline.interpolate(0.5, normalized=True)
            results[reach_id]['AgencyID'] = None

            for feature, _counter, _progbar in ownership_lyr.iterate_features(
                    clip_shape=mid_point):
                agency = feature.GetField('ADMIN_AGEN')
                if agency not in agencies:
                    raise Exception(
                        'The ownership agency "{}" is not found in the BRAT SQLite database'
                        .format(agency))
                results[reach_id]['AgencyID'] = agencies[agency]['AgencyID']

    progbar.finish()
    log.info('Administration agency assignment complete')
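
The midpoint containment test above can be illustrated standalone with shapely; a minimal sketch:

    from shapely.geometry import LineString, Polygon

    reach = LineString([(0, 0), (10, 0)])
    parcel = Polygon([(4, -1), (6, -1), (6, 1), (4, 1)])

    # Sample the reach at its midpoint and test polygon containment
    mid_point = reach.interpolate(0.5, normalized=True)  # POINT (5 0)
    print(parcel.contains(mid_point))                    # True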
Example #5
def get_stream_buffers(gpkg_path):
    """Get the list of buffers used to sample the vegetation.
    Assumes that the vegetation has already been sampled and that
    the streamside and riparian buffers are the only values in
    the ReachVegetation database table.

    Arguments:
        gpkg_path {str} -- Path to the BRAT database

    Returns:
        [list] -- all discrete vegetation buffers
    """

    with SQLiteCon(gpkg_path) as database:
        database.curs.execute(
            'SELECT Buffer FROM ReachVegetation GROUP BY Buffer')
        return [row['Buffer'] for row in database.curs.fetchall()]
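
A minimal usage sketch (hypothetical path; the values are whatever streamside and riparian buffers were used during vegetation sampling):

    buffers = get_stream_buffers('/path/to/brat.gpkg')
    print(buffers)  # e.g. [30.0, 100.0]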
Example #6
def calculate_vegetation_suitability(gpkg_path: str, buffer: float, epoch: str,
                                     veg_col: str, ecoregion: str) -> dict:
    """ Calculation vegetation suitability

    Args:
        gpkg_path (str): Path to the BRAT geopackage database
        buffer (float): Vegetation buffer distance (metres)
        epoch (str): Epoch metadata value, e.g. 'EX' or 'HPE'
        veg_col (str): Name of the output attribute column
        ecoregion (str): Database ID of the ecoregion associated with the watershed

    Raises:
        Exception: When no epoch matches the given metadata value

    Returns:
        dict: Vegetation suitability values keyed by ReachID
    """

    log = Logger('Veg Suitability')
    log.info('Buffer: {}'.format(buffer))
    log.info('Epoch: {}'.format(epoch))
    log.info('Veg Column: {}'.format(veg_col))

    with SQLiteCon(gpkg_path) as database:

        # Get the database epoch that has the prefix 'EX' or 'HPE' in the metadata
        database.curs.execute('SELECT EpochID FROM Epochs WHERE Metadata = ?',
                              [epoch])
        row = database.curs.fetchone()
        if row is None:
            raise Exception(
                'Missing epoch in database with metadata value of "{}"'.format(
                    epoch))

        database.curs.execute(
            'SELECT R.ReachID, Round(SUM(CAST(IFNULL(OverrideSuitability, DefaultSuitability) AS REAL) * CAST(CellCount AS REAL) / CAST(TotalCells AS REAL)), 2) AS VegSuitability'
            ' FROM vwReaches R'
            ' INNER JOIN Watersheds W ON R.WatershedID = W.WatershedID'
            ' INNER JOIN Ecoregions E ON W.EcoregionID = E.EcoregionID'
            ' INNER JOIN ReachVegetation RV ON R.ReachID = RV.ReachID'
            ' INNER JOIN VegetationTypes VT ON RV.VegetationID = VT.VegetationID'
            ' INNER JOIN Epochs EP ON VT.EpochID = EP.EpochID'
            ' INNER JOIN('
            ' SELECT ReachID, SUM(CellCount) AS TotalCells'
            ' FROM ReachVegetation RV'
            ' INNER JOIN VegetationTypes VT ON RV.VegetationID = VT.VegetationID'
            ' INNER JOIN Epochs E ON E.EpochID = VT.EpochID'
            ' WHERE Buffer = ? AND E.Metadata = ?'
            ' GROUP BY ReachID) AS RS ON R.ReachID = RS.ReachID'
            ' LEFT JOIN VegetationOverrides VO ON E.EcoregionID = VO.EcoregionID AND VT.VegetationID = VO.VegetationID'
            ' WHERE (Buffer = ?) AND (EP.Metadata = ?)  AND (E.EcoregionID = ? OR E.EcoregionID IS NULL)'
            ' GROUP BY R.ReachID', [buffer, epoch, buffer, epoch, ecoregion])
        results = {
            row['ReachID']: {
                veg_col: row['VegSuitability']
            }
            for row in database.curs.fetchall()
        }

    log.info('Vegetation suitability complete')
    return results
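
A minimal driver sketch reusing get_stream_buffers from Example #5; the path, output column name and ecoregion ID below are hypothetical:

    gpkg = '/path/to/brat.gpkg'
    for buffer in get_stream_buffers(gpkg):
        results = calculate_vegetation_suitability(
            gpkg, buffer, 'EX', 'iVeg_{}EX'.format(int(buffer)), '1')
        # results: {ReachID: {'iVeg_30EX': suitability}, ...}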
Example #7
def output_vegetation_raster(gpkg_path, raster_path, output_path, epoch,
                             prefix, ecoregion):
    """Output a vegetation suitability raster. This has no direct use in the process
    but it's useful as a reference layer and visual aid.

    Arguments:
        gpkg_path {str} -- Path to the BRAT geopackage database
        raster_path {str} -- path to input raster
        output_path {str} -- path to output raster
        epoch {str} -- Label identifying either 'existing' or 'historic'. Used for log messages only.
        prefix {str} -- Either 'EX' for existing or 'HPE' for historic.
        ecoregion {int} -- Database ID of the ecoregion associated with the watershed
    """
    log = Logger('Veg Suitability Rasters')
    log.info('Epoch: {}'.format(epoch))

    with SQLiteCon(gpkg_path) as database:

        # Get the database epoch that has the prefix 'EX' or 'HPE' in the metadata
        database.curs.execute('SELECT EpochID FROM Epochs WHERE Metadata = ?',
                              [prefix])
        row = database.curs.fetchone()
        if row is None:
            raise Exception(
                'Missing epoch in database with metadata value of "{}"'.format(
                    prefix))
        epochid = row['EpochID']

        database.curs.execute(
            'SELECT VegetationID, EffectiveSuitability '
            'FROM vwVegetationSuitability '
            'WHERE EpochID = ? AND EcoregionID = ?', [epochid, ecoregion])
        results = {
            row['VegetationID']: row['EffectiveSuitability']
            for row in database.curs.fetchall()
        }

    def translate_suit(in_val, in_nodata, out_nodata):
        if in_val == in_nodata:
            return out_nodata
        elif in_val in results:
            return results[in_val]
        log.warning('Could not find {} VegetationID={}'.format(prefix, in_val))
        return -1

    vector = np.vectorize(translate_suit)

    with rasterio.open(raster_path) as source_ds:
        out_meta = source_ds.meta
        out_meta['dtype'] = 'int16'
        out_meta['nodata'] = -9999
        out_meta['compress'] = 'deflate'

        with rasterio.open(output_path, "w", **out_meta) as dest_ds:
            progbar = ProgressBar(
                len(list(source_ds.block_windows(1))), 50,
                "Writing Vegetation Raster: {}".format(epoch))
            counter = 0
            for ji, window in dest_ds.block_windows(1):
                progbar.update(counter)
                counter += 1
                in_data = source_ds.read(1, window=window, masked=True)

                # Fill the masked values with the appropriate nodata vals
                # Unthresholded in the base band (mostly for debugging)
                out_data = vector(in_data, source_ds.meta['nodata'],
                                  out_meta['nodata'])
                dest_ds.write(np.int16(out_data), window=window, indexes=1)

            progbar.finish()
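
The reclassification core of translate_suit can be exercised standalone; a minimal sketch with a hypothetical lookup:

    import numpy as np

    lookup = {11: 4, 12: 3, 21: 0}   # hypothetical VegetationID -> suitability
    in_nodata, out_nodata = 255, -9999

    def translate(val):
        if val == in_nodata:
            return out_nodata
        return lookup.get(val, -1)   # -1 flags IDs missing from the lookup

    translate_vec = np.vectorize(translate)
    block = np.array([[11, 12], [21, 255]])
    print(translate_vec(block))      # [[4 3] [0 -9999]]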
Example #8
def brat_build(huc: int, flowlines: Path, dem: Path, slope: Path, hillshade: Path,
               existing_veg: Path, historical_veg: Path, output_folder: Path,
               streamside_buffer: float, riparian_buffer: float,
               reach_codes: List[str], canal_codes: List[str], peren_codes: List[str],
               flow_areas: Path, waterbodies: Path, max_waterbody: float,
               valley_bottom: Path, roads: Path, rail: Path, canals: Path, ownership: Path,
               elevation_buffer: float, meta: Dict[str, str]):
    """Build a BRAT project by segmenting a reach network and copying
    all the necessary layers into the resultant BRAT project

    Arguments:
        huc {int} -- Watershed identifier
        flowlines {str} -- Path to the raw, original polyline flowline ShapeFile
        dem {str} -- Path to the DEM raster for the watershed
        slope {str} -- Path to the slope raster
        hillshade {str} -- Path to the DEM hillshade raster
        existing_veg {str} -- Path to the existing vegetation raster
        historical_veg {str} -- Path to the historical vegetation raster
        output_folder {str} -- Output folder where the BRAT project will get created
        streamside_buffer {float} -- Streamside vegetation buffer (meters)
        riparian_buffer {float} -- Riparian vegetation buffer (meters)
        reach_codes {List[str]} -- NHD reach codes for features to include in outputs
        canal_codes {List[str]} -- NHD reach codes identifying canals
        peren_codes {List[str]} -- NHD reach codes identifying perennial streams
        flow_areas {str} -- Path to the polygon ShapeFile that contains large river outlines
        waterbodies {str} -- Path to the polygon ShapeFile containing water bodies
        max_waterbody {float} -- Area (sqm) of largest waterbody to be retained
        valley_bottom {str} -- Path to valley bottom polygon layer
        roads {str} -- Path to polyline roads ShapeFile
        rail {str} -- Path to polyline railway ShapeFile
        canals {str} -- Path to polyline canals ShapeFile
        ownership {str} -- Path to land ownership polygon ShapeFile
        elevation_buffer {float} -- Distance to buffer DEM when sampling elevation
        meta {Dict[str, str]} -- dictionary of riverscapes metadata key: value pairs
    """

    log = Logger("BRAT Build")
    log.info('HUC: {}'.format(huc))
    log.info('EPSG: {}'.format(cfg.OUTPUT_EPSG))

    project, _realization, proj_nodes = create_project(huc, output_folder)

    # Incorporate project metadata to the riverscapes project
    if meta is not None:
        project.add_metadata(meta)

    log.info('Adding input rasters to project')
    _dem_raster_path_node, dem_raster_path = project.add_project_raster(proj_nodes['Inputs'], LayerTypes['DEM'], dem)
    _existing_path_node, prj_existing_path = project.add_project_raster(proj_nodes['Inputs'], LayerTypes['EXVEG'], existing_veg)
    _historic_path_node, prj_historic_path = project.add_project_raster(proj_nodes['Inputs'], LayerTypes['HISTVEG'], historical_veg)
    project.add_project_raster(proj_nodes['Inputs'], LayerTypes['HILLSHADE'], hillshade)
    project.add_project_raster(proj_nodes['Inputs'], LayerTypes['SLOPE'], slope)
    project.add_project_geopackage(proj_nodes['Inputs'], LayerTypes['INPUTS'])
    project.add_project_geopackage(proj_nodes['Outputs'], LayerTypes['OUTPUTS'])

    inputs_gpkg_path = os.path.join(output_folder, LayerTypes['INPUTS'].rel_path)
    intermediates_gpkg_path = os.path.join(output_folder, LayerTypes['INTERMEDIATES'].rel_path)
    outputs_gpkg_path = os.path.join(output_folder, LayerTypes['OUTPUTS'].rel_path)

    # Make sure we're starting with empty/fresh geopackages
    GeopackageLayer.delete(inputs_gpkg_path)
    GeopackageLayer.delete(intermediates_gpkg_path)
    GeopackageLayer.delete(outputs_gpkg_path)

    # Copy all the original vectors to the inputs geopackage. This ensures they share the same spatial reference
    source_layers = {
        'FLOWLINES': flowlines,
        'FLOW_AREA': flow_areas,
        'WATERBODIES': waterbodies,
        'VALLEY_BOTTOM': valley_bottom,
        'ROADS': roads,
        'RAIL': rail,
        'CANALS': canals
    }

    input_layers = {}
    for input_key, rslayer in LayerTypes['INPUTS'].sub_layers.items():
        input_layers[input_key] = os.path.join(inputs_gpkg_path, rslayer.rel_path)
        copy_feature_class(source_layers[input_key], input_layers[input_key], cfg.OUTPUT_EPSG)

    # Create the output feature class fields. Only those listed here will get copied from the source
    with GeopackageLayer(outputs_gpkg_path, layer_name=LayerTypes['OUTPUTS'].sub_layers['BRAT_GEOMETRY'].rel_path, delete_dataset=True) as out_lyr:
        out_lyr.create_layer(ogr.wkbMultiLineString, epsg=cfg.OUTPUT_EPSG, options=['FID=ReachID'], fields={
            'WatershedID': ogr.OFTString,
            'FCode': ogr.OFTInteger,
            'TotDASqKm': ogr.OFTReal,
            'GNIS_Name': ogr.OFTString,
            'NHDPlusID': ogr.OFTReal
        })

    metadata = {
        'BRAT_Build_DateTime': datetime.datetime.now().isoformat(),
        'Streamside_Buffer': streamside_buffer,
        'Riparian_Buffer': riparian_buffer,
        'Reach_Codes': reach_codes,
        'Canal_Codes': canal_codes,
        'Max_Waterbody': max_waterbody,
        'Elevation_Buffer': elevation_buffer
    }

    # Execute the SQL to create the lookup tables in the output geopackage
    watershed_name = create_database(huc, outputs_gpkg_path, metadata, cfg.OUTPUT_EPSG, os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'database', 'brat_schema.sql'))
    project.add_metadata({'Watershed': watershed_name})

    # Copy the reaches into the output feature class layer, filtering by reach codes
    reach_geometry_path = os.path.join(outputs_gpkg_path, LayerTypes['OUTPUTS'].sub_layers['BRAT_GEOMETRY'].rel_path)
    build_network(input_layers['FLOWLINES'], input_layers['FLOW_AREA'], reach_geometry_path, waterbodies_path=input_layers['WATERBODIES'], epsg=cfg.OUTPUT_EPSG, reach_codes=reach_codes, create_layer=False)

    with SQLiteCon(outputs_gpkg_path) as database:
        # Data preparation SQL statements to handle any weird attributes
        database.curs.execute('INSERT INTO ReachAttributes (ReachID, Orig_DA, iGeo_DA, ReachCode, WatershedID, StreamName) SELECT ReachID, TotDASqKm, TotDASqKm, FCode, WatershedID, GNIS_NAME FROM ReachGeometry')
        database.curs.execute('UPDATE ReachAttributes SET IsPeren = 1 WHERE (ReachCode IN ({}))'.format(','.join(peren_codes)))
        database.curs.execute('UPDATE ReachAttributes SET iGeo_DA = 0 WHERE iGeo_DA IS NULL')

        # Register vwReaches as a feature layer as well as its geometry column
        database.curs.execute("""INSERT INTO gpkg_contents (table_name, data_type, identifier, min_x, min_y, max_x, max_y, srs_id)
            SELECT 'vwReaches', data_type, 'Reaches', min_x, min_y, max_x, max_y, srs_id FROM gpkg_contents WHERE table_name = 'ReachGeometry'""")

        database.curs.execute("""INSERT INTO gpkg_geometry_columns (table_name, column_name, geometry_type_name, srs_id, z, m)
            SELECT 'vwReaches', column_name, geometry_type_name, srs_id, z, m FROM gpkg_geometry_columns WHERE table_name = 'ReachGeometry'""")

        database.conn.commit()

    # Calculate the geophysical properties slope, min and max elevations
    reach_geometry(reach_geometry_path, dem_raster_path, elevation_buffer)

    # Calculate the conflict attributes ready for conservation
    conflict_attributes(outputs_gpkg_path, reach_geometry_path,
                        input_layers['VALLEY_BOTTOM'], input_layers['ROADS'], input_layers['RAIL'], input_layers['CANALS'],
                        ownership, 30, 5, cfg.OUTPUT_EPSG, canal_codes, intermediates_gpkg_path)

    # Calculate the vegetation cell counts for each epoch and buffer
    for label, veg_raster in [('Existing Veg', prj_existing_path), ('Historical Veg', prj_historic_path)]:
        for buffer in [streamside_buffer, riparian_buffer]:
            vegetation_summary(outputs_gpkg_path, '{} {}m'.format(label, buffer), veg_raster, buffer)

    log.info('BRAT build completed successfully.')
Example #9
def vegetation_summary(outputs_gpkg_path: str, label: str, veg_raster: str,
                       buffer: float):
    """ Loop through every reach in a BRAT database and
    retrieve the values from a vegetation raster within
    the specified buffer. Then store the tally of
    vegetation values in the BRAT database.

    Arguments:
        outputs_gpkg_path {str} -- Path to the BRAT outputs geopackage
        label {str} -- Progress bar label identifying this summary
        veg_raster {str} -- Path to vegetation raster
        buffer {float} -- Distance to buffer the reach polylines
    """

    log = Logger('Vegetation')
    log.info('Summarizing {}m vegetation buffer from {}'.format(
        int(buffer), veg_raster))

    # Retrieve the raster spatial reference and geotransformation
    dataset = gdal.Open(veg_raster)
    geo_transform = dataset.GetGeoTransform()
    raster_buffer = VectorBase.rough_convert_metres_to_raster_units(
        veg_raster, buffer)

    # Calculate the area of each raster cell in square metres
    conversion_factor = VectorBase.rough_convert_metres_to_raster_units(
        veg_raster, 1.0)
    cell_area = abs(geo_transform[1] * geo_transform[5]) / conversion_factor**2

    # Open the raster and then loop over all polyline features
    veg_counts = []
    with rasterio.open(veg_raster) as src, GeopackageLayer(
            os.path.join(outputs_gpkg_path, 'ReachGeometry')) as lyr:
        _srs, transform = VectorBase.get_transform_from_raster(
            lyr.spatial_ref, veg_raster)

        for feature, _counter, _progbar in lyr.iterate_features(label):
            reach_id = feature.GetFID()
            geom = feature.GetGeometryRef()
            if transform:
                geom.Transform(transform)

            polygon = VectorBase.ogr2shapely(geom).buffer(raster_buffer)

            try:
                # retrieve an array for the cells under the polygon
                raw_raster = mask(src, [polygon], crop=True)[0]
                mask_raster = np.ma.masked_values(raw_raster, src.nodata)
                # print(mask_raster)

                # Reclass the raster to dam suitability. Loop over present values for performance
                for oldvalue in np.unique(mask_raster):
                    if oldvalue is not np.ma.masked:
                        cell_count = np.count_nonzero(mask_raster == oldvalue)
                        veg_counts.append([
                            reach_id,
                            int(oldvalue), buffer, cell_count * cell_area,
                            cell_count
                        ])
            except Exception as ex:
                log.warning(
                    'Error obtaining vegetation raster values for ReachID {}'.
                    format(reach_id))
                log.warning(ex)

    # Write the reach vegetation values to the database. Because sqlite3
    # doesn't give us detailed feedback, we insert one record at a time so
    # that we can report which values cause constraint errors.
    with SQLiteCon(outputs_gpkg_path) as database:
        errs = 0
        batch_count = 0
        for veg_record in veg_counts:
            batch_count += 1
            try:
                database.conn.execute(
                    'INSERT INTO ReachVegetation (ReachID, VegetationID, Buffer, Area, CellCount) VALUES (?, ?, ?, ?, ?)',
                    veg_record)
            # Sqlite can't report on SQL errors so we have to print good log messages to help intuit what the problem is
            except sqlite3.IntegrityError as err:
                # This is likely a constraint error.
                errstr = "Integrity Error when inserting records: ReachID: {} VegetationID: {}".format(
                    veg_record[0], veg_record[1])
                log.error(errstr)
                errs += 1
            except sqlite3.Error as err:
                # This is any other kind of error
                errstr = "SQL Error when inserting records: ReachID: {} VegetationID: {} ERROR: {}".format(
                    veg_record[0], veg_record[1], str(err))
                log.error(errstr)
                errs += 1
        if errs > 0:
            raise Exception(
                'Errors were found inserting records into the database. Cannot continue.'
            )
        database.conn.commit()

    log.info('Vegetation summary complete')
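
The per-value tally step above can be exercised standalone; a minimal sketch of counting cells per value in a masked array:

    import numpy as np

    raw = np.array([[3, 3], [7, -9999]])
    mask_raster = np.ma.masked_values(raw, -9999)

    # Loop over the values present in the block, skipping the masked nodata entry
    for value in np.unique(mask_raster):
        if value is not np.ma.masked:
            print(int(value), np.count_nonzero(mask_raster == value))
    # prints: 3 2, then 7 1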
Example #10
def brat_run(project_root, csv_dir):
    """Run the BRAT model and calculat dam capacity
    as well as conservation and restoration.

    Arguments:
        project_root {str} -- Path to the Riverscapes project directory containing an existing BRAT build
        csv_dir {str} -- Path to the directory containing the BRAT lookup CSV data files
    """

    log = Logger('BRAT Run')
    log.info('Starting BRAT run')

    project = RSProject(cfg, project_root)

    project.add_metadata({
        'BRATRunVersion': cfg.version,
        'BRATRunTimestamp': str(int(time.time()))
    })

    realizations = project.XMLBuilder.find('Realizations').findall('BRAT')
    if len(realizations) != 1:
        raise Exception(
            'Could not find a valid realization inside the existing brat project'
        )

    # Fetch some XML nodes we'll need to alter
    r_node = realizations[0]
    input_node = r_node.find('Inputs')
    intermediate_node = r_node.find('Intermediates')
    outputs_node = r_node.find('Outputs')

    # Get the filepath for the output geopackage
    gpkg_path = os.path.join(
        project.project_dir,
        r_node.find('Outputs/Geopackage[@id="OUTPUTS"]/Path').text)

    if not os.path.isfile(gpkg_path):
        raise Exception(
            'BRAT geopackage file missing at {}. You must run Brat Build first.'
            .format(gpkg_path))

    # Update any of the lookup tables we need
    update_database(gpkg_path, csv_dir)

    # Store the BRAT Run date time to the database (for reporting)
    store_metadata(gpkg_path, 'BRAT_Run_DateTime',
                   datetime.datetime.now().isoformat())

    watershed, max_drainage_area, ecoregion = get_watershed_info(gpkg_path)

    # Set database output columns to NULL before processing (note omission of string lookup fields from view)
    set_reach_fields_null(gpkg_path, output_fields[ogr.OFTReal])
    set_reach_fields_null(gpkg_path, output_fields[ogr.OFTInteger])

    # Calculate the low and high flow using regional discharge equations
    hydrology(gpkg_path, 'Low', watershed)
    hydrology(gpkg_path, '2', watershed)

    # Calculate the vegetation and combined FIS for the existing and historical vegetation epochs
    for epoch, prefix, ltype, orig_id in Epochs:

        # Calculate the vegetation suitability for each buffer
        for buffer in get_stream_buffers(gpkg_path):
            vegetation_suitability(gpkg_path, buffer, prefix, ecoregion)

        # Run the vegetation and then combined FIS for this epoch
        vegetation_fis(gpkg_path, epoch, prefix)
        combined_fis(gpkg_path, epoch, prefix, max_drainage_area)

        orig_raster = os.path.join(
            project.project_dir,
            input_node.find('Raster[@id="{}"]/Path'.format(orig_id)).text)
        _veg_suit_raster_node, veg_suit_raster = project.add_project_raster(
            intermediate_node, LayerTypes[ltype], None, True)
        output_vegetation_raster(gpkg_path, orig_raster, veg_suit_raster,
                                 epoch, prefix, ecoregion)

    # Calculate departure from historical conditions
    with SQLiteCon(gpkg_path) as database:
        log.info('Calculating departure from historic conditions')
        database.curs.execute(
            'UPDATE ReachAttributes SET mCC_HisDep = mCC_HPE_CT - mCC_EX_CT WHERE (mCC_EX_CT IS NOT NULL) AND (mCC_HPE_CT IS NOT NULL)'
        )
        database.conn.commit()

    # Land use intensity, conservation and restoration
    land_use(gpkg_path, 100.0)
    conservation(gpkg_path)

    report_path = os.path.join(project.project_dir,
                               LayerTypes['BRAT_RUN_REPORT'].rel_path)
    project.add_report(outputs_node,
                       LayerTypes['BRAT_RUN_REPORT'],
                       replace=True)

    report = BratReport(gpkg_path, report_path, project)
    report.write()

    log.info('BRAT run complete')
Example #11
def rvd(huc: int, flowlines_orig: Path, existing_veg_orig: Path, historic_veg_orig: Path,
        valley_bottom_orig: Path, output_folder: Path, reach_codes: List[str], flow_areas_orig: Path, waterbodies_orig: Path, meta=None):
    """[Generate segmented reaches on flowline network and calculate RVD from historic and existing vegetation rasters

    Args:
        huc (integer): Watershed ID
        flowlines_orig (Path): Segmented flowlines feature layer
        existing_veg_orig (Path): LANDFIRE version 2.00 evt raster, with adjacent xml metadata file
        historic_veg_orig (Path): LANDFIRE version 2.00 bps raster, with adjacent xml metadata file
        valley_bottom_orig (Path): Vbet polygon feature layer
        output_folder (Path): destination folder for project output
        reach_codes (List[int]): NHD reach codes for features to include in outputs
        flow_areas_orig (Path): NHD flow area polygon feature layer
        waterbodies_orig (Path): NHD waterbodies polygon feature layer
        meta (Dict[str,str]): dictionary of riverscapes metadata key: value pairs
    """

    log = Logger("RVD")
    log.info('RVD v.{}'.format(cfg.version))

    try:
        int(huc)
    except ValueError:
        raise Exception('Invalid HUC identifier "{}". Must be an integer'.format(huc))

    if len(str(huc)) not in (4, 8):
        raise Exception('Invalid HUC identifier. Must be a 4 or 8 digit integer')

    safe_makedirs(output_folder)

    project, _realization, proj_nodes = create_project(huc, output_folder)

    # Incorporate project metadata to the riverscapes project
    if meta is not None:
        project.add_metadata(meta)

    log.info('Adding inputs to project')
    _prj_existing_path_node, prj_existing_path = project.add_project_raster(proj_nodes['Inputs'], LayerTypes['EXVEG'], existing_veg_orig)
    _prj_historic_path_node, prj_historic_path = project.add_project_raster(proj_nodes['Inputs'], LayerTypes['HISTVEG'], historic_veg_orig)

    # TODO: Don't forget the att_filter
    # _prj_flowlines_node, prj_flowlines = project.add_project_geopackage(proj_nodes['Inputs'], LayerTypes['INPUTS'], flowlines, att_filter="\"ReachCode\" Like '{}%'".format(huc))
    # Copy in the vectors we need
    inputs_gpkg_path = os.path.join(output_folder, LayerTypes['INPUTS'].rel_path)
    intermediates_gpkg_path = os.path.join(output_folder, LayerTypes['INTERMEDIATES'].rel_path)
    outputs_gpkg_path = os.path.join(output_folder, LayerTypes['OUTPUTS'].rel_path)

    # Make sure we're starting with empty/fresh geopackages
    GeopackageLayer.delete(inputs_gpkg_path)
    GeopackageLayer.delete(intermediates_gpkg_path)
    GeopackageLayer.delete(outputs_gpkg_path)

    # Copy our input layers and also find the difference in the geometry for the valley bottom
    flowlines_path = os.path.join(inputs_gpkg_path, LayerTypes['INPUTS'].sub_layers['FLOWLINES'].rel_path)
    vbottom_path = os.path.join(inputs_gpkg_path, LayerTypes['INPUTS'].sub_layers['VALLEY_BOTTOM'].rel_path)

    copy_feature_class(flowlines_orig, flowlines_path, epsg=cfg.OUTPUT_EPSG)
    copy_feature_class(valley_bottom_orig, vbottom_path, epsg=cfg.OUTPUT_EPSG)

    with GeopackageLayer(flowlines_path) as flow_lyr:
        # Set the output spatial ref as this for the whole project
        out_srs = flow_lyr.spatial_ref
        meter_conversion = flow_lyr.rough_convert_metres_to_vector_units(1)
        distance_buffer = flow_lyr.rough_convert_metres_to_vector_units(1)

    # Transform issues reading 102003 as an EPSG id. Using the spatial reference WKT seems to work; however, ArcGIS has problems loading feature classes written this way...
    raster_srs = ogr.osr.SpatialReference()
    ds = gdal.Open(prj_existing_path, 0)
    raster_srs.ImportFromWkt(ds.GetProjectionRef())
    raster_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    transform_shp_to_raster = VectorBase.get_transform(out_srs, raster_srs)

    gt = ds.GetGeoTransform()
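    # gt[1] is the pixel width and gt[5] the pixel height (negative for
    # north-up rasters), so dividing by the metre conversion twice yields
    # the cell area in square metres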
    cell_area = ((gt[1] / meter_conversion) * (-gt[5] / meter_conversion))

    # Create the output feature class fields
    with GeopackageLayer(outputs_gpkg_path, layer_name='ReachGeometry', delete_dataset=True) as out_lyr:
        out_lyr.create_layer(ogr.wkbMultiLineString, spatial_ref=out_srs, options=['FID=ReachID'], fields={
            'GNIS_NAME': ogr.OFTString,
            'ReachCode': ogr.OFTString,
            'TotDASqKm': ogr.OFTReal,
            'NHDPlusID': ogr.OFTReal,
            'WatershedID': ogr.OFTInteger
        })

    metadata = {
        'RVD_DateTime': datetime.datetime.now().isoformat(),
        'Reach_Codes': reach_codes
    }

    # Execute the SQL to create the lookup tables in the RVD geopackage SQLite database
    watershed_name = create_database(huc, outputs_gpkg_path, metadata, cfg.OUTPUT_EPSG, os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'database', 'rvd_schema.sql'))
    project.add_metadata({'Watershed': watershed_name})

    geom_vbottom = get_geometry_unary_union(vbottom_path, spatial_ref=raster_srs)

    flowareas_path = None
    if flow_areas_orig:
        flowareas_path = os.path.join(inputs_gpkg_path, LayerTypes['INPUTS'].sub_layers['FLOW_AREA'].rel_path)
        copy_feature_class(flow_areas_orig, flowareas_path, epsg=cfg.OUTPUT_EPSG)
        geom_flow_areas = get_geometry_unary_union(flowareas_path)
        # Difference with existing vbottom
        geom_vbottom = geom_vbottom.difference(geom_flow_areas)
    else:
        del LayerTypes['INPUTS'].sub_layers['FLOW_AREA']

    waterbodies_path = None
    if waterbodies_orig:
        waterbodies_path = os.path.join(inputs_gpkg_path, LayerTypes['INPUTS'].sub_layers['WATERBODIES'].rel_path)
        copy_feature_class(waterbodies_orig, waterbodies_path, epsg=cfg.OUTPUT_EPSG)
        geom_waterbodies = get_geometry_unary_union(waterbodies_path)
        # Difference with existing vbottom
        geom_vbottom = geom_vbottom.difference(geom_waterbodies)
    else:
        del LayerTypes['INPUTS'].sub_layers['WATERBODIES']

    # Add the inputs to the XML
    _nd, _in_gpkg_path, _sublayers = project.add_project_geopackage(proj_nodes['Inputs'], LayerTypes['INPUTS'])

    # Filter the flow lines to just the required features and then segment to desired length
    # TODO: These are brat methods that need to be refactored to use VectorBase layers
    cleaned_path = os.path.join(outputs_gpkg_path, 'ReachGeometry')
    build_network(flowlines_path, flowareas_path, cleaned_path, waterbodies_path=waterbodies_path, epsg=cfg.OUTPUT_EPSG, reach_codes=reach_codes, create_layer=False)

    # Generate Voronoi polygons
    log.info("Calculating Voronoi Polygons...")

    # Add all the points (including islands) to the list
    flowline_thiessen_points_groups = centerline_points(cleaned_path, distance_buffer, transform_shp_to_raster)
    flowline_thiessen_points = [pt for group in flowline_thiessen_points_groups.values() for pt in group]
    simple_save([pt.point for pt in flowline_thiessen_points], ogr.wkbPoint, raster_srs, "Thiessen_Points", intermediates_gpkg_path)

    # Exterior is the shell and there is only ever 1
    myVorL = NARVoronoi(flowline_thiessen_points)

    # Generate Thiessen Polys
    myVorL.createshapes()

    # Dissolve by flowlines
    log.info("Dissolving Thiessen Polygons")
    dissolved_polys = myVorL.dissolve_by_property('fid')

    # Clip Thiessen Polys
    log.info("Clipping Thiessen Polygons to Valley Bottom")

    clipped_thiessen = clip_polygons(geom_vbottom, dissolved_polys)

    # Save Intermediates
    simple_save(clipped_thiessen.values(), ogr.wkbPolygon, raster_srs, "Thiessen", intermediates_gpkg_path)
    simple_save(dissolved_polys.values(), ogr.wkbPolygon, raster_srs, "ThiessenPolygonsDissolved", intermediates_gpkg_path)
    simple_save(myVorL.polys, ogr.wkbPolygon, raster_srs, "ThiessenPolygonsRaw", intermediates_gpkg_path)
    _nd, _inter_gpkg_path, _sublayers = project.add_project_geopackage(proj_nodes['Intermediates'], LayerTypes['INTERMEDIATES'])

    # OLD METHOD FOR AUDIT
    # dissolved_polys2 = dissolve_by_points(flowline_thiessen_points_groups, myVorL.polys)
    # simple_save(dissolved_polys2.values(), ogr.wkbPolygon, out_srs, "ThiessenPolygonsDissolved_OLD", intermediates_gpkg_path)

    # Load Vegetation Rasters
    log.info(f"Loading Existing and Historic Vegetation Rasters")
    vegetation = {}
    vegetation["EXISTING"] = load_vegetation_raster(prj_existing_path, outputs_gpkg_path, True, output_folder=os.path.join(output_folder, 'Intermediates'))
    vegetation["HISTORIC"] = load_vegetation_raster(prj_historic_path, outputs_gpkg_path, False, output_folder=os.path.join(output_folder, 'Intermediates'))

    for epoch in vegetation.keys():
        for name in vegetation[epoch].keys():
            if not f"{epoch}_{name}" == "HISTORIC_LUI":
                project.add_project_raster(proj_nodes['Intermediates'], LayerTypes[f"{epoch}_{name}"])

    if vegetation["EXISTING"]["RAW"].shape != vegetation["HISTORIC"]["RAW"].shape:
        raise Exception('Vegetation raster shapes are not equal Existing={} Historic={}. Cannot continue'.format(vegetation["EXISTING"]["RAW"].shape, vegetation["HISTORIC"]["RAW"].shape))

    # Vegetation zone calculations
    riparian_zone_arrays = {}
    riparian_zone_arrays["RIPARIAN_ZONES"] = ((vegetation["EXISTING"]["RIPARIAN"] + vegetation["HISTORIC"]["RIPARIAN"]) > 0) * 1
    riparian_zone_arrays["NATIVE_RIPARIAN_ZONES"] = ((vegetation["EXISTING"]["NATIVE_RIPARIAN"] + vegetation["HISTORIC"]["NATIVE_RIPARIAN"]) > 0) * 1
    riparian_zone_arrays["VEGETATION_ZONES"] = ((vegetation["EXISTING"]["VEGETATED"] + vegetation["HISTORIC"]["VEGETATED"]) > 0) * 1

    # Save Intermediate Rasters
    for name, raster in riparian_zone_arrays.items():
        save_intarr_to_geotiff(raster, os.path.join(output_folder, "Intermediates", f"{name}.tif"), prj_existing_path)
        project.add_project_raster(proj_nodes['Intermediates'], LayerTypes[name])

    # Calculate Riparian Departure per Reach
    riparian_arrays = {f"{epoch.capitalize()}{(name.capitalize()).replace('Native_riparian', 'NativeRiparian')}Mean": array for epoch, arrays in vegetation.items() for name, array in arrays.items() if name in ["RIPARIAN", "NATIVE_RIPARIAN"]}

    # Vegetation Cell Counts
    raw_arrays = {f"{epoch}": array for epoch, arrays in vegetation.items() for name, array in arrays.items() if name == "RAW"}

    # Generate Vegetation Conversions
    vegetation_change = (vegetation["HISTORIC"]["CONVERSION"] - vegetation["EXISTING"]["CONVERSION"])
    save_intarr_to_geotiff(vegetation_change, os.path.join(output_folder, "Intermediates", "Conversion_Raster.tif"), prj_existing_path)
    project.add_project_raster(proj_nodes['Intermediates'], LayerTypes['VEGETATION_CONVERSION'])

    # load conversion types dictionary from database
    conn = sqlite3.connect(outputs_gpkg_path)
    conn.row_factory = dict_factory
    curs = conn.cursor()
    curs.execute('SELECT * FROM ConversionTypes')
    conversion_classifications = curs.fetchall()
    curs.execute('SELECT * FROM vwConversions')
    conversion_ids = curs.fetchall()

    # Split vegetation change classes into binary arrays
    vegetation_change_arrays = {
        c['FieldName']: (vegetation_change == int(c["TypeValue"])) * 1 if int(c["TypeValue"]) in np.unique(vegetation_change) else None
        for c in conversion_classifications
    }

    # Calculate average and unique cell counts per reach
    progbar = ProgressBar(len(clipped_thiessen.keys()), 50, "Extracting array values by reach...")
    counter = 0
    discarded = 0
    with rasterio.open(prj_existing_path) as dataset:
        unique_vegetation_counts = {}
        reach_average_riparian = {}
        reach_average_change = {}
        for reachid, poly in clipped_thiessen.items():
            counter += 1
            progbar.update(counter)
            # we can discard a lot of shapes here.
            if not poly.is_valid or poly.is_empty or poly.area == 0 or poly.geom_type not in ["Polygon", "MultiPolygon"]:
                discarded += 1
                continue

            raw_values_unique = {}
            change_values_mean = {}
            riparian_values_mean = {}
            reach_raster = np.ma.masked_invalid(
                features.rasterize(
                    [poly],
                    out_shape=dataset.shape,
                    transform=dataset.transform,
                    all_touched=True,
                    fill=np.nan))
            for raster_name, raster in raw_arrays.items():
                if raster is not None:
                    current_raster = np.ma.masked_array(raster, mask=reach_raster.mask)
                    raw_values_unique[raster_name] = np.unique(np.ma.filled(current_raster, fill_value=0), return_counts=True)
                else:
                    raw_values_unique[raster_name] = []
            for raster_name, raster in riparian_arrays.items():
                if raster is not None:
                    current_raster = np.ma.masked_array(raster, mask=reach_raster.mask)
                    riparian_values_mean[raster_name] = np.ma.mean(current_raster)
                else:
                    riparian_values_mean[raster_name] = 0.0
            for raster_name, raster in vegetation_change_arrays.items():
                if raster is not None:
                    current_raster = np.ma.masked_array(raster, mask=reach_raster.mask)
                    change_values_mean[raster_name] = np.ma.mean(current_raster)
                else:
                    change_values_mean[raster_name] = 0.0
            unique_vegetation_counts[reachid] = raw_values_unique
            reach_average_riparian[reachid] = riparian_values_mean
            reach_average_change[reachid] = change_values_mean

    progbar.finish()

    with SQLiteCon(outputs_gpkg_path) as gpkg:
        # Ensure all reaches are present in the ReachAttributes table before storing RVD output values
        gpkg.curs.execute('INSERT INTO ReachAttributes (ReachID) SELECT ReachID FROM ReachGeometry;')

        errs = 0
        for reachid, epochs in unique_vegetation_counts.items():
            for epoch in epochs.values():
                insert_values = [[reachid, int(vegetationid), float(count * cell_area), int(count)] for vegetationid, count in zip(epoch[0], epoch[1]) if vegetationid != 0]
                try:
                    gpkg.curs.executemany('''INSERT INTO ReachVegetation (
                        ReachID,
                        VegetationID,
                        Area,
                        CellCount)
                        VALUES (?,?,?,?)''', insert_values)
                # Sqlite can't report on SQL errors so we have to print good log messages to help intuit what the problem is
                except sqlite3.IntegrityError as err:
                    # This is likely a constraint error.
                    errstr = "Integrity Error when inserting records: ReachID: {} VegetationIDs: {}".format(reachid, str(list(epoch[0])))
                    log.error(errstr)
                    errs += 1
                except sqlite3.Error as err:
                    # This is any other kind of error
                    errstr = "SQL Error when inserting records: ReachID: {} VegetationIDs: {} ERROR: {}".format(reachid, str(list(epoch[0])), str(err))
                    log.error(errstr)
                    errs += 1
        if errs > 0:
            raise Exception('Errors were found inserting records into the database. Cannot continue.')
        gpkg.conn.commit()

    # load RVD departure levels from DepartureLevels database table
    with SQLiteCon(outputs_gpkg_path) as gpkg:
        gpkg.curs.execute('SELECT LevelID, MaxRVD FROM DepartureLevels ORDER BY MaxRVD ASC')
        departure_levels = gpkg.curs.fetchall()

    # Calculate Average Departure for Riparian and Native Riparian
    riparian_departure_values = riparian_departure(reach_average_riparian, departure_levels)
    write_db_attributes(outputs_gpkg_path, riparian_departure_values, departure_type_columns)

    # Add Conversion Code, Type to Vegetation Conversion
    with SQLiteCon(outputs_gpkg_path) as gpkg:
        gpkg.curs.execute('SELECT LevelID, MaxValue, NAME FROM ConversionLevels ORDER BY MaxValue ASC')
        conversion_levels = gpkg.curs.fetchall()
    reach_values_with_conversion_codes = classify_conversions(reach_average_change, conversion_ids, conversion_levels)
    write_db_attributes(outputs_gpkg_path, reach_values_with_conversion_codes, rvd_columns)

    # # Write Output to GPKG table
    # log.info('Insert values to GPKG tables')

    # # TODO move this to write_attributes method
    # with get_shp_or_gpkg(outputs_gpkg_path, layer_name='ReachAttributes', write=True, ) as in_layer:
    #     # Create each field and store the name and index in a list of tuples
    #     field_indices = [(field, in_layer.create_field(field, field_type)) for field, field_type in {
    #         "FromConifer": ogr.OFTReal,
    #         "FromDevegetated": ogr.OFTReal,
    #         "FromGrassShrubland": ogr.OFTReal,
    #         "FromDeciduous": ogr.OFTReal,
    #         "NoChange": ogr.OFTReal,
    #         "Deciduous": ogr.OFTReal,
    #         "GrassShrubland": ogr.OFTReal,
    #         "Devegetation": ogr.OFTReal,
    #         "Conifer": ogr.OFTReal,
    #         "Invasive": ogr.OFTReal,
    #         "Development": ogr.OFTReal,
    #         "Agriculture": ogr.OFTReal,
    #         "ConversionCode": ogr.OFTInteger,
    #         "ConversionType": ogr.OFTString}.items()]

    #     for feature, _counter, _progbar in in_layer.iterate_features("Writing Attributes", write_layers=[in_layer]):
    #         reach = feature.GetFID()
    #         if reach not in reach_values_with_conversion_codes:
    #             continue

    #         # Set all the field values and then store the feature
    #         for field, _idx in field_indices:
    #             if field in reach_values_with_conversion_codes[reach]:
    #                 if not reach_values_with_conversion_codes[reach][field]:
    #                     feature.SetField(field, None)
    #                 else:
    #                     feature.SetField(field, reach_values_with_conversion_codes[reach][field])
    #         in_layer.ogr_layer.SetFeature(feature)

    #     # Create each field and store the name and index in a list of tuples
    #     field_indices = [(field, in_layer.create_field(field, field_type)) for field, field_type in {
    #         "EXISTING_RIPARIAN_MEAN": ogr.OFTReal,
    #         "HISTORIC_RIPARIAN_MEAN": ogr.OFTReal,
    #         "RIPARIAN_DEPARTURE": ogr.OFTReal,
    #         "EXISTING_NATIVE_RIPARIAN_MEAN": ogr.OFTReal,
    #         "HISTORIC_NATIVE_RIPARIAN_MEAN": ogr.OFTReal,
    #         "NATIVE_RIPARIAN_DEPARTURE": ogr.OFTReal, }.items()]

    #     for feature, _counter, _progbar in in_layer.iterate_features("Writing Attributes", write_layers=[in_layer]):
    #         reach = feature.GetFID()
    #         if reach not in riparian_departure_values:
    #             continue

    #         # Set all the field values and then store the feature
    #         for field, _idx in field_indices:
    #             if field in riparian_departure_values[reach]:
    #                 if not riparian_departure_values[reach][field]:
    #                     feature.SetField(field, None)
    #                 else:
    #                     feature.SetField(field, riparian_departure_values[reach][field])
    #         in_layer.ogr_layer.SetFeature(feature)

    # Add the outputs to the XML (the intermediates geopackage was already added above)
    project.add_project_geopackage(proj_nodes['Outputs'], LayerTypes['OUTPUTS'])

    # Add the report to the XML
    report_path = os.path.join(project.project_dir, LayerTypes['REPORT'].rel_path)
    project.add_report(proj_nodes['Outputs'], LayerTypes['REPORT'], replace=True)

    report = RVDReport(report_path, project)
    report.write()

    log.info('RVD complete')
Example #12
def hydrology(gpkg_path: str, prefix: str, huc: str):
    """Calculate low flow, peak flow discharges for each reach
    in a BRAT database

    Arguments:
        gpkg_path {str} -- Path to the BRAT geopackage database
        prefix {str} -- 'Low' or '2', identifying which discharge (QLow or Q2) to calculate
        huc {str} -- watershed identifier

    Raises:
        Exception: When the watershed is missing the regional discharge equation
    """

    hydrology_field = 'iHyd_Q{}'.format(prefix)
    streampower_field = 'iHyd_SP{}'.format(prefix)

    log = Logger('Hydrology')
    log.info('Calculating Q{} hydrology for HUC {}'.format(prefix, huc))
    log.info('Discharge field: {}'.format(hydrology_field))
    log.info('Stream power field: {}'.format(streampower_field))

    # Load the hydrology equation for the HUC
    with SQLiteCon(gpkg_path) as database:
        database.curs.execute(
            'SELECT Q{} As Q FROM Watersheds WHERE WatershedID = ?'.format(
                prefix), [huc])
        row = database.curs.fetchone()
        if row is None or not row['Q']:
            raise Exception('Missing {} hydrology formula for HUC {}'.format(
                prefix, huc))
        equation = row['Q'].replace('^', '**')

        log.info('Regional curve: {}'.format(equation))

        # Load the hydrology CONVERTED parameters for the HUC (the values will be in the same units as used in the regional equations)
        database.curs.execute(
            'SELECT Parameter, ConvertedValue FROM vwHydroParams WHERE WatershedID = ?',
            [huc])
        params = {
            row['Parameter']: row['ConvertedValue']
            for row in database.curs.fetchall()
        }
        for key, value in params.items():
            log.info('Param: {} = {:.2f}'.format(key, value))

        # Load the conversion factor for converting reach attribute drainage areas to the values used in the regional equations
        database.curs.execute(
            'SELECT Conversion FROM HydroParams WHERE Name = ?',
            [DRNAREA_PARAM])
        drainage_conversion_factor = database.curs.fetchone()['Conversion']
        log.info('Reach drainage area attribute conversion factor = {}'.format(
            drainage_conversion_factor))

    # Load the discharges for each reach
    reaches = load_attributes(gpkg_path, ['iGeo_DA'], '(iGeo_DA IS NOT NULL)')
    log.info('{:,} reaches loaded with valid drainage area values'.format(
        len(reaches)))

    # Calculate the discharges for each reach
    results = calculate_hydrology(reaches, equation, params,
                                  drainage_conversion_factor, hydrology_field)
    log.info('{:,} reach hydrology values calculated.'.format(len(results)))

    # Write the discharges to the database
    write_db_attributes(gpkg_path, results, [hydrology_field])

    # Convert discharges to stream power
    with SQLiteCon(gpkg_path) as database:
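        # Stream power is rho * g * Q * S: rho * g = 1000 kg/m3 * 9.80665 m/s2,
        # and 0.028316846592 converts the discharge from cfs to m3/s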
        database.curs.execute(
            'UPDATE ReachAttributes SET {0} = ROUND((1000 * 9.80665) * iGeo_Slope * ({1} * 0.028316846592), 2)'
            ' WHERE ({1} IS NOT NULL) AND (iGeo_Slope IS NOT NULL)'.format(
                streampower_field, hydrology_field))
        database.conn.commit()

    log.info('Hydrology calculation complete')
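
A minimal usage sketch, mirroring the brat_run driver in Example #10 (the path and HUC are hypothetical):

    hydrology('/path/to/brat.gpkg', 'Low', '17060304')  # writes iHyd_QLow / iHyd_SPLow
    hydrology('/path/to/brat.gpkg', '2', '17060304')    # writes iHyd_Q2 / iHyd_SP2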