Code example #1
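A test that verifies the specified-head (CHD) boundary of an inset model against the parent model's head solution: each inset boundary cell is mapped back to the corresponding parent cell with get_ij (row, column) and get_layer (layer).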
def test_get_inset_boundary_heads(tmr, parent_heads):
    """Verify that inset model specified head boundary accurately
    reflects parent model head solution, including when cells
    are dry or missing (e.g. pinched out cells in MF6).
    """
    bheads_df = tmr.get_inset_boundary_heads()
    groups = bheads_df.groupby('per')
    all_kstpkper = parent_heads.get_kstpkper()
    kstpkper_list = [all_kstpkper[0], all_kstpkper[-1]]
    for kstp, kper in kstpkper_list:
        hds = parent_heads.get_data(kstpkper=(kstp, kper))
        # copy so the 'cellid' column can be added without pandas warnings
        df = groups.get_group(kper).copy()
        df['cellid'] = list(zip(df.k, df.i, df.j))
        # check for duplicate locations (esp. corners)
        # in mf2005, duplicate chd heads will be summed
        assert not df.cellid.duplicated().any()

        # x, y, z locations of inset model boundary cells
        ix = tmr.inset.modelgrid.xcellcenters[df.i, df.j]
        iy = tmr.inset.modelgrid.ycellcenters[df.i, df.j]
        iz = tmr.inset.modelgrid.zcellcenters[df.k, df.i, df.j]

        # parent model grid cells associated with inset boundary cells
        i, j = get_ij(tmr.parent.modelgrid, ix, iy)
        k = get_layer(tmr.parent.dis.botm.array, i, j, iz)
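get_ij and get_layer are modflow-setup grid utilities. As a rough sketch of what a get_layer-style lookup does (a hypothetical simplification for illustration, not the library's implementation):

import numpy as np

def get_layer_sketch(botm, i, j, elev):
    """Hypothetical simplification of a get_layer-style lookup:
    for each (i, j, elev) triple, return the index of the first layer
    (scanning down from the top) whose bottom elevation is below elev,
    clamped to the bottom layer when elev is below the model.

    botm : 3D array of layer bottom elevations, shape (nlay, nrow, ncol)
    """
    i, j, elev = np.atleast_1d(i), np.atleast_1d(j), np.atleast_1d(elev)
    nlay = botm.shape[0]
    layers = np.empty(len(elev), dtype=int)
    for n, (ii, jj, e) in enumerate(zip(i, j, elev)):
        below = botm[:, ii, jj] < e
        layers[n] = below.argmax() if below.any() else nlay - 1
    return layers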
Code example #2
File: bcs.py Project: surajitdb/modflow-setup
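setup_ghb_data assembles the MODFLOW input table for general-head boundary (GHB) cells: it rasterizes a shapefile onto the model grid, samples a DEM for the minimum elevation in each GHB cell, converts elevation units if needed, assigns each cell to the layer containing its boundary head, and drops cells that fall below the model bottom or in inactive areas.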
def setup_ghb_data(model):

    m = model
    source_data = model.cfg['ghb'].get('source_data').copy()
    # get the GHB cells
    # todo: generalize more of the GHB setup code and move it somewhere else
    if 'shapefile' in source_data:
        shapefile_data = source_data['shapefile']
        key = [k for k in shapefile_data.keys() if 'filename' in k.lower()][0]
        shapefile_name = shapefile_data.pop(key)
        ghbcells = rasterize(shapefile_name, m.modelgrid, **shapefile_data)
    else:
        raise NotImplementedError('Only shapefile input supported for GHBs')

    cond = model.cfg['ghb'].get('cond')
    if cond is None:
        raise KeyError("key 'cond' not found in GHB yaml input. "
                       "Must supply conductance via this key for GHB setup.")

    # sample DEM for minimum elevation in each cell with a GHB
    # todo: GHB: allow time-varying bheads via csv input
    vertices = np.array(m.modelgrid.vertices)[ghbcells.flat > 0, :, :]
    polygons = [Polygon(vrts) for vrts in vertices]
    if 'dem' in source_data:
        key = [
            k for k in source_data['dem'].keys() if 'filename' in k.lower()
        ][0]
        dem_filename = source_data['dem'].pop(key)
        with rasterio.open(dem_filename) as src:
            meta = src.meta
            # get the DEM coordinate reference system while the dataset is open
            try:
                from gisutils import get_authority_crs
                dem_crs = get_authority_crs(src.crs)
            except ImportError:
                dem_crs = pyproj.crs.CRS.from_user_input(src.crs)

        # reproject the cell polygons to the DEM crs if needed
        if dem_crs != m.modelgrid.crs:
            polygons = project(polygons, m.modelgrid.crs, dem_crs)

        # count any DEM pixel touched by a cell polygon if the DEM
        # resolution is coarser than the model grid spacing
        all_touched = meta['transform'][0] > m.modelgrid.delr[0]
        results = zonal_stats(polygons,
                              dem_filename,
                              stats='min',
                              all_touched=all_touched)
        min_elevs = np.ones((m.nrow * m.ncol), dtype=float) * np.nan
        min_elevs[ghbcells.flat > 0] = np.array([r['min'] for r in results])
        units_key = [k for k in source_data['dem'] if 'units' in k]
        if len(units_key) > 0:
            min_elevs *= convert_length_units(source_data['dem'][units_key[0]],
                                              model.length_units)
        min_elevs = np.reshape(min_elevs, (m.nrow, m.ncol))
    else:
        raise NotImplementedError(
            'Must supply DEM to sample for GHB elevations\n'
            '(GHB: source_data: dem:)')

    # make a DataFrame with MODFLOW input
    i, j = np.indices((m.nrow, m.ncol))
    df = pd.DataFrame({
        'per': 0,
        'k': 0,
        'i': i.flat,
        'j': j.flat,
        'bhead': min_elevs.flat,
        'cond': cond
    })
    df.dropna(axis=0, inplace=True)

    # assign layers so that bhead is above botms
    df['k'] = get_layer(model.dis.botm.array, df.i, df.j, df.bhead)
    # remove GHB cells where the specified head is below the model bottom
    below_bottom_of_model = df.bhead < model.dis.botm.array[-1, df.i, df.j] + 0.01
    df = df.loc[~below_bottom_of_model].copy()

    # exclude inactive cells
    k, i, j = df.k, df.i, df.j
    if model.version == 'mf6':
        active_cells = model.idomain[k, i, j] >= 1
    else:
        active_cells = model.ibound[k, i, j] >= 1
    df = df.loc[active_cells]
    return df
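For reference, a hedged sketch of the configuration this function expects; the key names are inferred from the lookups above ('filename' and 'units' are matched as substrings), and the paths and values are hypothetical:

cfg = {
    'ghb': {
        'source_data': {
            'shapefile': {'filename': 'data/ghb_area.shp'},  # hypothetical path
            'dem': {'filename': 'data/dem.tif',              # hypothetical path
                    'elevation_units': 'feet'},
        },
        'cond': 9000.,  # conductance in model units; required
    },
}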
Code example #3
File: wateruse.py Project: wkitlasten/modflow-setup
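read_wdnr_monthly_water_use turns the WDNR master water-use table into model-ready pumping input: wells are culled to the model area, assigned model rows, columns and layers from their screen elevations, and the twelve monthly withdrawal columns are reshaped to one site-year-month record per row.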
def read_wdnr_monthly_water_use(wu_file, wu_points, model,
                                active_area=None,
                                drop_ids=None,
                                minimum_layer_thickness=2
                                ):
    """Read water use data from a master file generated from
    WDNR_wu_data.ipynb. Cull data to area of model. Reshape
    to one month-year-site value per row.

    Parameters
    ----------
    wu_file : csv file
        Water use data output from the WDNR_wu_data.ipynb.
    wu_points : point shapefile
        Water use locations, generated in the WDNR_wu_data.ipynb;
        reprojected to the model CRS if needed.
    model : flopy.modflow.Modflow instance
        Must have a valid modelgrid attribute defining the model grid.
        Only wells within the model bounds are retained; the grid is
        also used for row/column lookup.
    active_area : str (shapefile path) or shapely.geometry.Polygon
        Polygon denoting the active area of the model. If specified,
        wells are culled to this area instead of the model bounding box.
        (default None)
    drop_ids : sequence, optional
        Site numbers to exclude from the data.
        (default None)
    minimum_layer_thickness : scalar
        Minimum layer thickness required for a well to pump from a layer.
        (default 2)

    Returns
    -------
    well_info : DataFrame
        One row per site, with well construction information and
        model row, column, layer assignments.
    monthly_data : DataFrame
        One row per site, year, and month, with pumping in gallons.
    """
    col_fmt = '{}_wdrl_gpm_amt'
    data_renames = {'site_seq_no': 'site_no',
                    'wdrl_year': 'year'}
    df = pd.read_csv(wu_file)
    drop_cols = [c for c in df.columns if 'unnamed' in c.lower()]
    drop_cols += ['objectid']
    df.drop(drop_cols, axis=1, inplace=True, errors='ignore')
    df.rename(columns=data_renames, inplace=True)
    if drop_ids is not None:
        df = df.loc[~df.site_no.isin(drop_ids)].copy()

    # reproject the well locations to the model CRS if needed
    # (get_input_arguments drops kwargs not supported by older gis-utils versions)
    kwargs = {'dest_crs': model.modelgrid.crs}
    kwargs = get_input_arguments(kwargs, shp2df)
    locs = shp2df(wu_points, **kwargs)
    site_seq_col = [c for c in locs if 'site_se' in c.lower()]
    locs_renames = {c: 'site_no' for c in site_seq_col}
    locs.rename(columns=locs_renames, inplace=True)
    if drop_ids is not None:
        locs = locs.loc[~locs.site_no.isin(drop_ids)].copy()

    if active_area is None:
        # cull the data to the model bounds
        features = model.modelgrid.bbox
        txt = "No wells are inside the model bounds of {}"\
            .format(model.modelgrid.extent)
    elif isinstance(active_area, str):
        # reproject the active area to the model CRS if needed
        # (get_input_arguments drops kwargs not supported by older gis-utils versions)
        kwargs = {'dest_crs': model.modelgrid.crs}
        kwargs = get_input_arguments(kwargs, shp2df)
        features = shp2df(active_area, **kwargs).geometry.tolist()
        if len(features) > 1:
            features = MultiPolygon(features)
        else:
            features = Polygon(features[0])
        txt = "No wells are inside the area of {}"\
            .format(active_area)
    elif isinstance(active_area, Polygon):
        features = active_area
        txt = "No wells are inside the supplied active_area polygon"

    within = [g.within(features) for g in locs.geometry]
    assert len(within) > 0, txt
    locs = locs.loc[within].copy()
    if len(locs) == 0:
        print('No wells within model area:\n{}\n{}'.format(wu_file, wu_points))
        return None, None
    df = df.loc[df.site_no.isin(locs.site_no)]
    df.sort_values(by=['site_no', 'year'], inplace=True)

    # create separate dataframe with well info
    well_info = df[['site_no',
                    'well_radius_mm',
                    'borehole_radius_mm',
                    'well_depth_m',
                    'elev_open_int_top_m',
                    'elev_open_int_bot_m',
                    'screen_length_m',
                    'screen_midpoint_elev_m']].copy()
    # groupby site number to cull duplicate information
    well_info = well_info.groupby('site_no').first()
    well_info['site_no'] = well_info.index

    # add top elevation, screen midpoint elev, row, column and layer
    points = dict(zip(locs['site_no'], locs.geometry))
    well_info['x'] = [points[sn].x for sn in well_info.site_no]
    well_info['y'] = [points[sn].y for sn in well_info.site_no]

    # get the model row, column for each well location
    print('intersecting wells with model grid...')
    t0 = time.time()
    i, j = get_ij(model.modelgrid, well_info.x.values, well_info.y.values)
    print("took {:.2f}s\n".format(time.time() - t0))

    top = model.dis.top.array
    botm = model.dis.botm.array
    thickness = get_layer_thicknesses(top, botm)
    well_info['i'] = i
    well_info['j'] = j
    well_info['elv_m'] = top[i, j]
    well_info['elv_top_m'] = well_info.elev_open_int_top_m
    well_info['elv_botm_m'] = well_info.elev_open_int_bot_m
    well_info['elv_mdpt_m'] = well_info.screen_midpoint_elev_m
    well_info['k'] = get_layer(botm, i, j, elev=well_info['elv_mdpt_m'].values)
    well_info['laythick'] = thickness[well_info.k.values, i, j]
    well_info['ktop'] = get_layer(botm, i, j, elev=well_info['elv_top_m'].values)
    well_info['kbotm'] = get_layer(botm, i, j, elev=well_info['elv_botm_m'].values)

    # for wells in a layer below minimum thickness
    # move to layer with screen top, then screen botm,
    # put remainder in layer 1 and hope for the best
    well_info = wells.assign_layers_from_screen_top_botm(
        well_info, model,
        flux_col='q',
        screen_top_col='elv_top_m',
        screen_botm_col='elv_botm_m',
        across_layers=False,
        distribute_by='transmissivity',
        minimum_layer_thickness=minimum_layer_thickness)
    # after redistribution, no wells should remain in layers thinner
    # than the minimum thickness
    isthin = well_info.laythick < minimum_layer_thickness
    assert not np.any(isthin)

    # gather the monthly pumping columns (one column per month)
    monthlyQ_cols = [col_fmt.format(calendar.month_abbr[i]).lower()
                     for i in range(1, 13)]
    monthly_data = df[['site_no', 'year'] + monthlyQ_cols].copy()
    monthly_data.columns = ['site_no', 'year'] + np.arange(1, 13).tolist()

    # stack the data
    # so that each row is a site number, year, month
    # reset the index to move multi-index levels back out to columns
    stacked = monthly_data.set_index(['site_no', 'year']).stack().reset_index()
    stacked.columns = ['site_no', 'year', 'month', 'gallons']
    stacked['datetime'] = pd.to_datetime(['{}-{:02d}'.format(y, m)
                                          for y, m in zip(stacked.year, stacked.month)])
    monthly_data = stacked
    return well_info, monthly_data
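The reshape at the end is a standard pandas stack; a minimal, self-contained illustration of the same pattern (toy values, not real data):

import pandas as pd

wide = pd.DataFrame({'site_no': [1, 1], 'year': [2011, 2012],
                     1: [100., 110.], 2: [120., 130.]})  # one column per month
stacked = wide.set_index(['site_no', 'year']).stack().reset_index()
stacked.columns = ['site_no', 'year', 'month', 'gallons']
stacked['datetime'] = pd.to_datetime(['{}-{:02d}'.format(y, m)
                                      for y, m in zip(stacked.year, stacked.month)])
print(stacked)  # one row per site, year, and month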