Example #1
import numpy as np
import pandas as pd

# helper functions used below (setup_lake_info, make_bdlknc_zones, make_bdlknc2d,
# save_array, get_horizontal_connections) are defined elsewhere in the source package

def setup_lake_connectiondata(model,
                              include_horizontal_connections=True):

    cfg = model.cfg['lak']

    # set up littoral and profundal zones
    if model.lake_info is None:
        model.lake_info = setup_lake_info(model)
    lakzones = make_bdlknc_zones(model.modelgrid, model.lake_info,
                                 include_ids=model.lake_info['feat_id'])
    model.setup_external_filepaths('lak', 'lakzones',
                                   cfg['{}_filename_fmt'.format('lakzones')],
                                   nfiles=1)
    save_array(model.cfg['intermediate_data']['lakzones'][0], lakzones, fmt='%d')

    # make the (2D) areal footprint of lakebed leakance from the zones
    bdlknc = make_bdlknc2d(lakzones,
                           cfg['source_data']['littoral_leakance'],
                           cfg['source_data']['profundal_leakance'])

    # cell tops and bottoms
    layer_elevations = np.zeros((model.nlay + 1, model.nrow, model.ncol), dtype=float)
    layer_elevations[0] = model.dis.top.array
    layer_elevations[1:] = model.dis.botm.array

    lakeno = []
    cellid = []
    bedleak = []
    for lake_id in range(1, model.nlakes + 1):

        # get the vertical GWF connections for each lake
        k, i, j = np.where(model.lakarr == lake_id)
        # get just the unique i, j locations for each lake
        i, j = zip(*set(zip(i, j)))
        # assign vertical connections to the highest active layer
        k = np.argmax(model.idomain[:, i, j], axis=0)
        cellid += list(zip(k, i, j))
        lakeno += [lake_id] * len(k)
        bedleak += list(bdlknc[i, j])

    # belev, telev, connlen, and connwidth are written as placeholder zeros;
    # MF6 ignores these geometry terms for connections of claktype 'vertical'
    df = pd.DataFrame({'lakeno': lakeno,
                       'cellid': cellid,
                       'claktype': 'vertical',
                       'bedleak': bedleak,
                       'belev': 0.,
                       'telev': 0.,
                       'connlen': 0.,
                       'connwidth': 0.
                       })

    if include_horizontal_connections:
        for lake_id in range(1, model.nlakes + 1):
            lake_extent = model.lakarr == lake_id
            horizontal_connections = get_horizontal_connections(lake_extent,
                                                                layer_elevations,
                                                                model.dis.delr.array,
                                                                model.dis.delc.array,
                                                                bdlknc)
            # drop horizontal connections to inactive cells
            k, i, j = zip(*horizontal_connections['cellid'])
            inactive = model.idomain[k, i, j] < 1
            horizontal_connections = horizontal_connections.loc[~inactive].copy()
            horizontal_connections['lakeno'] = lake_id
            df = pd.concat([df, horizontal_connections])
    # assign iconn (connection number) values for each lake
    dfs = []
    for lakeno, group in df.groupby('lakeno'):
        group = group.copy()
        group['iconn'] = list(range(len(group)))
        dfs.append(group)
    df = pd.concat(dfs)
    df['lakeno'] -= 1  # convert to zero-based for mf6
    return df
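
The DataFrame returned above has one row per lake/GWF-cell connection, with zero-based lake numbers, zero-based iconn values, and cellid tuples. Below is a minimal sketch of how such a table might be handed to flopy's MF6 LAK package; the gwf model object and the exact keyword arguments shown are assumptions for illustration, not part of the example.

import flopy

# hypothetical usage sketch (not from the example above)
df = setup_lake_connectiondata(model)

# reorder into (lakeno, iconn, cellid, claktype, bedleak, belev, telev,
# connlen, connwidth) records, the column order MF6 LAK connectiondata expects
connectiondata = [
    (r.lakeno, r.iconn, r.cellid, r.claktype,
     r.bedleak, r.belev, r.telev, r.connlen, r.connwidth)
    for r in df.itertuples()
]
lak = flopy.mf6.ModflowGwflak(gwf, nlakes=model.nlakes,
                              connectiondata=connectiondata)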
Example #2
    # method excerpt; module-level imports of os, time, numpy as np, and
    # flopy.modflow as fm, plus the lake-setup helper functions, are assumed
    def setup_lak(self):

        print('setting up LAKE package...')
        t0 = time.time()
        # if shapefile of lakes was included,
        # lakarr should be automatically built by property method
        if self.lakarr.sum() == 0:
            print("lakes_shapefile not specified, or no lakes in model area")
            return

        # source data
        source_data = self.cfg['lak']['source_data']
        self.lake_info = setup_lake_info(self)
        nlakes = len(self.lake_info)

        # set up the tab files, if any
        tab_files_argument = None
        tab_units = None
        start_tab_units_at = 150  # default starting number for iunittab
        if 'stage_area_volume_file' in source_data:

            tab_files = setup_lake_tablefiles(
                self, source_data['stage_area_volume_file'])
            tab_units = list(
                range(start_tab_units_at, start_tab_units_at + len(tab_files)))

            # tabfiles aren't rewritten by flopy on package write
            self.cfg['lak']['tab_files'] = tab_files
            # kludge to deal with ugliness of lake package external file handling
            # (need to give path relative to model_ws, not folder that flopy is working in)
            tab_files_argument = [os.path.relpath(f) for f in tab_files]

        self.setup_external_filepaths(
            'lak', 'lakzones',
            self.cfg['lak']['{}_filename_fmt'.format('lakzones')])
        self.setup_external_filepaths(
            'lak',
            'bdlknc',
            self.cfg['lak']['{}_filename_fmt'.format('bdlknc')],
            file_numbers=list(range(self.nlay)))

        # make the arrays or load them
        lakzones = make_bdlknc_zones(self.modelgrid,
                                     self.lake_info,
                                     include_ids=self.lake_info['feat_id'])
        save_array(self.cfg['intermediate_data']['lakzones'][0],
                   lakzones,
                   fmt='%d')

        bdlknc = np.zeros((self.nlay, self.nrow, self.ncol))
        # make the areal footprint of lakebed leakance from the zones (layer 1)
        bdlknc[0] = make_bdlknc2d(
            lakzones, self.cfg['lak']['source_data']['littoral_leakance'],
            self.cfg['lak']['source_data']['profundal_leakance'])
        for k in range(self.nlay):
            if k > 0:
                # for each underlying layer, assign profundal leakance to cells where isbc == 1
                bdlknc[k][self.isbc[k] == 1] = self.cfg['lak']['source_data'][
                    'profundal_leakance']
            save_array(self.cfg['intermediate_data']['bdlknc'][0][k],
                       bdlknc[k],
                       fmt='%.6e')

        # get estimates of stage from model top, for specifying ranges
        stages = []
        for lakid in self.lake_info['lak_id']:
            loc = self.lakarr[0] == lakid
            est_stage = self.dis.top.array[loc].min()
            stages.append(est_stage)
        stages = np.array(stages)

        # setup stress period data
        tol = 5  # specify lake stage range as +/- this value
        ssmn, ssmx = stages - tol, stages + tol
        stage_range = list(zip(ssmn, ssmx))

        # set up dataset 9
        # ssmn and ssmx values only required for steady-state periods > 0
        self.lake_fluxes = setup_lake_fluxes(self)
        precip = self.lake_fluxes['precipitation'].tolist()
        evap = self.lake_fluxes['evaporation'].tolist()
        flux_data = {}
        for i, steady in enumerate(self.dis.steady.array):
            if i > 0 and steady:
                flux_data_i = []
                for lake_ssmn, lake_ssmx in zip(ssmn, ssmx):
                    flux_data_i.append(
                        [precip[i], evap[i], 0, 0, lake_ssmn, lake_ssmx])
            else:
                flux_data_i = [[precip[i], evap[i], 0, 0]] * nlakes
            flux_data[i] = flux_data_i
        options = ['tableinput'] if tab_files_argument is not None else None

        kwargs = self.cfg['lak']
        kwargs['nlakes'] = len(self.lake_info)
        kwargs['stages'] = stages
        kwargs['stage_range'] = stage_range
        kwargs['flux_data'] = flux_data
        kwargs['tab_files'] = tab_files_argument  # this needs to be in the order of the lake IDs!
        kwargs['tab_units'] = tab_units
        kwargs['options'] = options
        kwargs['ipakcb'] = self.ipakcb
        kwargs['lwrt'] = 0
        kwargs = get_input_arguments(kwargs, fm.mflak.ModflowLak)
        lak = fm.ModflowLak(self, **kwargs)
        print("finished in {:.2f}s\n".format(time.time() - t0))
        return lak
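
In the example above, get_input_arguments culls the accumulated kwargs dict down to the keyword arguments that the flopy constructor actually accepts (dropping configuration entries such as 'source_data'). A rough sketch of that kind of filtering using inspect is shown below; this is an assumed re-implementation for illustration, not the function from the source package.

import inspect

def filter_kwargs(kwargs, constructor):
    # keep only the keys that the target callable accepts as parameters
    valid = set(inspect.signature(constructor).parameters)
    return {k: v for k, v in kwargs.items() if k in valid}

# usage (hypothetical): kwargs = filter_kwargs(kwargs, fm.mflak.ModflowLak)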
Example #3
import os

import numpy as np
import pandas as pd

# helper functions used below (setup_lake_info, make_bdlknc_zones,
# get_littoral_profundal_zones, make_bdlknc2d, save_array,
# get_horizontal_connections) are defined elsewhere in the source package

def setup_lake_connectiondata(model,
                              for_external_file=True,
                              include_horizontal_connections=True):

    cfg = model.cfg['lak']

    # set up littoral and profundal zones
    if model.lake_info is None:
        model.lake_info = setup_lake_info(model)

    # zone numbers
    # littoral zones are the same as the one-based lake number
    # profundal zones are the one-based lake number times 100
    # for example, for lake 1, littoral zone is 1; profundal zone is 100.
    lakzones = make_bdlknc_zones(model.modelgrid,
                                 model.lake_info,
                                 include_ids=model.lake_info['feat_id'])
    littoral_profundal_zones = get_littoral_profundal_zones(lakzones)

    model.setup_external_filepaths('lak', 'lakzones',
                                   cfg['{}_filename_fmt'.format('lakzones')])
    save_array(model.cfg['intermediate_data']['lakzones'][0],
               lakzones,
               fmt='%d')

    # make the (2D) areal footprint of lakebed leakance from the zones
    bdlknc = make_bdlknc2d(lakzones, cfg['source_data']['littoral_leakance'],
                           cfg['source_data']['profundal_leakance'])

    # cell tops and bottoms
    layer_elevations = np.zeros((model.nlay + 1, model.nrow, model.ncol),
                                dtype=float)
    layer_elevations[0] = model.dis.top.array
    layer_elevations[1:] = model.dis.botm.array

    lakeno = []
    cellid = []
    bedleak = []
    zone = []
    for lake_id in range(1, model.nlakes + 1):

        # get the vertical GWF connections for each lake
        k, i, j = np.where(model.lakarr == lake_id)
        # get just the unique i, j locations for each lake
        i, j = zip(*set(zip(i, j)))
        # assign vertical connections to the highest active layer
        k = np.argmax(model.idomain[:, i, j], axis=0)
        cellid += list(zip(k, i, j))
        lakeno += [lake_id] * len(k)
        bedleak += list(bdlknc[i, j])
        zone += list(littoral_profundal_zones[i, j])

    df = pd.DataFrame({
        'lakeno': lakeno,
        'cellid': cellid,
        'claktype': 'vertical',
        'bedleak': bedleak,
        'belev': 0.,
        'telev': 0.,
        'connlen': 0.,
        'connwidth': 0.,
        'zone': zone
    })

    if include_horizontal_connections:
        for lake_id in range(1, model.nlakes + 1):
            lake_extent = model.lakarr == lake_id
            horizontal_connections = get_horizontal_connections(
                lake_extent,
                connection_info=True,
                layer_elevations=layer_elevations,
                delr=model.dis.delr.array,
                delc=model.dis.delc.array,
                bdlknc=bdlknc)
            # drop horizontal connections to inactive cells
            k, i, j = horizontal_connections[['k', 'i', 'j']].T.values
            inactive = model.idomain[k, i, j] < 1
            horizontal_connections = horizontal_connections.loc[
                ~inactive].copy()
            horizontal_connections['lakeno'] = lake_id
            k, i, j = horizontal_connections[['k', 'i', 'j']].T.values
            horizontal_connections['zone'] = littoral_profundal_zones[i, j]
            horizontal_connections['cellid'] = list(zip(k, i, j))
            horizontal_connections.drop(['k', 'i', 'j'], axis=1, inplace=True)
            df = pd.concat([df, horizontal_connections])
    # assign iconn (connection number) values for each lake
    dfs = []
    for lakeno, group in df.groupby('lakeno'):
        group = group.copy()
        group['iconn'] = list(range(len(group)))
        dfs.append(group)
    df = pd.concat(dfs)

    connections_lookup_file = model.cfg['lak']['output_files'][
        'connections_lookup_file'].format(model.name)
    connections_lookup_file = os.path.join(
        model._tables_path,
        os.path.split(connections_lookup_file)[1])
    model.cfg['lak']['output_files'][
        'connections_lookup_file'] = connections_lookup_file
    df.to_csv(connections_lookup_file, index=False)

    # convert to one-based and comment out header if df will be written straight to external file
    if for_external_file:
        df.rename(columns={'lakeno': '#lakeno'}, inplace=True)
        df['iconn'] += 1
        k, i, j = zip(*df['cellid'])
        df.drop('cellid', axis=1, inplace=True)
        df['k'] = np.array(k) + 1
        df['i'] = np.array(i) + 1
        df['j'] = np.array(j) + 1
    else:
        df['lakeno'] -= 1  # convert to zero-based for mf6
    return df
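
When for_external_file=True, the table comes back one-based, with the cellid tuples expanded into k, i, j columns and the first column renamed to '#lakeno', so the header line is read as a comment when the table is referenced from an MF6 input file. A minimal sketch of writing it out follows; the file name and whitespace delimiter are assumptions.

# hypothetical follow-on: write the one-based table to an external text file
df = setup_lake_connectiondata(model, for_external_file=True)
df.to_csv('lak_connectiondata.dat', sep=' ', index=False)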