def ss_heads(self):
    """Pull steady-state heads from the model's formatted head (.fhd) file.

    Returns:
        np.ndarray: heads for layer 0 at the first record (74 x 51 grid).

    Raises:
        FileNotFoundError: if no .fhd file exists in ``self.path``.
    """
    fhd_files = [f for f in os.listdir(self.path) if f.endswith('fhd')]
    if not fhd_files:
        # previously this raised a bare IndexError with no context
        raise FileNotFoundError('No .fhd file found in {}'.format(self.path))
    head_file = op.join(self.path, fhd_files[0])
    try:
        hds = ff.FormattedHeadFile(head_file, precision='single')
    except Exception:
        # file may have been written in double precision; retry before failing
        hds = ff.FormattedHeadFile(head_file, precision='double')
    return hds.get_alldata(mflay=0)[0]
def _ts_heads(args):
    """Pull heads from a scenario's .fhd file and cache them as a .npy file.

    Takes a single packed argument so it can be mapped over a process pool.

    Args:
        args: tuple of ``(scenario, path_pickle)`` -- the scenario directory
            and the directory where the result array is saved.

    Side effects:
        Writes ``heads_<slr>.npy`` (all time steps, layer 0) to ``path_pickle``.
    """
    scenario, path_pickle = args
    slr_name = op.basename(scenario)
    # slice out the SLR tag from the scenario name -- assumes a fixed
    # naming convention with the tag at chars 4:7; TODO confirm
    slr = slr_name[4:7]
    # reuse slr_name instead of recomputing op.basename(scenario)
    head_file = op.join(scenario, slr_name + '.fhd')
    try:
        hds = ff.FormattedHeadFile(head_file, precision='single')
    except Exception:
        # file may have been written in double precision; retry before failing
        hds = ff.FormattedHeadFile(head_file, precision='double')
    heads = hds.get_alldata(mflay=0)
    res_path = op.join(path_pickle, 'heads_{}.npy'.format(slr))
    np.save(res_path, heads)
def main(head_file, kper):
    """Build a DataFrame of heads for one stress period with ROW/COL labels.

    Args:
        head_file: path to a formatted head (.fhd) file.
        kper: index of the stress period to extract.

    Returns:
        pd.DataFrame: one row per cell, index starting at 10001, with
        columns ``Head``, ``ROW``, ``COL`` (1-based).
    """
    try:
        hds = ff.FormattedHeadFile(head_file, precision='single')
    except Exception:
        # file may have been written in double precision; retry before failing
        hds = ff.FormattedHeadFile(head_file, precision='double')
    heads = hds.get_alldata(mflay=0)
    mat_heads = heads[kper]
    n_rows, n_cols = mat_heads.shape
    # index length derived from the grid size (3774 for the 74 x 51 grid)
    df_heads = pd.DataFrame(mat_heads.flatten(),
                            index=range(10001, 10001 + mat_heads.size),
                            columns=['Head'])
    # NOTE(review): ROW cycles fastest while COL repeats, which matches a
    # Fortran-order flatten; mat_heads.flatten() is C-order -- confirm the
    # intended labeling against downstream consumers.
    rows = np.tile(np.arange(1, n_rows + 1), n_cols)
    cols = np.repeat(np.arange(1, n_cols + 1), n_rows)
    df_heads['ROW'] = rows
    df_heads['COL'] = cols
    return df_heads
def __init__(self, fn):
    """Load a MODFLOW model and its head output, then draw the layout.

    Args:
        fn: path to the MODFLOW name (.nam) file.

    Side effects:
        Sets ``self.mf``, ``self.hds``, ``self.fn_hobOut`` and calls
        ``self.layout()``.
    """
    mf = flopy.modflow.Modflow.load(fn, model_ws=os.path.dirname(fn),
                                    load_only=['DIS', 'BAS6', 'UPW'])
    mf_files = gw_utils.get_mf_files(fn)
    hds_fn = mf_files['hds'][1]
    hob_out_fn = mf_files['HOB'][1]
    self.fn_hobOut = hob_out_fn
    try:
        # formatted (text) head file
        import flopy.utils.formattedfile as ff
        hds = ff.FormattedHeadFile(hds_fn, precision='single')
    except Exception:
        # fall back to a binary head file (dead `pass` removed)
        hds = flopy.utils.HeadFile(hds_fn)
    self.mf = mf
    self.hds = hds
    self.layout()
import os
import demjson
import matplotlib.pyplot as plt
import flopy.modflow as mf
import flopy.utils.formattedfile as ff
import pprint

# Workspace and model setup.
workspace = os.path.join('data')
pprint.pprint(workspace)
modelname = 'Model_for_Pirna'
ml = mf.Modflow.load(modelname + '.nam', exe_name='mf2005',
                     model_ws=workspace, verbose=True)
#ml.run_model()

# Read the formatted head file written by MODFLOW.
hdobj = ff.FormattedHeadFile(os.path.join(workspace, modelname + '.fhd'),
                             precision='single')
kstpkper = hdobj.get_kstpkper()
resultsG21 = hdobj.get_ts((1, 10, 52))
for result in resultsG21:
    # Python 2 `print` statement converted to a function call (py3 syntax).
    # result[0] / 86400: presumably model time in seconds reported as
    # days -- confirm the model's time unit.
    print("{};{}".format(result[0] / 86400, result[1]))
import os
import shutil

import numpy as np
import flopy
import flopy.utils.binaryfile as bf
import flopy.utils.formattedfile as ff
import flopy.utils.reference as srf

# main directory for all files
fmain = r'C:\PEST_examples\fault_example'

# specify modflow model directory and load relevant model data
# (`import os` was missing: os.sep below raised NameError; raw strings
# avoid invalid-escape warnings without changing the path values)
mfdir = fmain + os.sep + r'Model\modflow'
mdlname = 'example'  # name of modflow model
mf = flopy.modflow.Modflow.load(mfdir + os.sep + mdlname + '.nam')  # import modflow object
# import heads file as flopy object (output from modflow)
hdobj = ff.FormattedHeadFile(
    mfdir + os.sep + mdlname + '.hds', precision='single'
)
# get hydraulic conductivity array (used for initial guess for parameters)
hk = mf.lpf.hk.array
Lx = sum(mf.dis.delr.array)
Ly = sum(mf.dis.delc.array)
xul = -Lx / 2
yul = Ly / 2
grid_ref = flopy.utils.reference.SpatialReference(delr=mf.dis.delr,
                                                  delc=mf.dis.delc,
                                                  lenuni=mf.dis.lenuni,
                                                  xul=xul, yul=yul)

## load pilot point data
fpath = fmain + os.sep + r'Model\pilot_point_and_variogram_data.npy'  # path to pilot point data
# import model print('loading model with flopy ... ') mf = flopy.modflow.Modflow.load(namfile) print(' ... done') font = { 'family': 'serif', 'color': 'navy', 'weight': 'semibold', 'size': 14, 'backgroundcolor': 'white', } fhd_file = os.path.join(res_dir, 'Full', mod_nam + '.fhd') hdobj1 = ff.FormattedHeadFile(fhd_file, precision='single', verbose=True) rec1 = hdobj1.get_data(idx=60) rec1_2 = hdobj1.get_data(idx=32) fhd_file = os.path.join(res_dir, mod_type, mod_nam + '.fhd') hdobj2 = ff.FormattedHeadFile(fhd_file, precision='single', verbose=True) rec2 = hdobj2.get_data(idx=60) rec2_2 = hdobj2.get_data(idx=32) levels = np.arange(-50, 0, 2) norm = mpl.colors.Normalize(vmin=zh_min * 100.0, vmax=zh_max * 100.0) # plt.rcdefaults() newparams = { 'figure.dpi': 150, 'savefig.dpi': 300, 'font.family': 'serif',
def mf_get_all(path_root, mf_step, **params):
    """Get head from .fhd and soil water from UZF gage files for one step.

    Head comes from the formatted head file (needed even when water is
    discharging to the land surface). Soil water is read from the per-cell
    UZF gage files; only the first depth line is visited here (the 40-line
    averaging variant is commented out in the loop below).

    Args:
        path_root: filepath where the 'MF' dir is, with the .uzf gages
            and the .fhd file.
        mf_step: timestep to use.
        **params: expects 'name' (model name), 'Por' (porosity) and
            'diff' (head offset below land surface) -- TODO confirm keys
            against callers.

    Returns:
        np.ndarray (2-D): columns ROW, COL, head, theta, surface leakage,
        index; only cells belonging to subcatchments (row not NaN) kept.

    Notes:
        When water is at the land surface, soil water is set to porosity
        (it has to be set to something).
    """
    mf_model = os.path.join(path_root, 'MF', params.get('name'))
    # NOTE(review): bare except re-raised as SystemError conflates "missing
    # file" with any other open failure -- confirm intent
    try:
        hds = ff.FormattedHeadFile(mf_model + '.fhd', precision='double')
    except:
        raise SystemError(mf_model + '.fhd does not exist.\nCheck paths')
    # UZF cell-by-cell budget file; binary precision may vary, so retry
    try:
        uzf = bf.CellBudgetFile(mf_model + '.uzfcb2.bin', precision='single')
    except:
        uzf = bf.CellBudgetFile(mf_model + '.uzfcb2.bin', precision='double')
    # totim=mf_step + 1: presumably skips a steady-state record -- confirm
    head_fhd = hds.get_data(totim=mf_step + 1, mflay=0)
    uzf_data = abs(uzf.get_data(text='SURFACE LEAKAGE', totim=mf_step + 1)[0])
    # land-surface elevations, reshaped to match the head grid
    arr_surf = np.load(
        op.join(op.dirname(op.dirname(path_root)), 'Data',
                'Land_Z.npy')).reshape(head_fhd.shape)
    # intialize numpy arrays that will get updated based on row/col location
    index = np.linspace(10001, 10000 + head_fhd.size, head_fhd.size,
                        dtype=int)
    theta_uzfb = np.empty(head_fhd.shape)
    theta_uzfb[:] = np.nan
    row_np = np.empty(head_fhd.shape)
    row_np[:] = np.nan
    col_np = np.empty(head_fhd.shape)
    col_np[:] = np.nan
    # gage files are matched by the model-name suffix, e.g. '<tail>.uzf12'
    regex = re.compile('({}.uzf[0-9]+)'.format(params.get('name')[-5:]))
    gage_files = [
        os.path.join(os.path.dirname(mf_model), uzfile)
        for uzfile in os.listdir(os.path.dirname(mf_model))
        if regex.search(uzfile)
    ]
    # convert time step to guess at line where it starts in uzfb file
    # gage_files = [test for test in gage_files if test.endswith('2008')]# or test.endswith('2004')]
    for i, each in enumerate(gage_files):
        linecache.clearcache()
        # first line of the gage file carries the cell's row/col numbers
        nums = re.findall(r'\d+', linecache.getline(each, 1))
        # convert row/col to 0 index
        row = int(nums[1]) - 1
        col = int(nums[2]) - 1
        # store row/col in np array for table
        row_np[row][col] = row
        col_np[row][col] = col
        # each record appears to span 40 lines plus a 4-line header; this
        # is a guess at the record start that is corrected below -- TODO confirm
        line_start = mf_step * 40 + 4
        looking = True
        # begin looking for correct time step
        while looking:
            # line with time; blank when surface leakage - cause time to be wrong
            header = linecache.getline(each, line_start).split()
            # mf_step + 1, skip steady state (0)
            try:
                if int(float(header[1])) == mf_step + 1:
                    theta = 0  # first depth, for checking coupling -- or maybe use
                    for x in range(line_start, line_start + 1):
                        # average:
                        # for x in range(line_start, line_start+40):
                        # theta += float(linecache.getline(each, x).split()[-1])/40
                        # NOTE(review): the accumulation above is commented
                        # out, so theta remains 0 on this path -- confirm
                        # this is intentional
                        theta_uzfb[row][col] = theta
                    looking = False
                elif int(float(header[1])) < mf_step + 1:
                    # water at land surface: use porosity for soil water and
                    # pin head just below the land surface
                    theta_uzfb[row][col] = params.get('Por')
                    head_fhd[row][col] = arr_surf[row][col] - params.get(
                        'diff')
                    linecache.clearcache()
                    looking = False
                else:
                    raise TypeError('How is mf_step + 1 less than header?')
            except:
                # header line missing or malformed: step back one record
                # (40 lines) and retry
                line_start -= 40
                linecache.clearcache()
                if line_start < 4:
                    # ran out of records: fall back to porosity and a
                    # near-surface head
                    # print (row,col)
                    theta_uzfb[row][col] = params.get('Por', 0.3)
                    head_fhd[row][col] = arr_surf[row][col] - params.get(
                        'diff')
                    looking = False
    # back to 1-based row/col numbering for the output table
    row_np = row_np + 1
    col_np = col_np + 1
    # stack 1ds into multidimensional nd array for pandas columns to be correct
    to_stack = [row_np, col_np, head_fhd, theta_uzfb, uzf_data]
    unraveled = [each.ravel() for each in to_stack]
    # NOTE(review): this first column_stack result is overwritten below
    stacked = np.column_stack(unraveled)
    unraveled.append(index)
    stacked = np.column_stack(unraveled)
    # drop non subcatchments (where row is nan)
    mf_subs = stacked[~np.isnan(stacked[:, 0])]
    return mf_subs