def tenpar():
    """Run the ensemble smoother on the 10-parameter cross-section problem.

    Works inside ``smoother/10par_xsec``; removes stale CSV output first,
    builds (but currently does not pass) a localizer matrix, then runs 20
    update iterations with a spread of lambda multipliers.

    NOTE(review): the working directory is not restored on error --
    consider wrapping the body in try/finally.
    """
    import os
    import pyemu

    os.chdir(os.path.join("smoother", "10par_xsec"))
    # clear results from any previous run
    for csv_file in [f for f in os.listdir('.') if f.endswith(".csv")]:
        os.remove(csv_file)
    es = pyemu.EnsembleSmoother("10par_xsec.pst", num_slaves=5,
                                use_approx=True)
    lz = es.get_localizer().to_dataframe()
    # the k pars upgrad of h01_04 and h01_06 are localized (zeroed)
    upgrad_pars = [pname for pname in lz.columns
                   if "_" in pname and int(pname.split('_')[1]) > 4]
    lz.loc["h01_04", upgrad_pars] = 0.0
    upgrad_pars = [pname for pname in lz.columns
                   if '_' in pname and int(pname.split('_')[1]) > 6]
    lz.loc["h01_06", upgrad_pars] = 0.0
    lz = pyemu.Matrix.from_dataframe(lz).T
    print(lz)
    es.initialize(num_reals=20)
    for it in range(20):
        # localizer/run_subset kwargs were disabled in the original run
        es.update(lambda_mults=[0.1, 1.0, 10.0])  # ,localizer=lz,run_subset=20
    os.chdir(os.path.join("..", ".."))
def chenoliver():
    """Run the ensemble smoother on the Chen & Oliver synthetic problem.

    Works inside ``smoother/chenoliver``; clears previous CSV results
    (keeping any "bak" backups) and runs 40 full-rank update iterations
    with a 100-realization ensemble.
    """
    import os
    import numpy as np
    import pyemu

    os.chdir(os.path.join("smoother", "chenoliver"))
    # remove previous results, but keep any backed-up ("bak") csv files
    for csv_file in [f for f in os.listdir('.')
                     if f.endswith(".csv") and "bak" not in f]:
        os.remove(csv_file)
    # prior parameter covariance: unit variance on the single parameter
    parcov = pyemu.Cov(x=np.ones((1, 1)), names=["par"], isdiagonal=True)
    pst = pyemu.Pst("chenoliver.pst")
    # observation noise covariance: variance of 16.0 on the single obs
    obscov = pyemu.Cov(x=np.ones((1, 1)) * 16.0, names=["obs"],
                       isdiagonal=True)
    es = pyemu.EnsembleSmoother(pst, parcov=parcov, obscov=obscov,
                                num_slaves=20, use_approx=False)
    es.initialize(num_reals=100)
    for it in range(40):
        es.update()
    os.chdir(os.path.join("..", ".."))
def freyberg_smoother_test():
    """Run a single smoother iteration against the Freyberg control file."""
    import os
    import pyemu

    pst_path = os.path.join("smoother", "freyberg.pst")
    control = pyemu.Pst(pst_path)
    smoother = pyemu.EnsembleSmoother(control)
    # a small ensemble keeps this test quick
    smoother.initialize(5)
    smoother.update()
def henry():
    """Run the ensemble smoother on the Henry problem (``smoother/henry_pc``).

    Clears stale CSV results, then runs 10 iterations with a
    210-realization ensemble, evaluating only a 45-run subset per lambda.
    """
    import os
    import pyemu

    os.chdir(os.path.join("smoother", "henry_pc"))
    # clear results from any previous run
    for csv_file in [f for f in os.listdir('.') if f.endswith(".csv")]:
        os.remove(csv_file)
    pst = pyemu.Pst(os.path.join("henry.pst"))
    es = pyemu.EnsembleSmoother(pst, num_slaves=15, use_approx=True)
    es.initialize(210, init_lambda=1.0)
    for i in range(10):
        es.update(lambda_mults=[0.2, 5.0], run_subset=45)
    os.chdir(os.path.join("..", ".."))
def freyberg():
    """Build/plot a geostatistical covariance for the Freyberg model.

    Caches model cell-center coordinates in ``freyberg.xy``, clears stale
    CSV results, constructs the covariance matrix from ``structure.dat``,
    displays it, and returns. The smoother-run code after the early
    ``return`` is unreachable (kept as-is for reference).
    """
    import os
    import pandas as pd
    import pyemu

    os.chdir(os.path.join("smoother", "freyberg"))
    # build (or load the cached) cell-center coordinate table
    if not os.path.exists("freyberg.xy"):
        import flopy
        ml = flopy.modflow.Modflow.load("freyberg.nam", model_ws="template",
                                        load_only=[])
        xy = pd.DataFrame([(x, y) for x, y in
                           zip(ml.sr.xcentergrid.flatten(),
                               ml.sr.ycentergrid.flatten())],
                          columns=['x', 'y'])
        names = []
        for i in range(ml.nrow):
            for j in range(ml.ncol):
                names.append("hkr{0:02d}c{1:02d}".format(i, j))
        xy.loc[:, "name"] = names
        xy.to_csv("freyberg.xy")
    else:
        xy = pd.read_csv("freyberg.xy")
    # clear results from any previous run
    for csv_file in [f for f in os.listdir('.') if f.endswith(".csv")]:
        os.remove(csv_file)
    pst = pyemu.Pst(os.path.join("freyberg.pst"))
    es = pyemu.EnsembleSmoother(pst, num_slaves=20, use_approx=True)
    nothk_names = [pname for pname in pst.adj_par_names
                   if "hk" not in pname]
    # NOTE(review): parcov_nothk is computed but never used below
    parcov_nothk = es.parcov.get(row_names=nothk_names)
    gs = pyemu.utils.geostats.read_struct_file("structure.dat")
    cov = gs.covariance_matrix(xy.x, xy.y, xy.name)
    import matplotlib.pyplot as plt
    plt.imshow(cov.x, interpolation="nearest")
    plt.show()
    return
    # --- unreachable below this early return (preserved for reference) ---
    # gs.variograms[0].a=10000
    # gs.variograms[0].contribution=0.01
    # gs.variograms[0].anisotropy = 10.0
    pp_df = pyemu.utils.gw_utils.pp_file_to_dataframe("points1.dat")
    parcov_hk = gs.covariance_matrix(pp_df.x, pp_df.y, pp_df.name)
    # NOTE(review): parcov_rch is undefined in this scope -- removing the
    # early return above would raise NameError here; fix before re-enabling.
    parcov_full = parcov_hk.extend(parcov_rch)
    es.initialize(300, init_lambda=5000.0)
    for i in range(3):
        es.update(lambda_mults=[0.2, 5.0], run_subset=40)
    os.chdir(os.path.join("..", ".."))
def ies():
    """Restart-style IES run against ``pest.pst`` inside ``pyemu_dir``.

    Restarts from saved parameter/observation ensembles and iterates
    ``noptmax`` times.

    NOTE(review): a second function named ``ies`` defined later in this
    module shadows this one -- rename one of them so both are callable.
    """
    # local imports added for consistency with the sibling functions
    import os
    import pyemu

    # pyemu_dir is assumed to be a module-level global -- TODO confirm
    os.chdir(pyemu_dir)
    es = pyemu.EnsembleSmoother("pest.pst", verbose="ies.log",
                                save_mats=True,
                                slave_dir=os.path.join("..", "template"),
                                num_slaves=5)
    es.initialize(parensemble="par.csv", obsensemble="obs.csv",
                  restart_obsensemble="restart_obs.csv")
    for i in range(es.pst.control_data.noptmax):
        es.update(use_approx=False)
    os.chdir('..')
def ies():
    """Run two lambda-testing IES updates on a pre-loaded control file.

    NOTE(review): ``pst`` is not defined in this function and no
    module-level ``pst`` is visible here -- this will raise NameError
    unless a global is set elsewhere; TODO pass ``pst`` in as a parameter
    or load it explicitly.
    NOTE(review): this definition shadows the earlier ``ies`` function in
    this file -- rename one of them.
    """
    # local import added for consistency with the sibling functions
    import pyemu

    pst.control_data.noptmax = 1
    ies = pyemu.EnsembleSmoother(pst=pst, verbose="ies.log")
    ies.initialize(parensemble="par1.csv", obsensemble="obs1.csv")
    ies.update(lambda_mults=[10., 1, 0.1])
    ies.update(lambda_mults=[10., 1, 0.1])