def worker(jobq, pid):
    """Merge the smp files named in each job and save the result.

    Each job is a tuple of (output smp name, [smp file, ...]); a None job
    signals the worker to shut down.
    """
    while True:
        args = jobq.get()
        if args is None:
            break
        dname = args[1][0].split('.')[0].split('\\')[-1]
        smp = pu.smp(args[1][0], load=True)
        rtype = smp.records.keys()[0]
        smp.records[dname] = smp.records[rtype]
        smp.records.pop(rtype)
        for smpname in args[1][1:]:
            smp2 = pu.smp(smpname, load=True)
            rtype = smp2.records.keys()[0]
            smp2.records[dname] = smp2.records[rtype]
            smp2.records.pop(rtype)
            smp.merge(smp2, how='left')
        print '----' + args[0]
        smp.save(args[0])
        jobq.task_done()
    jobq.task_done()
    return
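#--a minimal driver sketch (not part of the original source) showing how
#--worker() might be fed from a multiprocessing.JoinableQueue; the job
#--tuples and smp file names below are hypothetical
import multiprocessing

if __name__ == '__main__':
    jobq = multiprocessing.JoinableQueue()
    nproc = 4
    for pid in xrange(nproc):
        p = multiprocessing.Process(target=worker, args=(jobq, pid))
        p.daemon = True
        p.start()
    #--each job: (output smp name, list of smp files to merge)
    jobq.put(('merged.smp', ['site_a.smp', 'site_b.smp']))
    #--one None sentinel per worker signals shutdown
    for _ in xrange(nproc):
        jobq.put(None)
    jobq.join()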
import numpy
from datetime import datetime
import pandas
import pestUtil
reload(pestUtil)


#--load a single site
smp = pestUtil.smp('heads_gap.out')
#smp = pestUtil.smp('s20_h_dbkey13037.smp',date_fmt='%m/%d/%Y')
site_names,offset_idx = smp.get_unique_from_file(smp.site_index,needindices=True)
#--load only one site
site_0 = smp.load(site=site_names[0])


#--load a single site with pandas
smpp = pestUtil.smp('heads_gap.out',pandas=True)
#smp = pestUtil.smp('s20_h_dbkey13037.smp',date_fmt='%m/%d/%Y')
site_names,offset_idx = smpp.get_unique_from_file(smp.site_index,needindices=True)
#--load only one site
site_0p = smpp.load(site=site_names[0])


#--load all sites
smp2 = pestUtil.smp('heads_gap.out',load=True)
#smp2 = pestUtil.smp('s20_h_dbkey13037.smp',date_fmt='%m/%d/%Y',load=True)
site_0,dates,vals = smp2.get_site(site_names[0])


#--load all sites with pandas
smp2p = pestUtil.smp('heads_gap.out',load=True,pandas=True)
# Example #3
while on_date < end_date:
    on_date += timedelta(days=1.)
    plot_dates.append(on_date)
plot_dates = np.array(plot_dates)

#--initialize figure and figure/plot counters
ifigure = 1
iplot = 1
nplots = 6
fig = Make_NewFigure()
#--matplotlib date specification
years, months = mdates.YearLocator(), mdates.MonthLocator()  #--every year, every month
yearsFmt = mdates.DateFormatter('%Y')

#--open smp file
smp = pestUtil.smp(ObsFile, load=False, date_fmt='%m/%d/%Y')
#--process each station in the xml file
iwell = 0
pct_lay = np.empty((nlay), np.float)
active_gwobs = []
active_gwobs_stats = []
for idx, gwhead in enumerate(root.findall('gwHead')):
    station = gwhead.attrib['name']
    print 'Locating {0} in the model grid'.format(station)
    coordType = gwhead.find('coordType').text
    #--determine the location of the well
    if coordType.lower() == 'model':
        #--convert to zero based
        irow = int(gwhead.find('row').text) - 1
        icol = int(gwhead.find('column').text) - 1
    elif coordType.lower() == 'site':
import numpy as np
import pylab
import pandas
import pestUtil as pu

from bro_pred import flow

smp = pu.smp('noaa_sp.smp',load=True,pandas=True)
df = smp.records
df = df.resample('1M',how='mean')
#--use the last value in the dataframe as the base for the projections
start_value = df['noaa'][-1]
print start_value
low_rate = 0.015 #9 inches in 50 years
med_rate = 0.0275 #16.5 inches in 50 years
high_rate = 0.05 #30 inches in 50 years
df_slr = pandas.DataFrame({'no_rise':start_value},index=flow.sp_end)
nyears = df_slr.index.year - df_slr.index[0].year
df_slr['low_rise'] = start_value + low_rate * nyears
df_slr['high_rise'] = start_value + high_rate * nyears
df_slr['med_rise'] = start_value + med_rate * nyears

df_slr.to_csv('noaa_slr.csv',index_label='datetime')
df_slr.plot()
pylab.show()
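#--arithmetic check (added note, assuming the rates above are in feet per
#--year): the 50-year totals in the comments convert to inches as rate * 50 * 12
for name, rate in [('low', 0.015), ('med', 0.0275), ('high', 0.05)]:
    print name, rate * 50.0 * 12.0, 'inches in 50 years'
#--prints: low 9.0, med 16.5, high 30.0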


    sim_mult = 1.0
    c = swflow.find('simmult')
    if c is not None:
        sim_mult = float( c.text )
    print 'Processing flow data for...{0} - reach {1}->{2}'.format( station, usreach, dsreach )
    obs.fill( no_data )
    processObs = False
    nobs = 0
    child = swflow.find('ObsItems')
    if child is not None:
        processObs = True
        for childitem in child.findall('ObsFile'):
            print 'processing observation data...{0}'.format(childitem.text) 
            #--get the smp file data
            smpstation = os.path.basename(childitem.text).replace('.smp','')
            smp = pestUtil.smp(childitem.text,load=True,date_fmt='%m/%d/%Y')
            nobs, obs = dataAdd(unit_conv,sim_dates,obs,smp.records[smpstation][:,0],smp.records[smpstation][:,1])
#    obs *= unit_conv
    #--read the simulated data
    if dsreach > 0:
        #SWRObj = mfb.SWR_Record(-2,SWRFlowFile)
        #SWRObj.rewind_file()
        #ce1 = SWRObj.get_gage(rec_num=usreach,iconn=dsreach)
        ce1 = SWRObj.get_time_gage(rec_num=usreach,iconn=dsreach)
        nt =  np.shape(ce1)[0]
        sim = np.zeros( (nt,2), np.float )
        for jdx in xrange(0,nt):
            sim[jdx,0] = ce1[jdx,itime]
            sim[jdx,1] = ce1[jdx,iflow] * unit_conv * -1. * sim_mult
    #--plot the data
    ax = fig.add_subplot(nplots,1,iplot)
import numpy as np
import pylab
import pestUtil as pu


#--observation smps
obs_files = ['UMD.01\\obsref\\head\\heads.smp']
mod_files = ['UMD.01\\modref\\head\\mheads.smp']
plt_dir = 'png\\'
for obs_file,mod_file in zip(obs_files,mod_files):
    obs_smp = pu.smp(obs_file,load=True,date_fmt='%m/%d/%Y')
    mod_smp = pu.smp(mod_file,load=True,date_fmt='%m/%d/%Y')
    sites = obs_smp.records.keys()
    for site in sites:
        obs = obs_smp.records[site]
        mod = mod_smp.records[site]
        fig = pylab.figure(figsize=(5,5))
        ax = pylab.subplot(111)
        ax.plot(obs[:,0],obs[:,1],'b-',label='obs')
        ax.plot(mod[:,0],mod[:,1],'g-',label='mod')
        ax.grid()
        ax.legend()
        fname = plt_dir+site+'.png'
        pylab.savefig(fname,dpi=300,format='png',bbox_inches='tight')
        pylab.close('all')
        #break

# Example #7
import os
import numpy as np
#from dateutil.relativedelta import relativedelta
from datetime import timedelta
import pandas
from shapely.geometry import Point
import shapefile
import pestUtil as pu
from bro import seawat
'''spatially declusters relconc data and thins out sites that are not in the active domain
'''

#--load the relconc smp for spatial declustering
df = pu.smp('relconc.smp', load=True, pandas=True).records

#--get the nwis site no and locations
nwis_shapename = '..\\_gis\\scratch\\broward_nwis_gw_conc_depth'
shapes, records = shapefile.load_as_dict(nwis_shapename)
nwis_sitenos = records['site_no']
nwis_icbnd = records['icbnd']
#nwis_names = []
#for n in records['station_nm']:
#    n = n.replace(' ','')
#    n = n.replace(',','')
#    nwis_names.append(n)

nwis_pts = []
for shape in shapes:
    pt = Point(shape.points[0])
    nwis_pts.append(pt)
import copy
import os
from datetime import datetime
import pandas
import shapefile

import pestUtil as pu
from bro import flow

#--get the dbhydro sampled monthly series
db_dir = '..\\_dbhydro\\stressperiod_stage_smp_navd\\'
db_files = os.listdir(db_dir)
#db_names = []
db_dict = {}
for dfile in db_files:
    dname = dfile.split('.')[0].upper().strip()
    #db_names.append(dname)
    smp = pu.smp(db_dir+dfile,load=True,pandas=True)
    db_dict[dname] = copy.deepcopy(smp.records)

#--get the coastal stage record - sampled to stress periods
noaa_file = '..\\_noaa\\noaa_sp.smp'
noaa_smp = pu.smp(noaa_file,load=True,pandas=True)
noaa_df = noaa_smp.records

#--load the reach shapefile from swrpre
swr_shapename = '..\\_gis\\scratch\\sw_reaches_conn_SWRpolylines_2'
shp = shapefile.Reader(swr_shapename)
fnames = shapefile.get_fieldnames(swr_shapename)
ibnd_idx,stg_idx,reach_idx = 16,17,22

stg_dict = {}
m_range = flow.sp_end
    #        e = t - d
    #        elevs.append(e)
    #    except:
    #        elevs.append(None)
    d = None
    if hd != '':
        d = float(hd)
    elif wd != '':
        d = float(wd)
    if d is not None:
        e = t - d
        elevs.append(e)
    else:
        elevs.append(None)
#--load the decluster dataframe
smp = pu.smp('..\\..\\_nwis\\navd_declustered.smp', load=True, pandas=True)
df = smp.records

f = open('..\\..\\_nwis\\nwis_navd_bore_coords.dat', 'w', 0)
f_miss = open('..\\..\\_nwis\\navd.missing', 'w', 0)
wr = shapefile.writer_like(shapename)
wr.field('fixed_name', fieldType='C', size=50)
for siteno in df.keys():
    if siteno in sitenos:
        idx = sitenos.index(siteno)
        name = fixed[idx]
        r = rows[idx]
        c = cols[idx]
        g = geom[:, int(float(r)) - 1, int(float(c)) - 1]
        x = xs[idx]
        y = ys[idx]
import pylab
import pestUtil as pu


smp_obs = pu.smp(fname='UMD.01\\obsref\\head\\heads.smp',load=True,date_fmt='%m/%d/%Y')
#smp_mod = pu.smp(fname='UMD.01\\modref\\head\\mheads.smp',load=True,date_fmt='%m/%d/%Y')

smp_filt = pu.load_smp_from_tsproc('processed_mod_vs_obs_biweek_filtered.dat')

#--get a list of 'observation' series names
osites = []
for site in smp_filt.records.keys():
    if site.endswith('_o') and site not in osites:
        osites.append(site)

plt_dir = 'png\\filt'
for site in osites:
    print site
    filt_obs = smp_filt.records[site]
    raw_obs = smp_obs.records[site[:-2].upper()]
    filt_mod = smp_filt.records[site[:-2]]
    #raw_mod = smp_mod.records[site[:-2].upper()]
    #fig = pylab.figure(figsize=(5,5))
    fig = pylab.figure()
    ax = pylab.subplot(111)    
    #ax.plot(filt_obs[:,0],filt_obs[:,1],'b.',color='b')
    ax.plot(filt_obs[:,0],filt_obs[:,1],'b-',label='processed',lw=0.5,color='b')
    
    #ax.plot(filt_mod[:,0],filt_mod[:,1],'g.',color='g')
    #ax.plot(filt_mod[:,0],filt_mod[:,1],'g-',label='modfilt',lw=0.5,color='g')
    ax.plot(raw_obs[:,0],raw_obs[:,1],'g-',label='native',lw=0.5,color='g')
# Example #11
import os
import pestUtil

smp_dir = 'raw_smp\\'
smp_files = os.listdir(smp_dir)

for smp_file in smp_files:
    print smp_file
    smp = pestUtil.smp(smp_dir+smp_file,load=True)
    smp.make_unique()
    smp.save('test\\'+smp_file)
# Example #12
f = open(stg_reach_file, 'r')
reach_dict = {}
header = f.readline()
for line in f:
    raw = line.strip().split(',')
    name = raw[0].upper().replace(' ', '_').replace('-', '')
    if name.endswith('W'):
        name = name[:-1]
    reach_dict[name] = int(raw[1])
f.close()

#parser = lambda x: datetime.strptime(x,tc.DATE_FMT+' %H:%M:%S')
#stage_df = pandas.read_table(stg_obs_file,header=None,parse_dates=[[1,2]],date_parser=parser,sep='\s*')
#stage_df.columns = ['datetime','site','value']

stage_smp = pu.smp(stg_obs_file, date_fmt=tc.DATE_FMT, pandas=True, load=True)
stage_sites = stage_smp.records.keys()
for site in stage_sites:
    if site not in reach_dict.keys():
        print 'site not found in reach dict', site

obs_names = []
mod_names = []
reach_numbers = []
smp_site_names = []
for i, site in enumerate(reach_dict.keys()):
    if site not in stage_sites:
        print 'site not found in smp file', site
        reach_dict.pop(site)
    else:
        obs_names.append('ost_{0:03.0f}or'.format(i + 1))
        raise Exception('Duplicate names: '+n)
    fixed.append(n)

#--build sample elevation list
elevs = []
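#--added note: elevation = cell-top at the bore's (row, col) minus the
#--sample depth; hole depth is used when parseable, else well depth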
for r,c,hd,wd in zip(rows,cols,hdepths,wdepths):
    t = geom[0,int(float(r))-1,int(float(c))-1]
    try:
        d = float(hd)
    except:
        d = float(wd)
    e = t - d
    elevs.append(e)

#--load the thinned dataframe
smp = pu.smp('..\\..\\_nwis\\relconc_thinned.smp',load=True,pandas=True)
df = smp.records


f = open('..\\..\\_nwis\\nwis_bore_coords.dat','w',0)
wr = shapefile.writer_like(shapename)
wr.field('fixed_name',fieldType='C',size=50)
for siteno in df.keys():
    if siteno in sitenos:
        idx = sitenos.index(siteno)
        name = fixed[idx]
        r = rows[idx]
        c = cols[idx]
        g = geom[:,int(float(r))-1,int(float(c))-1]
        x = xs[idx]
        y = ys[idx]
        raise Exception('Duplicate names: ' + n)
    fixed.append(n)

#--build sample elevation list
elevs = []
for r, c, hd, wd in zip(rows, cols, hdepths, wdepths):
    t = geom[0, int(float(r)) - 1, int(float(c)) - 1]
    try:
        d = float(hd)
    except:
        d = float(wd)
    e = t - d
    elevs.append(e)

#--load the decluster dataframe
smp = pu.smp('..\\..\\_nwis\\relconc_declustered.smp', load=True, pandas=True)
df = smp.records

f = open('..\\..\\_nwis\\nwis_conc_bore_coords.dat', 'w', 0)
wr = shapefile.writer_like(shapename)
wr.field('fixed_name', fieldType='C', size=50)
for siteno in df.keys():
    if siteno in sitenos:
        idx = sitenos.index(siteno)
        name = fixed[idx]
        r = rows[idx]
        c = cols[idx]
        g = geom[:, int(float(r)) - 1, int(float(c)) - 1]
        x = xs[idx]
        y = ys[idx]
        e = elevs[idx]
'''for now we assume that flow and seawat have the same row col
'''

K = 200.0          #--hydraulic conductivity
L = 500.0          #--flow-path length
A = 500.0 * 100.0  #--cell face area
cond = K * A / L   #--GHB conductance

print 'loading north,south model-aligned dataframe and interpolation factors'
df_ns_data = pandas.read_csv(
    '..\\..\\_nwis\\dataframes\\ghb_NS_stages_model.csv',
    index_col=0,
    parse_dates=True)
df_ns_fac = pandas.read_csv('ghb_NS_factors.dat', index_col=[0, 1])

#--process stage record for coastal GHBs - presampled
noaa_smp = pu.smp('..\\..\\_noaa\\noaa_sp.smp', load=True)
coastal_stages = noaa_smp.records['noaa'][:, 1]

#--process stage records for WCA ghbs - these records should have been presampled to stress period dimensions
print 'processing EDEN records'
smp_dir = '..\\..\\_eden\\stage_smp_full\\'
smp_files = os.listdir(smp_dir)
eden_ids = []
eden_sp_vals = {}
for sfile in smp_files:
    print 'loading smp file', sfile, '\r',
    eden_id = int(sfile.split('.')[0])
    eden_ids.append(eden_id)
    smp = pu.smp(smp_dir + sfile, load=True)
    rec = smp.records[str(eden_id)]
    #--some defense
import os
import numpy as np
import pandas
import pestUtil as pu
import bro

#--model stress period date range
m_range = bro.sp_end

#--merged daily smp files
smp_dir = 'daily_stage_smp_navd\\'
smp_files = os.listdir(smp_dir)
out_dir = 'stressperiod_stage_smp_navd\\'
dfs = []
for i,sfile in enumerate(smp_files):
    print 'processing ',sfile,i,' of ',len(smp_files),'\r',
    depname = sfile.split('.')[0]
    smp = pu.smp(smp_dir+sfile,load=True,pandas=True)
    
    df = smp.records
    rname = df.keys()[0]
    df[depname] = df[rname]
    df.pop(rname)
    df = df.astype(float)    
    df = df.resample(bro.pandas_freq,how=np.mean)
    #--create dataframe that is aligned with model stress periods
    #--merge in the record and fill with 0.0
    df_mn = pandas.DataFrame({depname:np.NaN},index=m_range)
    df_mn = df_mn.combine_first(df)     
    df_mn = df_mn[bro.start:bro.end]
    df_mn = df_mn.dropna() 
    dfs.append(df_mn)
    smp.records = df_mn
# Example #17
import numpy as np
import pylab
import pestUtil as pu

#--observation smps
obs_files = ['UMD.01\\obsref\\head\\heads.smp']
mod_files = ['UMD.01\\modref\\head\\mheads.smp']
plt_dir = 'png\\'
for obs_file, mod_file in zip(obs_files, mod_files):
    obs_smp = pu.smp(obs_file, load=True, date_fmt='%m/%d/%Y')
    mod_smp = pu.smp(mod_file, load=True, date_fmt='%m/%d/%Y')
    sites = obs_smp.records.keys()
    for site in sites:
        obs = obs_smp.records[site]
        mod = mod_smp.records[site]
        fig = pylab.figure(figsize=(5, 5))
        ax = pylab.subplot(111)
        ax.plot(obs[:, 0], obs[:, 1], 'b-', label='obs')
        ax.plot(mod[:, 0], mod[:, 1], 'g-', label='mod')
        ax.grid()
        ax.legend()
        fname = plt_dir + site + '.png'
        pylab.savefig(fname, dpi=300, format='png', bbox_inches='tight')
        pylab.close('all')
        #break
# Example #18
def main():
    
    #--instance
    tsproc_infile = 'tsproc_setup.dat'
    tsp = tc.tsproc(tsproc_infile,out_file='processed.dat',out_fmt='long')
    if os.path.exists('pest.pst'):
        os.remove('pest.pst')
    pest_mblocks,pest_oblocks = [],[]

   
    

    #---------------------------
    #--heads
    #--------------------------
    print 'processing heads'
    ohead_name = 'smp\\obs\\hds.smp'
    mhead_name = 'smp\\mod\\hds.smp'
    #shutil.copy2(mhead_name,ohead_name)
    
    
    ohead = pu.smp(ohead_name,load=False,site_index=True)    
    mhead = pu.smp(mhead_name,load=False,site_index=True)
    #--some model sites are not included - thin them out

    keep_mlist = []
    names = []
    mlist = list(mhead.site_list)
    olist = list(ohead.site_list)
    for site in mlist:
        if site in olist:
            keep_mlist.append(site)
        else:
            print 'missing mhead site',site
            #missing.append(site)


    oblocks = tsp.get_mul_series_ssf(keep_mlist,ohead_name,suffix='oh',\
        role="final",context=tc.PEST_CONTEXT)
    mblocks = tsp.get_mul_series_ssf(keep_mlist,mhead_name,suffix='sh')
    renamed = tsp.copy_2_series(mblocks,keep_mlist,role='final',wght=1.0)
    pest_oblocks.extend(oblocks)
    pest_mblocks.extend(renamed)

   
    print 'build a list of template and model-equivalent files'
    tpl_dir = 'tpl\\'
    modin_dir = os.path.join("model","ref")
    tpl_files,modin_files = [],[]
    files = os.listdir(tpl_dir)
    for f in files:
        modin_file = f.replace(".tpl",'')
        modin_files.append(os.path.join(modin_dir,modin_file))
        tpl_file = os.path.join(tpl_dir,f)        
        tpl_files.append(tpl_file)
            
    par_file = os.path.join("misc","par.info")
    grp_file = os.path.join("misc","grp.info")

    #--write the model run tsproc file
    print 'writing model run tsproc file'                             
    tsp.set_context('model_run')
    tsp.tsproc_file = 'misc\\tsproc_model_run.dat'
    tsp.write_tsproc()

    #--write the setup tsproc file
    print 'writing setup tsproc file'
    tsp.write_pest(tpl_files,modin_files,pest_oblocks,pest_mblocks,\
                   svd=True,parms=par_file,parm_grp=grp_file,\
                   model_cmd="model.bat")
    
    tsp.set_context(tc.PEST_CONTEXT)
    tsp.tsproc_file = 'misc\\tsproc_setup.dat'
    tsp.write_tsproc()

    f = open(os.path.join('misc','tsproc_setup.in'),'w')
    f.write(os.path.join('misc','tsproc_setup.dat')+'\n'+\
                        os.path.join('misc','tsproc_setup.out')+\
                            '\ny\ny\n')
    f.close()

    f = open(os.path.join('misc','tsproc_model_run.in'),'w')
    f.write(os.path.join('misc','tsproc_model_run.dat')+'\n'+\
            os.path.join('misc','tsproc_model_run.out')+'\ny\ny\n')
    f.close()

    print 'running tsproc'
    os.system(os.path.join("exe",'tsproc.exe')+' <'+os.path.join("misc","tsproc_setup.in")+\
              ' >'+os.path.join("misc","tsproc_screen.out"))
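    #--added note: the .in files written above feed tsproc's interactive
    #--prompts (control file, output file, then two 'y' confirmations) via
    #--stdin redirection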
        raise Exception('Duplicate names: '+n)
    fixed.append(n)

#--build sample elevation list
elevs = []
for r,c,hd,wd in zip(rows,cols,hdepths,wdepths):
    t = geom[0,int(float(r))-1,int(float(c))-1]
    try:
        d = float(hd)
    except:
        d = float(wd)
    e = t - d
    elevs.append(e)

#--load the decluster dataframe
smp = pu.smp('..\\..\\_nwis\\relconc_declustered.smp',load=True,pandas=True)
df = smp.records

f = open('..\\..\\_nwis\\nwis_conc_bore_coords.dat','w',0)
wr = shapefile.writer_like(shapename)
wr.field('fixed_name',fieldType='C',size=50)
for siteno in df.keys():
    if siteno in sitenos:
        idx = sitenos.index(siteno)
        name = fixed[idx]
        r = rows[idx]
        c = cols[idx]
        g = geom[:,int(float(r))-1,int(float(c))-1]
        x = xs[idx]
        y = ys[idx]
        e = elevs[idx]
        raise Exception('Duplicate names: ' + n)
    fixed.append(n)

#--build sample elevation list
elevs = []
for r, c, hd, wd in zip(rows, cols, hdepths, wdepths):
    t = geom[0, int(float(r)) - 1, int(float(c)) - 1]
    try:
        d = float(hd)
    except:
        d = float(wd)
    e = t - d
    elevs.append(e)

#--load the thinned dataframe
smp = pu.smp('..\\..\\_nwis\\relconc_thinned.smp', load=True, pandas=True)
df = smp.records

f = open('..\\..\\_nwis\\nwis_bore_coords.dat', 'w', 0)
wr = shapefile.writer_like(shapename)
wr.field('fixed_name', fieldType='C', size=50)
for siteno in df.keys():
    if siteno in sitenos:
        idx = sitenos.index(siteno)
        name = fixed[idx]
        r = rows[idx]
        c = cols[idx]
        g = geom[:, int(float(r)) - 1, int(float(c)) - 1]
        x = xs[idx]
        y = ys[idx]
        e = elevs[idx]
import os
import numpy as np
import pylab
import shapefile
import pestUtil as pu

#smp_dir = 'pws_smp\\'
#smp_files = os.listdir(smp_dir)
#smp_files = os.listdir(smp_dir)#convert = 7.481/24.0/60.0 #to gpm

smp_dir = '.\\'
smp_files = ['sum.smp']
convert = 7.481/1.0e6/30 #--ft^3/month to MGD: 7.481 gal/ft^3, ~30 days/month
plt_dir = 'png\\'
f_out = open('missing.dat','w')
  
for smp_file in smp_files:
    print smp_file
    smp = pu.smp(smp_dir+smp_file,load=True)
    for site,record in smp.records.iteritems():
        smp.records[site][:,1] *= convert
    ax = smp.plot(None)
    pylab.show()
pass


import os
from datetime import datetime
import pestUtil as pu

date_fmt = '%m/%d/%Y'

smp_dir = 'UMD.01\\obsref\\head\\'
smp_files = os.listdir(smp_dir)

start = datetime(1996, 1, 1, 12)
end = datetime(2004, 12, 31, 12)

site_names = []
f = open('misc\\bore_coords.dat', 'r')
for line in f:
    site_names.append(line.split()[0].strip().upper())
f.close()

obs_smp = pu.smp('UMD.01\\obsref\\head\\ALL_NWIS_GW.smp',
                 date_fmt=date_fmt,
                 load=True)
filt_smp = pu.smp('UMD.01\\obsref\\head\\heads.smp', date_fmt=date_fmt)

f = open('misc\\missing_sites.dat', 'w')
for site in obs_smp.records.keys():
    #drange = obs_smp.get_daterange(site_name=site)
    if site.replace('-', '').strip().upper() in site_names:
        filt_smp.records[site.replace('-', '')] = obs_smp.records[site]

    else:
        print 'missing site', site
        f.write('missing site ' + site + '\n')

f.close()
filt_smp.set_daterange(start, end)
filt_smp.save('UMD.01\\obsref\\head\\heads.smp')
import os
import pandas
import shapefile
import pestUtil as pu


#--get a list of sites that are within the Broward domain
shape_name = '..\\_gis\\scratch\\broward_nwis_gw_conc_depth'
records = shapefile.load_as_dict(shape_name,loadShapes=False)
sitenos = records['site_no']
dir_dict = {'chl':'smp_rel_conc_chl\\','cond':'smp_rel_conc_regressed\\','tds':'smp_rel_conc_tds\\'}
df_dict = {}
for dtype,smp_dir in dir_dict.iteritems():
    files = os.listdir(smp_dir)
    dfs = []
    for f in files:
        
        raw = f.split('.')
        siteno,sitename = raw[1],raw[2]
        if siteno in sitenos:
            print dtype,f
            smp = pu.smp(smp_dir+f,load=True,pandas=True)
            df = smp.records
            df[siteno] = df['site']
            df.pop('site')
            dfs.append(df)
        
    df = pandas.concat(dfs,axis=1)    
    df.to_csv('dataframes\\'+dtype+'.csv',index_label='datetime')
import numpy as np
import pylab
import pandas
import pestUtil as pu

from bro_pred import flow

smp = pu.smp('noaa_sp.smp', load=True, pandas=True)
df = smp.records
df = df.resample('1M', how='mean')
#--use the last value in the dataframe as the base for the projections
start_value = df['noaa'][-1]
print start_value
low_rate = 0.015  #9 inches in 50 years
med_rate = 0.0275  #16.5 inches in 50 years
high_rate = 0.05  #30 inches in 50 years
df_slr = pandas.DataFrame({'no_rise': start_value}, index=flow.sp_end)
nyears = df_slr.index.year - df_slr.index[0].year
df_slr['low_rise'] = start_value + low_rate * nyears
df_slr['high_rise'] = start_value + high_rate * nyears
df_slr['med_rise'] = start_value + med_rate * nyears

df_slr.to_csv('noaa_slr.csv', index_label='datetime')
df_slr.plot()
pylab.show()
import os
import numpy as np
import pylab
import pandas
import pestUtil as pu

obs_dir = "smp\\obs\\"
mod_dir = "smp\\mod\\"
obs_files, mod_files = os.listdir(obs_dir), os.listdir(mod_dir)

obs_color, mod_color = "b", "g"
plt_dir = "png\\obs_vs_sim\\"
for ofile in obs_files:
    assert ofile in mod_files, ofile + " not found in model smp files"
    osmp = pu.smp(obs_dir + ofile, load=True, pandas=True)
    msmp = pu.smp(mod_dir + ofile, load=True, pandas=True)
    odf = osmp.records
    mdf = msmp.records
    #--plotting
    if "NAVD" in ofile.upper():
        ylabel = "water level $ft NAVD$"
        ylim = [-10.0, 10.0]
        plt_prefix = plt_dir + "navd_"
    else:
        ylabel = "relative concentration"
        ylim = [-0.1, 1.0]
        plt_prefix = plt_dir + "conc_"
    for site in odf.keys():
        print site
import os
from datetime import datetime
import pestUtil as pu

date_fmt = '%m/%d/%Y'

smp_dir = 'UMD.01\\obsref\\head\\'
smp_files = os.listdir(smp_dir)

start = datetime(1996,1,1,12)
end = datetime(2004,12,31,12)


site_names = []
f = open('misc\\bore_coords.dat','r')
for line in f:
    site_names.append(line.split()[0].strip().upper())
f.close()

obs_smp = pu.smp('UMD.01\\obsref\\head\\ALL_NWIS_GW.smp',date_fmt=date_fmt,load=True)
filt_smp = pu.smp('UMD.01\\obsref\\head\\heads.smp',date_fmt=date_fmt)

f = open('misc\\missing_sites.dat','w')
for site in obs_smp.records.keys():
    #drange = obs_smp.get_daterange(site_name=site)
    if site.replace('-','').strip().upper() in site_names:
        filt_smp.records[site.replace('-','')] = obs_smp.records[site]

    else:
        print 'missing site',site    
        f.write('missing site '+site+'\n')

f.close()
filt_smp.set_daterange(start,end)    
filt_smp.save('UMD.01\\obsref\\head\\heads.smp')
import pylab
import pestUtil as pu

smp_obs = pu.smp(fname='UMD.01\\obsref\\head\\heads.smp',
                 load=True,
                 date_fmt='%m/%d/%Y')
#smp_mod = pu.smp(fname='UMD.01\\modref\\head\\mheads.smp',load=True,date_fmt='%m/%d/%Y')

smp_filt = pu.load_smp_from_tsproc('processed_mod_vs_obs_biweek_filtered.dat')

#--get a list of 'observation' series names
osites = []
for site in smp_filt.records.keys():
    if site.endswith('_o') and site not in osites:
        osites.append(site)

plt_dir = 'png\\filt'
for site in osites:
    print site
    filt_obs = smp_filt.records[site]
    raw_obs = smp_obs.records[site[:-2].upper()]
    filt_mod = smp_filt.records[site[:-2]]
    #raw_mod = smp_mod.records[site[:-2].upper()]
    #fig = pylab.figure(figsize=(5,5))
    fig = pylab.figure()
    ax = pylab.subplot(111)
    #ax.plot(filt_obs[:,0],filt_obs[:,1],'b.',color='b')
    ax.plot(filt_obs[:, 0],
            filt_obs[:, 1],
            'b-',
            label='processed',
            lw=0.5,
            color='b')
import numpy
from datetime import datetime
import pandas
import pestUtil
reload(pestUtil)

#--load a single site
smp = pestUtil.smp('heads_gap.out')
#smp = pestUtil.smp('s20_h_dbkey13037.smp',date_fmt='%m/%d/%Y')
site_names, offset_idx = smp.get_unique_from_file(smp.site_index,
                                                  needindices=True)
#--load only one site
site_0 = smp.load(site=site_names[0])

#--load a single site with pandas
smpp = pestUtil.smp('heads_gap.out', pandas=True)
#smp = pestUtil.smp('s20_h_dbkey13037.smp',date_fmt='%m/%d/%Y')
site_names, offset_idx = smpp.get_unique_from_file(smp.site_index,
                                                   needindices=True)
#--load only one site
site_0p = smpp.load(site=site_names[0])

#--load all sites
smp2 = pestUtil.smp('heads_gap.out', load=True)
#smp2 = pestUtil.smp('s20_h_dbkey13037.smp',date_fmt='%m/%d/%Y',load=True)
site_0, dates, vals = smp2.get_site(site_names[0])

#--load all sites with pandas
smp2p = pestUtil.smp('heads_gap.out', load=True, pandas=True)
#smp2 = pestUtil.smp('s20_h_dbkey13037.smp',date_fmt='%m/%d/%Y',load=True)
site_0, datesp, valsp = smp2p.get_site(site_names[0])
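#--added note: the .smp files above follow the PEST site sample file
#--layout (one record per line: site id, date, time, value);
#--heads_gap.out is assumed to be in that format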
import copy
import os
from datetime import datetime
import pandas
import shapefile

import pestUtil as pu
from bro import flow

#--get the dbhydro sampled monthly series
db_dir = '..\\_dbhydro\\stressperiod_stage_smp_navd\\'
db_files = os.listdir(db_dir)
#db_names = []
db_dict = {}
for dfile in db_files:
    dname = dfile.split('.')[0].upper().strip()
    #db_names.append(dname)
    smp = pu.smp(db_dir + dfile, load=True, pandas=True)
    db_dict[dname] = copy.deepcopy(smp.records)

#--get the coastal stage record - sampled to stress periods
noaa_file = '..\\_noaa\\noaa_sp.smp'
noaa_smp = pu.smp(noaa_file, load=True, pandas=True)
noaa_df = noaa_smp.records

#--load the reach shapefile from swrpre
swr_shapename = '..\\_gis\\scratch\\sw_reaches_conn_SWRpolylines_2'
shp = shapefile.Reader(swr_shapename)
fnames = shapefile.get_fieldnames(swr_shapename)
ibnd_idx, stg_idx, reach_idx = 16, 17, 22

stg_dict = {}
m_range = flow.sp_end
    #        e = t - d
    #        elevs.append(e)
    #    except:
    #        elevs.append(None)  
    d = None
    if hd != '':
        d = float(hd)
    elif wd != '':
        d = float(wd)
    if d is not None:
        e = t - d
        elevs.append(e)
    else:
        elevs.append(None)                   
#--load the decluster dataframe
smp = pu.smp('..\\..\\_nwis\\navd_declustered.smp',load=True,pandas=True)
df = smp.records

f = open('..\\..\\_nwis\\nwis_navd_bore_coords.dat','w',0)
f_miss = open('..\\..\\_nwis\\navd.missing','w',0)
wr = shapefile.writer_like(shapename)
wr.field('fixed_name',fieldType='C',size=50)
for siteno in df.keys():
    if siteno in sitenos:
        idx = sitenos.index(siteno)
        name = fixed[idx]
        r = rows[idx]
        c = cols[idx]
        g = geom[:,int(float(r))-1,int(float(c))-1]
        x = xs[idx]
        y = ys[idx]
import os
import numpy as np
import pylab
import shapefile
import pestUtil as pu

#smp_dir = 'pws_smp\\'
#smp_files = os.listdir(smp_dir)
#smp_files = os.listdir(smp_dir)#convert = 7.481/24.0/60.0 #to gpm

smp_dir = '.\\'
smp_files = ['sum.smp']
convert = 7.481 / 1.0e6 / 30  #--ft^3/month to MGD: 7.481 gal/ft^3, ~30 days/month
plt_dir = 'png\\'
f_out = open('missing.dat', 'w')

for smp_file in smp_files:
    print smp_file
    smp = pu.smp(smp_dir + smp_file, load=True)
    for site, record in smp.records.iteritems():
        smp.records[site][:, 1] *= convert
    ax = smp.plot(None)
    pylab.show()
pass
import os
import numpy as np
#from dateutil.relativedelta import relativedelta
from datetime import timedelta
import pandas
from shapely.geometry import Point
import shapefile
import pestUtil as pu
from bro import seawat

'''spatially declusters relconc data and thins out sites that are not in the active domain
sites that are missing depth info should not be in the shapefile
'''

#--load the relconc smp for spatial declustering
df = pu.smp('relconc.smp',load=True,pandas=True).records

#--get the nwis site no and locations
nwis_shapename = '..\\_gis\\scratch\\broward_nwis_gw_conc_depth'
shapes,records = shapefile.load_as_dict(nwis_shapename)
nwis_sitenos = records['site_no']
nwis_ibnd = records['icbnd']
#nwis_names = []
#for n in records['station_nm']:
#    n = n.replace(' ','')
#    n = n.replace(',','')
#    nwis_names.append(n)

nwis_pts = []
for shape in shapes:
    pt = Point(shape.points[0])
    nwis_pts.append(pt)
                site_salt_idxs[site_salt_params.index('chloride')]
            ]
        if 'dissolved' in site_salt_params and 'chloride' in site_salt_params:
            tds_chl_idxs[site_no] = [
                site_salt_idxs[site_salt_params.index('dissolved')],
                site_salt_idxs[site_salt_params.index('chloride')]
            ]

#--load the paired salt files and look for data on the same day
spec_data, chl_data = [], []
dfs = []
site_nos = []
for site_no, idxs in spec_chl_idxs.iteritems():
    print 'loading smp files:', smp_files[idxs[0]], smp_files[idxs[1]]
    spec_smp = pestUtil.smp(smp_in_dir + smp_files[idxs[0]],
                            load=True,
                            pandas=True)
    chl_smp = pestUtil.smp(smp_in_dir + smp_files[idxs[1]],
                           load=True,
                           pandas=True)
    #--filter obvious outliers
    for dt, val in spec_smp.records['site'].iteritems():
        if val > 50000:
            spec_smp.records['site'][dt] = np.NaN
    df = pandas.DataFrame({
        'spec': spec_smp.records['site'],
        'chl': chl_smp.records['site']
    })
    df = df.dropna()
    if not df.empty:
        dfs.append(df)
        site_nos.append(site_no)
#--plot relative concentration records - combined plt of chl, spec, and tds
for file in unique_files:
    files = []
    types = []
    reclens = []
    if file in chl_files:
        files.append(chl_dir+file)
        types.append('chl')
    if file in spec_files:
        files.append(spec_dir+file)
        types.append('spec')
    if file in tds_files:
        files.append(tds_dir+file)
        types.append('tds')
    smp = pestUtil.smp(files[0],load=True,pandas=True)
    smp.records[types[0]] = smp.records['site']
    smp.records.pop('site')
    reclens.append(types[0]+' '+str(len(smp.records[types[0]])))
    #--if more than one file was found
    if len(files) > 1:                
        for file,dtype in zip(files[1:],types[1:]):
            other = pestUtil.smp(file,load=True,pandas=True)
            other.records[dtype] = other.records['site']
            other.records.pop('site')
            reclens.append(dtype+' '+str(len(other.records[dtype])))
            smp.records = pandas.merge(smp.records,other.records,left_index=True,right_index=True)
            print
    
    if not smp.records.empty:
        try:
'''for now we assume that flow and seawat have the same row col
'''


K = 200.0          #--hydraulic conductivity
L = 500.0          #--flow-path length
A = 500.0 * 100.0  #--cell face area
cond = K * A / L   #--GHB conductance

print 'loading north,south model-aligned dataframe and interpolation factors'
df_ns_data = pandas.read_csv('..\\..\\_nwis\\dataframes\\ghb_NS_stages_model.csv',index_col=0,parse_dates=True)
df_ns_fac = pandas.read_csv('ghb_NS_factors.dat',index_col=[0,1])


#--process stage record for coastal GHBs - presampled
noaa_smp = pu.smp('..\\..\\_noaa\\noaa_sp.smp',load=True)
coastal_stages = noaa_smp.records['noaa'][:,1]

#--process stage records for WCA ghbs - these records should have been presampled to stress period dimensions
print 'processing EDEN records'
smp_dir = '..\\..\\_eden\\stage_smp_full\\'
smp_files = os.listdir(smp_dir)
eden_ids = []
eden_sp_vals = {}
for sfile in smp_files:
    print 'loading smp file',sfile,'\r',
    eden_id = int(sfile.split('.')[0])
    eden_ids.append(eden_id)
    smp = pu.smp(smp_dir+sfile,load=True)
    rec = smp.records[str(eden_id)]
    #--some defense 
import os
import pandas
import shapefile
import pestUtil as pu

#--get a list of sites that are within the Broward domain
shape_name = '..\\_gis\\scratch\\broward_nwis_gw_conc_depth'
records = shapefile.load_as_dict(shape_name, loadShapes=False)
sitenos = records['site_no']
dir_dict = {
    'chl': 'smp_rel_conc_chl\\',
    'cond': 'smp_rel_conc_regressed\\',
    'tds': 'smp_rel_conc_tds\\'
}
df_dict = {}
for dtype, smp_dir in dir_dict.iteritems():
    files = os.listdir(smp_dir)
    dfs = []
    for f in files:

        raw = f.split('.')
        siteno, sitename = raw[1], raw[2]
        if siteno in sitenos:
            print dtype, f
            smp = pu.smp(smp_dir + f, load=True, pandas=True)
            df = smp.records
            df[siteno] = df['site']
            df.pop('site')
            dfs.append(df)

    df = pandas.concat(dfs, axis=1)  #--axis=1 aligns the per-site series on the datetime index
    df.to_csv('dataframes\\' + dtype + '.csv', index_label='datetime')
import copy
import os
import numpy as np
import pandas
import shapefile

import pestUtil as pu
from bro_pred import flow

#--get the dbhydro sampled monthly series and calc average monthly values
db_dir = '..\\_dbhydro\\stressperiod_stage_smp_navd\\'
db_files = os.listdir(db_dir)
#db_names = []
db_dict = {}
for dfile in db_files:
    dname = dfile.split('.')[0].upper().strip()
    #db_names.append(dname)
    smp = pu.smp(db_dir + dfile, load=True, pandas=True)
    df = smp.records
    site_name = df.keys()[0]
    df_monthly = df.groupby(lambda x: x.month).mean()
    df['monthly'] = np.NaN
    for dt in df.index:
        df['monthly'][dt] = df_monthly[site_name][dt.month]
    df[site_name] = df['monthly']
    df.pop('monthly')
    db_dict[dname] = copy.deepcopy(df)

#--get the coastal stage record - sampled to stress periods
#noaa_file = '..\\_noaa\\noaa_slr.smp'
#noaa_smp = pu.smp(noaa_file,load=True,pandas=True)
#noaa_df = noaa_smp.records
noaa_df = pandas.read_csv('..\\_noaa\\noaa_slr.csv',
                          index_col=0,
                          parse_dates=True)
# Example #38
tpl_files.append('tpl\\SWR_Dataset11.tpl')

#--start and end
start = datetime(1997,1,1,hour=12)
end = datetime(2010,12,31,hour=12)
start_str = start.strftime(tc.DATE_FMT)
end_str = end.strftime(tc.DATE_FMT)

date_dir = 'date_files\\'

#--instance
tsproc_infile = 'tsproc_setup.dat'
tsp = tc.tsproc(tsproc_infile,out_file='processed.dat',out_fmt='long')

hobs_file = 'UMD.01\\obsref\\head\\heads.smp'
hobs_smp = pu.smp(hobs_file,date_fmt = tc.DATE_FMT,load=True)
hobs_start,hobs_end = hobs_smp.get_daterange(site_name='all',startmin=start,endmax=end)

mobs_file = 'UMD.01\\modref\\head\\mheads.smp'
mobs_smp = pu.smp(mobs_file,date_fmt = tc.DATE_FMT,load=True)

site_names = hobs_smp.records.keys()

#--generate base names for processing
obs_names = []
mod_names = []
for i,s in enumerate(site_names):
    obs_names.append('ogw_{0:03.0f}or'.format(i+1))
    mod_names.append('mgw_{0:03.0f}or'.format(i+1))
    
#--write the load series block
import copy
import os
import pestUtil as pu


date_fmt = '%m/%d/%Y'

smp_dir = 'UMD.01\\obsref\\head\\'
smp_files = os.listdir(smp_dir)

obs_smp = pu.smp('UMD.01\\obsref\\head\\heads.smp',date_fmt=date_fmt)
mod_smp = pu.smp('UMD.01\\modref\\head\\heads.smp',date_fmt=date_fmt)
fcount = 1
for smp_file in smp_files:
    smp = pu.smp(smp_dir+smp_file,date_fmt=date_fmt,load=True)
    for site,record in smp.records.iteritems():
        obs_name = site
        mod_name = copy.deepcopy(site)
        if len(obs_name) > 7:
            print 'truncate',obs_name
            obs_name = obs_name[:7]
        if obs_name in obs_smp.records.keys():
            fc_str = str(fcount)
            obs_name =  fc_str + obs_name[:-len(fc_str)]
            fcount += 1
            print 'duplicate',obs_name
        obs_smp.records[obs_name] = record
        #mod_smp.records[obs_name] = record
obs_smp.save('UMD.01\\obsref\\head\\heads.smp')
#mod_smp.save('UMD.01\\modref\\head\heads.smp')
png_idx = 15
smp_dir = 'smp_waterlevel_navd\\'
for line in f:
    raw = line.strip().split(',')
    png_name = raw[png_idx]
    raw = png_name.split('\\')
    site_no = raw[-1].split('.')[1]
    smp_name = smp_dir + raw[-1].replace('png', 'smp').replace('"', '')
    assert os.path.exists(smp_name)
    smp_files[site_no] = (smp_name)

#--load each smp file and concat into a single df
dfs = []
null_dict = {}
for site_no, smp in smp_files.iteritems():
    df = pu.smp(smp, load=True, pandas=True).records
    df[site_no] = df['site']
    df.pop('site')

    dfs.append(df.dropna())
    null_dict[site_no] = np.NaN
df_south = pandas.concat(dfs, axis=1)

#--calc julian day means
jd_means = df_south.groupby(lambda x: x.timetuple()[7]).mean()  #--index 7 is tm_yday (day of year)

#--create a null daily df over the model time span
dr = pandas.date_range(bro.start, bro.end, freq='1D')
df_daily = pandas.DataFrame(null_dict, index=dr)

#--fill in where we have data
parsed_smp_names = []
for file in smp_files:
    parsed = parse_smp_filename(file) 
    parsed_smp_names.append(parsed)


#--get a list of navd files
navd_files = []
for file,attributes in zip(smp_files,parsed_smp_names):
    if attributes['param'] == 'navd':
        navd_files.append(file)


#--write new smp files NGVD to NAVD
ngvd2navd = -1.5
for file,attributes in zip(smp_files,parsed_smp_names):
    if attributes['param'] == 'ngvd':
        smp = pestUtil.smp(smp_in_dir+file,load=True)
        smp.records['site'][:,1] += ngvd2navd
        attributes['param'] = 'navd'
        new_name = build_smp_filename(attributes)
        if new_name in navd_files:
            other_smp = pestUtil.smp(smp_in_dir+new_name,load=True)            
            smp.merge(other_smp)
            #--remove other smp from the navd list
            navd_files.pop(navd_files.index(new_name))            
        smp.save(smp_out_dir+new_name)
        #print
for file in navd_files:
    shutil.copy(smp_in_dir+file,smp_out_dir+file)
sitenos = {}
drange = pandas.date_range(seawat.start,seawat.end,freq='1D')
for df in dfs:
    for siteno in df.keys():
        if siteno not in sitenos.keys():
            sitenos[siteno] = np.NaN
df_mod = pandas.DataFrame(sitenos,index=drange)

#--fill the null df by merging the data, starting with most accurate
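#--(combine_first only fills NaNs, so frames earlier in dfs take precedence)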
for df in dfs:
    df_mod = df_mod.combine_first(df)
non_nans = df_mod.count()
print non_nans.sum()

df_mod.to_csv(df_dir+'relconc_merged_daily.csv',index_label='datetime')
smp = pu.smp(None,load=False,pandas=True)
smp.records = df_mod
smp.save('relconc.smp',dropna=True)

#--not using yet...going to try to use mod2obs to interpolate...we'll see...
#--thin the data if more than one value in a month
#groups = df_mod.groupby([lambda x:x.year,lambda x:x.month]).count()
#print groups
#for tup,count_rec in groups.iteritems():
#    if count_rec.count() > 1:
#        print tup
#        print count_rec


#df_mod_resamp = df_mod.resample(seawat.pandas_freq,how='mean')
#non_nans = df_mod_resamp.count()
# Example #43
import os
import numpy as np
import pandas
import pestUtil as pu
import bro

#--model stress period date range
m_range = bro.sp_end

#--merged daily smp files
smp_dir = 'daily_stage_smp_navd\\'
smp_files = os.listdir(smp_dir)
out_dir = 'stressperiod_stage_smp_navd\\'
dfs = []
for i, sfile in enumerate(smp_files):
    print 'processing ', sfile, i, ' of ', len(smp_files), '\r',
    depname = sfile.split('.')[0]
    smp = pu.smp(smp_dir + sfile, load=True, pandas=True)

    df = smp.records
    rname = df.keys()[0]
    df[depname] = df[rname]
    df.pop(rname)
    df = df.astype(float)
    df = df.resample(bro.pandas_freq, how=np.mean)
    #--create dataframe that is aligned with model stress periods
    #--merge in the record and fill with 0.0
    df_mn = pandas.DataFrame({depname: np.NaN}, index=m_range)
    df_mn = df_mn.combine_first(df)
    df_mn = df_mn[bro.start:bro.end]
    df_mn = df_mn.dropna()
    dfs.append(df_mn)
    smp.records = df_mn
# Example #44
import datetime as dt
import os
import pestUtil as pu

dry_month_start = 11
dry_month_end = 6
#--dimensions
nlay, nrow, ncol = 3, 189, 101
start_date = dt.datetime(year=1996, month=1, day=1, hour=12)
end_date = dt.datetime(year=2010, month=12, day=31, hour=12)
#end_date = dt.datetime(year=1998,month=1,day=31,hour=12)
num_days = end_date - start_date

extract_well = ['SC', 'SW', 'ORR']
#extract_well = ['SC']

#--read pumpage data from smp files
smp_ref = os.path.join('D:/', 'Data', 'Users', 'jdhughes', 'Projects',
                       '2080DBF00', 'UMD', 'Data', 'WEL',
                       'pumpwell2_extendedto2010.smp')
smp = pu.smp(smp_ref, load=True, date_fmt='%m/%d/%Y')
site_names = smp.get_unique_from_file(smp.site_index)

WellQ = 0.0
WellQDry = 0.0
IsDry = True
DryDays = 0
for iday in xrange(0, num_days.days + 1):
    ondate = start_date + dt.timedelta(days=iday)
    if IsDry:
        if ondate.month == dry_month_end:
            IsDry = False
    else:
        if ondate.month == dry_month_start:
            IsDry = True
    if IsDry:
# Example #45
#--initialize data
inflow.fill(Missing)
outflow.fill(Missing)
sim.fill(Missing)
pools = []
pools_dict = {}
#--get smp inflow files from xml file
processInflow = False
child = swbudget.find('inflow_items')
if child is not None:
    processInflow = True
    for childitem in child.findall('smpitem'):
        print 'processing inflow...{0}'.format(childitem.text)
        #--get the smp file data
        smpstation = os.path.basename(childitem.text).replace('.smp', '')
        smp = pestUtil.smp(childitem.text, load=True, date_fmt='%m/%d/%Y')
        inflow = dataAdd(sim_dates, inflow, smp.records[smpstation][:, 0],
                         smp.records[smpstation][:, 1])
#--get smp outflow files from xml file
child = swbudget.find('outflow_items')
if child is not None:
    for childitem in child.findall('smpitem'):
        print 'processing outflow...{0}'.format(childitem.text)
        #--get the smp file data
        smpstation = os.path.basename(childitem.text).replace('.smp', '')
        smp = pestUtil.smp(childitem.text, load=True, date_fmt='%m/%d/%Y')
        outflow = dataAdd(sim_dates, outflow,
                          smp.records[smpstation][:, 0],
                          smp.records[smpstation][:, 1])
#--get station data from xml file
station = swbudget.attrib['name']
while on_date < end_date:
    on_date += timedelta(days=1.)
    plot_dates.append( on_date )
plot_dates = np.array( plot_dates )

#--initialize figure and figure/plot counters
ifigure = 1
iplot = 1
nplots = 6
fig = Make_NewFigure()
#--matplotlib date specification
years, months = mdates.YearLocator(), mdates.MonthLocator()  #every year, every month
yearsFmt = mdates.DateFormatter('%Y')

#--open smp file
smp = pestUtil.smp(ObsFile,load=False,date_fmt='%m/%d/%Y')
#--process each station in the xml file
iwell = 0
pct_lay = np.empty( (nlay), np.float )
active_gwobs = []
active_gwobs_stats = []
for idx,gwhead in enumerate( root.findall('gwHead') ):
    station = gwhead.attrib['name']
    print 'Locating {0} in the model grid'.format( station )
    coordType = gwhead.find('coordType').text
    #--determine the location of the well
    if coordType.lower() == 'model':
        #--convert to zero based
        irow = int( gwhead.find('row').text ) - 1
        icol = int( gwhead.find('column').text ) - 1
    elif coordType.lower() == 'site':
# Example #47


#--for each shape, look for matching records
for record,shape in zip(records,shapes):
    #site_no = records['site_no'][i]
    site_no = record[site_no_idx]
    
    site_type = record[site_type_idx]

    #--look for a matching navd smp file
    record.append(-1)
    record.append('')
    for filename,attrib,pltname in zip(navd_files,navd_attribs,navd_pltnames):
        if attrib['site_no'] == site_no:
            smp = pestUtil.smp(navd_dir+filename,load=True,pandas=True)
            record[-2] = smp.records.shape[0]
            record[-1] = pltname
            break
    
    #--look for a matching conc smp file
    record.append(-1)
    record.append('')
    for filename,attrib,pltname in zip(conc_files,conc_attribs,conc_pltnames):
        if attrib['site_no'] == site_no:
            smp = pestUtil.smp(conc_dir+filename,load=True,pandas=True)
            record[-2] = smp.records.shape[0]
            record[-1] = pltname
            break
    if site_type.upper().startswith('GW'):
        wr_gw.poly([shape.points],shapeType=shape.shapeType)
f.close()


model_start = grid.start
obs_start = grid.start + timedelta(days=14)
obs_end = grid.sp_end[-2]
pred_start = grid.sp_end[-2]
pred_end = grid.end

date_dir = 'date_files\\'
tsproc_infile = 'tsproc_setup.dat'
tsp = tc.tsproc(tsproc_infile,out_file='processed.dat',out_fmt='long')

pest_oblocks,pest_mblocks = [],[]
hobs_file = '_misc\\heads.smp'
hobs_smp = pu.smp(hobs_file,date_fmt=tc.DATE_FMT,load=True)

mobs_file = 'mheads.smp'
mobs_smp = pu.smp(mobs_file,date_fmt=tc.DATE_FMT,load=True)

full_file = date_dir+'full_range.dat'
tc.write_date_file(full_file,obs_start,obs_end,None)                                            

pred_file = date_dir+'pred_range.dat'
tc.write_date_file(pred_file,pred_start,pred_end,None)                                            


#--swr - for prediction
ost_names = ['obf_1']
mst_names = ['mbf_1']
rgp_nums = [1]
import datetime as dt
import os
import pestUtil as pu

#--main script
dry_month_start = 11
dry_month_end = 6
#--dimensions
nlay,nrow,ncol = 3,189,101
start_date = dt.datetime(year=1996,month=1,day=1,hour=12)
end_date = dt.datetime(year=2010,month=12,day=31,hour=12)
#end_date = dt.datetime(year=1998,month=1,day=31,hour=12)
num_days = end_date - start_date

extract_well = ['SC', 'SW', 'ORR']
#extract_well = ['SC']

#--read pumpage data from smp files
smp_ref = os.path.join( 'D:/','Data','Users','jdhughes','Projects','2080DBF00','UMD','Data','WEL', 'pumpwell2_extendedto2010.smp')
smp = pu.smp(smp_ref,load=True,date_fmt='%m/%d/%Y')
site_names = smp.get_unique_from_file(smp.site_index)

WellQ = 0.0
WellQDry = 0.0
IsDry = True
DryDays = 0
for iday in xrange(0,num_days.days+1):
    ondate = start_date + dt.timedelta(days=iday)
    if IsDry:
        if ondate.month == dry_month_end:
            IsDry = False
    else:
        if ondate.month == dry_month_start:
            IsDry = True
    if IsDry:
# Example #50
    df = dfs[wnum]
    gm = geom[:, int(float(row)) - 1, int(float(col)) - 1]
    midpts, groups = group_intervals(df.keys(), gm)
    mod_dicts = {}
    for ilay, group in groups.iteritems():
        sname = 'ftl' + str(wnum) + '_' + str(ilay + 1) + 'L'
        print sname
        line = sname + ' {0:20.8G}  {1:20.8G} {2:d}\n'.format(x, y, ilay + 1)
        bore_coords_lines.append(line)
        #print seawat.layer_botm_names[ilay]
        mod_dicts[sname] = df[group].mean(axis=1).dropna()
    df = pandas.DataFrame(mod_dicts)
    df.to_csv(df_dir + str(wnum) + '_mod.csv', index_label='datetime')
    mod_dfs.append(df)
df = pandas.concat(mod_dfs, axis=1)
smp = pu.smp(None, load=False, pandas=True)
smp.records = df
smp_dir = '..\\..\\_ftl_salt\\'
smp.save(smp_dir + 'ftl_cali.smp', dropna=True)

f = open(smp_dir + 'ftl_borecoords.dat', 'w', 0)
for line in bore_coords_lines:
    f.write(line)

for wnum, df in zip(wellnums, mod_dfs):
    fig = pylab.figure()
    ax = pylab.subplot(111)
    for site, record in df.iteritems():
        print site
        ax.plot(record.index, record.values, '.', label=site)
    ax.grid()
f = open(stg_reach_file,'r')
reach_dict = {}
header = f.readline()
for line in f:
    raw = line.strip().split(',')
    name = raw[0].upper().replace(' ','_').replace('-','')
    if name.endswith('W'):
        name = name[:-1]
    reach_dict[name] = int(raw[1])
f.close()

#parser = lambda x: datetime.strptime(x,tc.DATE_FMT+' %H:%M:%S')
#stage_df = pandas.read_table(stg_obs_file,header=None,parse_dates=[[1,2]],date_parser=parser,sep='\s*')
#stage_df.columns = ['datetime','site','value']

stage_smp = pu.smp(stg_obs_file,date_fmt=tc.DATE_FMT,pandas=True,load=True)
stage_sites = stage_smp.records.keys()
for site in stage_sites:
    if site not in reach_dict.keys():
        print 'site not found in reach dict',site

obs_names = []
mod_names = []
reach_numbers = []
smp_site_names = []
for i,site in enumerate(reach_dict.keys()):
    if site not in stage_sites:
        print 'site not found in smp file',site
        reach_dict.pop(site)
    else:            
        obs_names.append('ost_{0:03.0f}or'.format(i+1))    
    #--check for different types of salt data
    if len(site_salt_params) > 1:
        #--for spec and chloride
        if 'spec' in site_salt_params and 'chloride' in site_salt_params:
            spec_chl_idxs[site_no] = [site_salt_idxs[site_salt_params.index('spec')],site_salt_idxs[site_salt_params.index('chloride')]]
        if 'dissolved' in site_salt_params and 'chloride' in site_salt_params:
            tds_chl_idxs[site_no] = [site_salt_idxs[site_salt_params.index('dissolved')],site_salt_idxs[site_salt_params.index('chloride')]]


#--load the paired salt files and look for data on the same day
spec_data,chl_data = [],[]
dfs = []
site_nos = []
for site_no,idxs in spec_chl_idxs.iteritems():
    print 'loading smp files:',smp_files[idxs[0]],smp_files[idxs[1]]
    spec_smp = pestUtil.smp(smp_in_dir+smp_files[idxs[0]],load=True,pandas=True)
    chl_smp = pestUtil.smp(smp_in_dir+smp_files[idxs[1]],load=True,pandas=True)
    #--filter obvious outliers
    for dt,val in spec_smp.records['site'].iteritems():
        if val > 50000:
            spec_smp.records['site'][dt] = np.NaN
    df = pandas.DataFrame({'spec':spec_smp.records['site'],'chl':chl_smp.records['site']})    
    df = df.dropna()
    if not df.empty:
        dfs.append(df)
        site_nos.append(site_no)
       
df = pandas.concat(dfs,keys=site_nos) 

#--calc a filtered global regression equation
x,y = df['spec'].values.astype(np.float64),df['chl'].values.astype(np.float64)
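#--a minimal sketch (assumed illustration; the original regression code is
#--truncated here) of a simple linear fit of chloride on specific conductance
coeffs = np.polyfit(x, y, 1)
print 'chl ~ {0:.4g} * spec + {1:.4g}'.format(coeffs[0], coeffs[1])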
import os
import shutil
import pandas
import pylab
import pestUtil as pu


mod2obs_ins = ['mod2obs.in','mod2obs_l.in','mod2obs_rc.in']
names = ['base','layer','rowcol']
colors = ['k','b','g']
dfs = []
exe = 'mod2obs.exe'
for m2o_in,name in zip(mod2obs_ins,names):
    cmd_line = exe + ' < ' + m2o_in
    print cmd_line
    os.system(cmd_line)
    shutil.copy('mheads.smp',name+'.smp')
    smp = pu.smp('mheads.smp',load=True,pandas=True)
    dfs.append(smp.records)

for site in dfs[0].columns:
    fig = pylab.figure(figsize=(6,6))
    ax = pylab.subplot(111)
    for df,name,color in zip(dfs,names,colors):
        ax.plot(df[site].index,df[site].values,color=color)
    ax.set_title(site)
    ax.legend(names)
    pylab.savefig('png\\'+site+'.png',format='png',dpi=300,bbox_inches='tight')