def load_shape_arrays(shapename, rowname='row', colname='column'):
    #--get the field names and make sure rowname and colname are found
    field_names = shapefile.get_fieldnames(shapename)
    assert rowname in field_names
    assert colname in field_names

    #--get the decimal of each field - to get the array type later
    grid_shp = shapefile.Reader(shapename)
    header = grid_shp.dbfHeader()
    h_dict = {}
    for item in header:
        h_dict[item[0]] = int(item[-1])

    #--load all of the records as a dict
    records = shapefile.load_as_dict(shapename, loadShapes=False)

    #--get nrow and ncol
    nrow = max(records[rowname])
    ncol = max(records[colname])

    #--row and column maps
    row, col = records[rowname], records[colname]

    #--setup a dict for all of the arrays and map the values
    array_dict = {}
    for key, record in records.iteritems():

        decimal = h_dict[key]
        if decimal == 0:
            arr = np.zeros((nrow, ncol), dtype=np.int)
        else:
            arr = np.zeros((nrow, ncol))
        print key, arr.dtype
        try:
            for r, c, val in zip(row, col, record):
                arr[r - 1, c - 1] = val
            array_dict[key] = arr.copy()
        except:
            print 'couldnt cast ' + str(key) + ' field to array'

    return array_dict
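#--a minimal usage sketch (editor's illustration, assuming the modified
#--pyshp module with load_as_dict and numpy imported as np): build the
#--arrays from the grid shapefile used elsewhere on this page and dump
#--each one to a reference file
if __name__ == '__main__':
    arrays = load_shape_arrays('..\\shapes\\join_all2')
    for name, arr in arrays.iteritems():
        np.savetxt('ref\\' + name + '.ref', arr, fmt='%15.6E')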
import pandas
from shapely.geometry import Polygon
import shapefile

nex_shapename = '..\\shapes\\NEXRAD_pixels_tsala'
grid_shapename = '..\\shapes\\join_all2'

df = pandas.read_csv('NEXRAD.csv', nrows=1)
df_keys = list(df.keys())

#--build nexrad polygons - just duplicate multipart polys
print 'loading grid shapefile'
nex_shapes, nex_recs = shapefile.load_as_dict(nex_shapename)
nex_polys, nex_pixelnums = [], []
print 'building nexrad polygons'
for shape, pnum in zip(nex_shapes, nex_recs['Pixel']):
    if str(pnum) in df_keys:
        if len(shape.parts) > 1:
            points = shape.points
            #--part start indices; append the point count so the final
            #--part, which runs to the end of the point list, is kept
            bounds = list(shape.parts) + [len(points)]
            for i1, i2 in zip(bounds[:-1], bounds[1:]):
                poly = Polygon(points[i1:i2])
                if not poly.is_valid:
                    raise Exception('invalid nexrad geometry ' + str(pnum))
                nex_polys.append(poly)
                nex_pixelnums.append(pnum)
            #raise Exception('multipart nexrad shape'+str(rec))
        else:
            poly = Polygon(shape.points)
            if not poly.is_valid:
                raise Exception('invalid nexrad geometry' + str(pnum))
            nex_polys.append(poly)
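#--a hedged sketch (editor's illustration) of the area-weighting step these
#--polygons typically feed: intersect one grid-cell polygon with the pixel
#--polygons to get pixel numbers and the fraction of the cell each covers
def pixel_fractions(cell_poly, pixel_polys, pixel_nums):
    fracs = []
    for poly, pnum in zip(pixel_polys, pixel_nums):
        a = cell_poly.intersection(poly).area
        if a > 0.0:
            fracs.append((pnum, a / cell_poly.area))
    return fracs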
Example 4
mpl.rcParams['font.monospace'] = 'Courier New'
mpl.rcParams['pdf.compression'] = 0
mpl.rcParams['pdf.fonttype'] = 42

ticksize = 6
mpl.rcParams['legend.fontsize'] = 6
mpl.rcParams['axes.labelsize'] = 8
mpl.rcParams['xtick.labelsize'] = ticksize
mpl.rcParams['ytick.labelsize'] = ticksize

#--load well locations and pandas dataframe
well_shapename = '..\\..\\_gis\\shapes\\pws_combine'
well_points = sf.load_shape_list(well_shapename)
#shp = sf.reader(well_shapename)
#print sf.get_fieldnames(well_shapename)
records = sf.load_as_dict(well_shapename, loadShapes=False)
well_names = records['DPEP_NAME']
well_zbots = records['zbot']
float_zbots = []
for i, wb in enumerate(well_zbots):
    float_zbots.append(float(wb))
well_zbots = np.array(float_zbots)
well_rows, well_cols = records['row'], records['column']
pump = pandas.read_csv('..\\..\\_pumpage\\pws_filled_zeros.csv',
                       index_col=0,
                       parse_dates=True)

#--load lines and active dates
line_shapename = '..\\..\\_gis\\shapes\\sw_reaches'
lines = sf.load_shape_list(line_shapename)
shp = sf.Reader(line_shapename)
coastal_df = pandas.read_csv('..\\..\\_noaa\\noaa_slr.csv',
                             index_col=0,
                             parse_dates=True)
coastal_stages = coastal_df[flow.slr_scenario].values

#--process stage records for WCA ghbs - these records should have been presampled to stress period dimensions
print 'processing EDEN records'
df_eden = pandas.read_csv('..\\..\\_eden\\eden_sp.csv',
                          index_col=0,
                          parse_dates=True)
df_eden_monthly = df_eden.groupby(lambda x: x.month).mean()
print df_eden_monthly.index

print 'loading grid info for coastal GHBs'
g_att = shapefile.load_as_dict('..\\..\\_gis\\shapes\\broward_grid_master',
                               loadShapes=False,
                               attrib_name_list=['row', 'column', 'ibound_CS'])
icoast_rowcol, atlant_rowcol = [], []
for r, c, i in zip(g_att['row'], g_att['column'], g_att['ibound_CS']):
    if i in [51, 52, 53, 54, 55]:
        icoast_rowcol.append([r, c, i])
    elif i == 2:
        atlant_rowcol.append([r, c, i])

print 'loading grid info for WCA GHBs'
recs = shapefile.load_as_dict('..\\..\\_gis\\scratch\\broward_grid_eden',
                              loadShapes=False)
wca_r, wca_c, eden_fracs, eden_cells = recs['row'], recs['column'], recs[
    'eden_fracs'], recs['eden_cells']
#--cast fracs and cells
print 'casting fractions and cell numbers'
#--calc monthly average water levels for ns ghbs
df_ns_monthly = df_ns_data.groupby(lambda x: x.month).mean()
print df_ns_monthly.index
for i, fracs in enumerate(eden_fracs):
    raw = fracs.split()
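    #--hedged continuation (editor's sketch): cast the space-delimited
    #--strings to numeric lists, mirroring the eden_cells parsing used
    #--elsewhere on this page
    eden_fracs[i] = [float(rr) for rr in raw]
for i, cells in enumerate(eden_cells):
    eden_cells[i] = [int(rr) for rr in cells.split()]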
Example 7
def setup():
    '''write an ssm key file and extract stress period nss lists to binary
    '''
    from bro import seawat as cal
    from bro_pred import seawat as pred

    #--build a swr reach tidal key
    f = open('..\\_BCDPEP\\BCDPEP_reach_conc.dat', 'r')
    f.readline()
    tidal_conc = {}
    for line in f:
        raw = line.strip().split(',')
        tidal_conc[int(raw[0])] = float(raw[1])
    f.close()

    #--group the tidal source reaches if the conc is the same
    tidal_rc, tidal_names = [], []
    concs, groups = [], []
    for sreach, conc in tidal_conc.iteritems():
        if conc in concs:
            groups[concs.index(conc)].append(sreach)
        else:
            groups.append([sreach])
            concs.append(conc)
            tidal_rc.append([])
            tidal_names.append([])

    import shapefile
    shapename = '..\\_gis\\scratch\\sw_reaches_conn_swrpolylines_2'
    recs = shapefile.load_as_dict(shapename, loadShapes=False)

    for r, c, sreach, sstruct, sname in zip(recs['ROW'], recs['COLUMN'],
                                            recs['SRC_reach'],
                                            recs['SRC_struct'],
                                            recs['SRC_name']):
        if sstruct == -1:
            for i, group in enumerate(groups):
                if sreach in group:
                    tidal_rc[i].append((r, c))
                    if sname not in tidal_names[i]:
                        tidal_names[i].append(sname)

    f = open('misc\\ssm_riv.key', 'w', 0)
    f.write('group_name,row,col,names\n')
    for i, [tups, names] in enumerate(zip(tidal_rc, tidal_names)):
        name = '_'.join(names).replace(' ', '_').replace(',', '_')
        for (r, c) in tups:
            f.write('riv_cn_#' + str(i + 1) + ',' + str(r) + ',' + str(c) +
                    ',' + name + '\n')
    f.close()

    #--build intercoastal ghb tidal key
    ibound = np.loadtxt('..\\_model\\bro.03\\seawatref\\ibound_CS.ref',
                        dtype=np.int)
    ic_groups = {}
    for i in range(ibound.shape[0]):
        for j in range(ibound.shape[1]):
            name = 'ghb_cn_#' + str(ibound[i, j])
            if name in ic_groups.keys():
                ic_groups[name].append((i + 1, j + 1))
            else:
                ic_groups[name] = [(i + 1, j + 1)]
    f = open('misc\\ssm_ghb.key', 'w', 0)
    f.write('name,row,col\n')
    for name, tups in ic_groups.iteritems():
        for (r, c) in tups:
            f.write(name + ',' + str(r) + ',' + str(c) + '\n')
    f.close()

    #--write an ssm template file - monthly
    par_dict = {}
    tpl_entries = {}
    months = calendar.month_abbr
    pnames = []
    tpl_dict = {}
    for riv_grp in range(len(tidal_rc)):
        tpl_entries = []
        for mn in months[1:]:
            pname = 'rcn_' + str(riv_grp + 1) + '_' + mn
            assert len(pname) <= 10, pname
            pnames.append(pname)
            tpl_entry = '~{0:25s}~'.format(pname)
            tpl_entries.append(tpl_entry)
        tpl_dict['riv_cn_#' + str(riv_grp + 1)] = tpl_entries
    par_dict['riv_conc'] = pnames
    pnames = []
    for ghb_grp in ic_groups.keys():
        grp_num = int(ghb_grp.split('#')[1])
        tpl_entries = []
        for mn in months[1:]:
            pname = 'gcn_' + str(grp_num) + '_' + mn
            assert len(pname) <= 10, pname
            pnames.append(pname)
            tpl_entry = '~{0:25s}~'.format(pname)
            tpl_entries.append(tpl_entry)
        #--ghb_grp already carries the 'ghb_cn_#' prefix
        tpl_dict[ghb_grp] = tpl_entries
    par_dict['ghb_conc'] = pnames

    #--save the template file
    df = pandas.DataFrame(tpl_dict)
    df.index = df.index + 1
    f = open('tpl\\ghbwel_ssm.tpl', 'w', 0)
    f.write('ptf ~\n')
    df.to_csv(f, index_label='month')
    f.close()

    #--save a generic par file for testing
    for col in df.columns:
        df[col] = 1.0
    df.to_csv('par\\ghbwell_ssm.csv', index_label='month')

    #--write the pst components
    f_grp = open('pst_components\\ghbwel_ssm_grps.dat', 'w', 0)
    f_par = open('pst_components\\ghbwel_ssm_pars.dat', 'w', 0)
    pargps = par_dict.keys()
    pargps.sort()
    for pargp in pargps:
        pnames = par_dict[pargp]
        f_grp.write(
            '{0:<20s} factor 0.01  0.001 switch  2.0 parabolic\n'.format(
                pargp))
        for pname in pnames:

            f_par.write(
                '{0:<20s} log factor  1.0 1.0e-10 1.0e+10 {1:<20s}  1.0 0.0  0\n'
                .format(pname, pargp))
    f_grp.close()
    f_par.close()

    #--extract sp data and zip to binary
    #--load the key files into a dict
    itype_dict = {'riv': 4, 'ghb': 5}
    files = os.listdir('misc\\')
    key_files = []
    for f in files:
        if 'key' in f and 'ssm' in f:
            key_files.append(f)
    key_dict = {}
    for key_file in key_files:
        ptype = key_file.split('.')[0].split('_')[1]
        itype = itype_dict[ptype]
        f = open('misc\\' + key_file, 'r')
        header = f.readline()
        for line in f:
            raw = line.strip().split(',')
            r, c = int(raw[1]), int(raw[2])
            key_dict[(r, c, itype)] = raw[0]
        f.close()

    sp_lists = [cal.sp_start, pred.sp_start]
    for ssm_file, list_dir, sp_list in zip(SSM_FILES, LIST_DIRS, sp_lists):
        f = open(ssm_file, 'r')
        #--read the header info
        logicals = f.readline()
        maxssm = int(f.readline().strip())
        #--read the rch,ets junk
        rchets_lines = []
        for i in range(4):
            rchets_lines.append(f.readline().strip())
        #--start the sp loop
        kper = 0
        while True:
            try:
                nss = int(f.readline().strip())
            except:
                break
            lines = []
            #line_str = ''
            for i in range(nss):
                line = parse_ssm_line(f.readline())
                try:
                    line[-1] = key_dict[(line[1], line[2], line[4])]
                except:
                    pass
                #line_str += line
                lines.append(tuple(line))
            arr = np.array(lines, dtype=ssm_dtype_extend)
            dt = sp_list[kper]
            fname = list_dir + 'ssm_' + dt.strftime('%Y%m%d') + '_' + str(
                nss) + '.dat'
            print 'writing', fname
            #--for testing
            #np.savetxt('test.dat',arr,fmt=' %9d %9d %9d %15.6E %9d %20s')
            arr.tofile(fname)
            #--read the repeat rch and ets lines
            rch = f.readline()
            rch = f.readline()
            kper += 1
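#--hypothetical definitions (editor's sketch) for two names setup()
#--assumes but the excerpt never shows; the commented savetxt fmt above
#--(' %9d %9d %9d %15.6E %9d %20s') suggests a (k, i, j, css, itype, aux)
#--record layout, and np comes from the snippet's elided module imports
ssm_dtype_extend = np.dtype([('k', np.int32), ('i', np.int32),
                             ('j', np.int32), ('css', np.float32),
                             ('itype', np.int32), ('aux', 'S20')])


def parse_ssm_line(line):
    #--k, i, j, css, itype, plus an empty aux slot for the key name
    raw = line.strip().split()
    return [int(raw[0]), int(raw[1]), int(raw[2]), float(raw[3]),
            int(raw[4]), '']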
import numpy as np
import calendar
import shapefile
import pestUtil

#--load the grid shapefile that has the nexrad groups
shapename = 'shapes\\cwm_grid_groups'
records = shapefile.load_as_dict(shapename, ['row', 'column', 'nex_group'],
                                 loadShapes=False)

#--load the grid info
ginfo = pestUtil.load_grid_spec('misc\\grid.spc')

#--fill in the groups array
grp_arr = np.zeros((ginfo['nrow'], ginfo['ncol']))

for r, c, g in zip(records['row'], records['column'], records['nex_group']):
    grp_arr[r - 1, c - 1] = g
np.savetxt('ref\\UMD_nexrad.ref', grp_arr, fmt=' %2.0f')
num_grps = np.unique(grp_arr).shape[0]

f = open('fac\\nex_fac.dat', 'w')
f.write('cl_nexpts\n')
f.write('cl_mf_grid\n')
f.write('{0:10.0f}{1:10.0f}\n'.format(1, num_grps))
f.write('{0:10.0f}{1:10.0f}\n'.format(1, ginfo['nrow'] * ginfo['ncol']))
f.write('{0:10.0f}{1:10.0f}\n'.format(num_grps, ginfo['nrow'] * ginfo['ncol']))
f.write('{0:10.0f}\n'.format(1))

cell_num = 1
for i in range(ginfo['nrow']):
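    #--hedged continuation (editor's sketch): one unit-weight factor per
    #--cell, pointing at the cell's nexrad group (the real PEST factor-file
    #--layout may differ)
    for j in range(ginfo['ncol']):
        grp = int(grp_arr[i, j])
        f.write('{0:10d} {1:10d} {2:10d} {3:15.6E}\n'.format(
            cell_num, 1, grp, 1.0))
        cell_num += 1
f.close()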
Example 10
def main(num_plots):

    #--load well locations and pandas dataframe
    well_shapename = '..\\..\\_gis\\shapes\\pws_combine'
    well_points = sf.load_shape_list(well_shapename)
    #shp = sf.reader(well_shapename)
    #print sf.get_fieldnames(well_shapename)
    records = sf.load_as_dict(well_shapename, loadShapes=False)
    well_names = records['DPEP_NAME']
    well_aban = records['ABAN_YEAR']
    act_wells = []
    for pt, ab in zip(well_points, well_aban):
        if ab == '':
            act_wells.append(pt)

    #--load lines and active dates
    line_shapename = '..\\..\\_gis\shapes\sw_reaches'
    lines = sf.load_shape_list(line_shapename)

    #--head stuff
    #--use bot of Q5 to check for dry cells
    #hds_elev = np.loadtxt(flow.ref_dir+'Q5_bot.ref')
    #hds_layer_idx = 0
    #head_file = flow.root+'.hds'
    #headObj = mfb.MODFLOW_Head(flow.nlay,flow.nrow,flow.ncol,head_file)
    #htimes = headObj.get_time_list()

    #--conc stuff
    conc_lay_idxs = [0, 2, 3, 4, 5]
    conc_file = 'MT3D001.UCN'
    concObj = mfb.MT3D_Concentration(seawat.nlay, seawat.nrow, seawat.ncol,
                                     conc_file)
    ctimes = concObj.get_time_list()

    #-- stress period step
    sp_step = 1
    plt_dir = 'png\\results\\seawat\\'

    #--for ffmpeg - sequentially numbered
    plt_num = 1
    istart = 0
    q_args = []
    for i, [start, end] in enumerate(zip(seawat.sp_start, seawat.sp_end)):
        if i >= istart and i % sp_step == 0:
            print 'building args list for stress period ending on ', end
            #--find the conc output nearest the end of the stress period

            try:
                kper_seekpoints = ctimes[np.where(ctimes[:, 2] == i + 1), -1]
                c_seekpoint = long(kper_seekpoints[0][-1])

            except:
                break

            fig_name = plt_dir + 'sp{0:03.0f}_conc.png'.format(plt_num)
            fig_title = 'stress period ' + str(
                i + 1) + ' start date ' + start.strftime('%d/%m/%Y')
            args = [
                fig_name, c_seekpoint, conc_lay_idxs, lines, act_wells,
                fig_title
            ]
            q_args.append(args)
            plt_num += 1
            if num_plots != None and i > num_plots:
                break

    jobq = mp.JoinableQueue()

    #--for testing
    #if num_plots != None:
    #    jobq.put_nowait(q_args[0])
    #    jobq.put_nowait(None)
    #    plot_worker(jobq,1,conc_file)
    #    return

    procs = []
    num_procs = 4

    for i in range(num_procs):
        #--pass the worker function jobq and a PID
        p = mp.Process(target=plot_worker, args=(jobq, i, conc_file))
        p.daemon = True
        print 'starting process', p.name
        p.start()
        procs.append(p)

    for q in q_args:
        jobq.put(q)

    for p in procs:
        jobq.put(None)

    #--block until all finish
    for p in procs:
        p.join()
        print p.name, 'Finished'

    anim_name = 'png\\demo_conc.avi'
    if os.path.exists(anim_name):
        os.remove(anim_name)
    cmd_line = 'ffmpeg.exe -i png\\results\\seawat\\sp%03d_conc.png -r 24 ' + anim_name + ' -y'
    os.system(cmd_line)
    return
import os
import pandas
import shapefile
import dbhydro_util

df_dir = 'stage_dfs_navd\\'
smp_dir = 'stage_smp_navd\\'
ngvd_2_navd = -1.5
#--load the attributes of the structure shapefile
shapename = '..\\_gis\\shapes\\sw_structures'
records = shapefile.load_as_dict(
    shapename,
    attrib_name_list=['system', 'struct_num', 'dbhydro'],
    loadShapes=False)

#--get a list of stage records
stg_dir = 'SW\\STG\\'
stg_files = os.listdir(stg_dir)

#--build a list of station names
rec_attribs = []
for f in stg_files:
    fdict = dbhydro_util.parse_fname(f)
    rec_attribs.append(fdict)

#--find all the records for each primary structure, headwater only
wl_stats = ['DA', 'BK', 'DWR', 'INST', 'FWM', 'MEAN']
for system, name in zip(records['system'], records['dbhydro']):
    if system == 1 and name != None:
        match_h, match_t = [], []
        for i, rec in enumerate(rec_attribs):
            station = rec['STATION']
            if '_' in station:
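                #--hedged continuation (editor's sketch): split DBHYDRO
                #--names like 'S13_H'/'S13_T' into a structure name and a
                #--head/tail suffix; the suffix convention is an assumption
                struct = station.split('_')[0]
                suffix = station.split('_')[-1]
                if struct == name and suffix.upper().startswith('H'):
                    match_h.append(rec)
                elif struct == name and suffix.upper().startswith('T'):
                    match_t.append(rec)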
Example 12
import math
import shapefile


def dist(point1, point2):
    xx = (point1[0] - point2[0])**2
    yy = (point1[1] - point2[1])**2
    return math.sqrt(xx + yy)


line_shapename = '..\\_gis\\shapes\\sw_reaches'
#shp_lines = shapefile.Reader(line_shapename)
#lines = shp_lines.shapes()
#l_records = shp_lines.records()
#l_name_idx,reach_idx = 0,2
lines, l_records = shapefile.load_as_dict(line_shapename)

#--tolerance distance
warn_dist = 50.0
tol_dist = 0.0

#--set the writer instance
in_shapename = '..\\_gis\\shapes\\sw_structures'
out_shapename = '..\\_gis\\scratch\\sw_structures_reaches'
shp_points = shapefile.Reader(in_shapename)
points = shp_points.shapes()
p_records = shp_points.records()
p_name_idx, dwnstr_idx = 4, 6

wr = shapefile.writer_like(in_shapename)
wr.field('upstream', fieldType='N', size=10)
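#--a hedged sketch (editor's illustration) of the snapping pass the writer
#--above is being prepared for: tag each structure with the nearest reach
#--endpoint using dist(), warning when it lies farther than warn_dist
#--(the 'REACH' field name is an assumption)
for pt, rec in zip(points, p_records):
    pxy = pt.points[0]
    best_reach, best_d = None, 1.0e+30
    for line, reach in zip(lines, l_records['REACH']):
        for vertex in (line.points[0], line.points[-1]):
            d = dist(pxy, vertex)
            if d < best_d:
                best_d, best_reach = d, reach
    if best_d > warn_dist:
        print 'warning - structure', rec[p_name_idx], 'dist', best_d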
Example 17
import os
import shutil
from datetime import datetime
import pandas
import shapefile
from simple import grid

f = open('settings.fig','w',0)
f.write('date = dd/mm/yyyy\ncolrow=no\n')
f.close()

points,records = shapefile.load_as_dict('shapes\\simple_obs')
ids = records['Id']

#--set the obs at the bottom of upper
for olayer in range(grid.nlay):
    if grid.lay_key[olayer] != 'upper':
        break
print os.path.abspath('.\\')
f = open('_misc\\bore_coords.dat','w',0)
onames = []
for point,id in zip(points,ids):
    point = point.points[0]
    x,y = point
    oname = 'obs_'+str(id)
    onames.append(oname+'up')
    onames.append(oname+'lw')
    f.write('{0:25s} {1:15.5E} {2:15.5E} {3:6d}\n'.format(oname+'up',x,y,olayer))
    f.write('{0:25s} {1:15.5E} {2:15.5E} {3:6d}\n'.format(oname+'lw',x,y,9))
f.close()
            



top = np.loadtxt('..\\_model\\bro.01\\ref\\top_mod.ref')

xsec_dir = 'xsec_navd\\'
xsec_files = os.listdir(xsec_dir)
xsecs = {}
for xname in xsec_files:    
    header,xsec = swr.load_xsec(xsec_dir+xname)
    xsecs[xname] = xsec
    
shapename = '..\\_gis\\scratch\\sw_reaches_conn_SWRpolylines'
shapes,records = shapefile.load_as_dict(shapename)
wr = shapefile.Writer()
wr.field('reach',fieldType='N',size=10,decimal=0)
wr.field('top',fieldType='N',size=20,decimal=10)
wr.field('xsec_min',fieldType='N',size=20,decimal=10)
wr.field('xsec_max',fieldType='N',size=20,decimal=10)
wr.field('xsec_depth',fieldType='N',size=20,decimal=10)
wr.field('mod_depth',fieldType='N',size=20,decimal=10)
wr.field('depth_diff',fieldType='N',size=20,decimal=10)
wr.field('xsec_perm',fieldType='N',size=20,decimal=10)
wr.field('mod_perm',fieldType='N',size=20,decimal=10)
wr.field('perm_diff',fieldType='N',size=20,decimal=10)
#print shapefile.get_fieldnames(shapename)
reaches = records['REACH']
rows = records['ROW']
cols = records['COLUMN']
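#--a hedged sketch (editor's illustration) of the comparison the writer
#--fields above imply; the xsec filename key, the elevation column, and
#--the zero perm placeholders are assumptions
for shape, reach, r, c in zip(shapes, reaches, rows, cols):
    xname = 'xsec_{0:03d}.dat'.format(int(reach))
    if xname not in xsecs:
        continue
    elevs = np.array(xsecs[xname])[:, -1]
    xsec_min, xsec_max = elevs.min(), elevs.max()
    cell_top = top[int(r) - 1, int(c) - 1]
    xsec_depth = xsec_max - xsec_min
    mod_depth = cell_top - xsec_min
    wr.poly([shape.points], shapeType=shape.shapeType)
    wr.record([reach, cell_top, xsec_min, xsec_max, xsec_depth, mod_depth,
               mod_depth - xsec_depth, 0.0, 0.0, 0.0])
wr.save('..\\_gis\\scratch\\sw_reaches_xsec_compare')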
Example 19
try:
    if sys.argv[1].upper() == 'R':
        rst = True
except:
    pass

arr_prefix = 'nexrad_rech_inch_day\\rech'
pixel_file = 'pixel_data\\nexrad_inc138246_96_12_rainfall_ord.dat'

#arr_prefix = 'mm_day_pet\\pet'
#pixel_file = 'pet_pixel_all_ord.txt'

#--name of model grid shapefile with pixels attached - from make_pixel_map.py
print 'loading grid shapefile...'
shapefile_name = '..\\..\\_gis\\shapes\\broward_grid_master'
shapes, records = shapefile.load_as_dict(
    shapefile_name, attrib_name_list=['row', 'column', 'pixels', 'fractions'])
nrow, ncol = records['row'].max(), records['column'].max()
print 'done'
print 'nrow,ncol', nrow, ncol

#--load the pixel timeseries with
#--pixel, value and ord date index values for the timeseries file
p_idx, v_idx, d_idx = 0, 1, 2
print 'loading pixel timeseries file', pixel_file
pixel = np.loadtxt(pixel_file, usecols=[0, 2, 3], delimiter=',')
print 'done - ', pixel.shape[0], ' records loaded'

#--get a list of the unique ordinal days in pixel time series
pixel_days = np.unique(pixel[:, d_idx])

#--process each day
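#--hedged sketch (editor's illustration) of the daily loop: pull that day's
#--pixel values, then build the grid array from the space-delimited
#--'pixels' and 'fractions' attributes loaded above; the output naming is
#--illustrative
for day in pixel_days:
    day_rows = pixel[pixel[:, d_idx] == day]
    pvals = dict(zip(day_rows[:, p_idx].astype(int), day_rows[:, v_idx]))
    arr = np.zeros((nrow, ncol))
    for r, c, pix, frac in zip(records['row'], records['column'],
                               records['pixels'], records['fractions']):
        for p, fr in zip(pix.split(), frac.split()):
            arr[int(r) - 1, int(c) - 1] += pvals.get(int(p), 0.0) * float(fr)
    np.savetxt(arr_prefix + '_{0:.0f}.ref'.format(day), arr, fmt='%15.6E')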
import os
import numpy as np
from scipy.interpolate import Rbf

import shapefile
import simple

#--first write pilot points file from topo_points shape
shapename = 'shapes\\simple_topo_points'
f = open('misc\\topo_points.dat', 'w', 0)
points, rec_dict = shapefile.load_as_dict(shapename)
xs, ys, ztops = [], [], []
for pt, id, ztop in zip(points, rec_dict['Id'], rec_dict['ztop']):
    x, y = pt.points[0][0], pt.points[0][1]
    if x in xs and y in ys:
        print 'dup', id
    else:
        xs.append(x)
        ys.append(y)
        ztops.append(ztop)
    pname = 'topo_{0:02d}'.format(id)
    f.write('{0:20s} {1:15E} {2:15E}  1   {3:15E}\n'.format(
        pname, x, y, float(ztop)))
f.close()
xs, ys, ztops = np.array(xs), np.array(ys), np.array(ztops)
zs = np.ones(xs.shape)

#--write the grid file
f = open('misc\\simple.grd', 'w', 0)
f.write('{0:10d} {1:10d}\n'.format(simple.grid.nrow, simple.grid.ncol))
f.write('{0:15.5G} {1:15.5G} 0.0\n'.format(simple.grid.xmin,
                                           simple.grid.cols[-1]))
for dr in simple.grid.delr:
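    #--hedged continuation (editor's sketch): write the row spacings, then
    #--the column spacings (simple.grid.delc is an assumed attribute)
    f.write('{0:15.5G}\n'.format(dr))
for dc in simple.grid.delc:
    f.write('{0:15.5G}\n'.format(dc))
f.close()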
Example 21
def write():
    #--get the well locations
    shapename = '..\\shapes\\simple_well_grid_join'
    records = shapefile.load_as_dict(shapename, loadShapes=False)
    rows, cols, ztops, zbots, ids, hydros = records['row'], records[
        'column_'], records['ztop'], records['zbot'], records['Id'], records[
            'hydro']

    #--random sampling
    #--first create normal distributions for the wells
    upper_mean, upper_std = -500.0, 10.0
    lower_mean, lower_std = -5000.0, 50.0
    nper = len(grid.sp_start)
    well_records = []
    for id, hydro in zip(ids, hydros):
        if hydro == 3:
            rec = np.random.normal(lower_mean, lower_std, nper)
        if hydro == 1:
            rec = np.random.normal(upper_mean, upper_std, nper)
        rec = add_zeros(rec)
        well_records.append(rec)
    well_records = np.array(well_records).transpose()
    well_records[np.where(well_records > 0.0)] = 0.0
    mnw_ds2 = []
    names = []
    for id, r, c, top, bot, hydro in zip(ids, rows, cols, ztops, zbots,
                                         hydros):
        #--mnw
        if hydro == 1:
            name = 'upper_' + str(id)
        else:
            name = 'lower_' + str(id)
        names.append(name)
        line_2a = '{0:20s}{1:10f}{2:>33s}\n'.format(name, -1, '#2a')
        mnw_ds2.append(line_2a)
        line_2b = '{0:20s}{1:10d}{2:10d}{3:10d}{4:10d} #2b\n'.format(
            'THIEM', 0, 0, 0, 0)
        mnw_ds2.append(line_2b)
        line_2c = '{0:10.4f}{1:>53s}\n'.format(1.0, '#2c')
        mnw_ds2.append(line_2c)
        line_2d2 = ' {0:9.4f} {1:9.4f} {2:9.0f} {3:9.0f}{4:>26s}\n'.format(
            float(top), float(bot), int(float(r)), int(float(c)), '#2d-2')
        mnw_ds2.append(line_2d2)

    f_mnw = open(grid.modelname + '.mnw', 'w', 0)
    f_mnw.write('# ' + sys.argv[0] + ' ' + str(datetime.now()) + '\n')
    f_mnw.write(' {0:9.0f} {1:9.0f} {2:9.0f}\n'.format(len(ids), 0, 0))
    for line in mnw_ds2:
        f_mnw.write(line)
    upper_layers = [3, 4, 5, 6]
    lower_layers = [10, 11, 12, 13, 14, 15, 16, 17, 18]
    f_wel = open(grid.modelname + '.wel', 'w', 0)
    f_wel.write('# ' + sys.argv[0] + ' ' + str(datetime.now()) + '\n')
    f_wel.write(' {0:9.0f} {1:9.0f} {2:9.0f}\n'.format(300, 0, 0))
    for i, slice in enumerate(well_records):
        lines = []
        for name, rate in zip(names, slice):
            lines.append('{0:20s}{1:15.4G}\n'.format(name, rate))
        f_mnw.write('{0:10d} {1:20s} {2:3d}\n'.format(len(lines),
                                                      '#3 Stress Period',
                                                      i + 1))
        for line in lines:
            f_mnw.write(line)

        lines = []
        for name, rate, row, col in zip(names, slice, rows, cols):
            if 'upper' in name:
                layers = upper_layers
            else:
                layers = lower_layers
            rate /= float(len(layers))
            for lay in layers:
                line = '{0:10d}{1:10d}{2:10d}{3:15.4E}  #{4:20s}\n'\
                    .format(lay,int(float(row)),int(float(col)),rate,name)
                lines.append(line)
        f_wel.write('{0:10d}{1:10d} #{2:20s}{3:4d}\n'.format(
            len(lines), 0, 'stress period ', i + 1))
        for line in lines:
            f_wel.write(line)
    f_mnw.close()
    f_wel.close()
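#--a hypothetical stand-in (editor's sketch) for the add_zeros() helper
#--that write() assumes above: zero out a random subset of stress periods
#--so some wells sit idle part of the time
def add_zeros(rec, frac=0.1):
    idx = np.random.rand(rec.shape[0]) < frac
    rec[idx] = 0.0
    return rec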
import os
from datetime import datetime,timedelta
import numpy as np
from scipy.io.netcdf import netcdf_file as ncdf
import pandas
import shapefile


#--get a list of the eden masterid numbers that are needed
recs = shapefile.load_as_dict('..\\_gis\\scratch\\broward_grid_eden',attrib_name_list=['eden_cells'],loadShapes=False)
masterids = []
for r in recs['eden_cells']:
    raw = r.split()
    for rr in raw:
        if int(rr) not in masterids:
            masterids.append(int(rr))
             
#--get the x,y of the masterids
recs = shapefile.load_as_dict('..\\_gis\\shapes\\EDEN_grid_poly_Jan_10_sp',loadShapes=False)
xs,ys = [],[]
for x,y,m in zip(recs['X_COORD'],recs['Y_COORD'],recs['MASTERID']):
    if int(m) in masterids:
        xs.append(float(x))
        ys.append(float(y))


cdf_dir = 'surface_netcdf\\'
cdf_files = os.listdir(cdf_dir)

#--group files by year
cdf_years = []
def main(num_plots):

    #--load well locations and pandas dataframe
    well_shapename = '..\\..\\_gis\\shapes\\pws_combine'
    well_points = sf.load_shape_list(well_shapename)
    #shp = sf.reader(well_shapename)
    #print sf.get_fieldnames(well_shapename)
    records = sf.load_as_dict(well_shapename, loadShapes=False)
    well_names = records['DPEP_NAME']
    well_zbots = records['zbot']
    float_zbots = []
    for i, wb in enumerate(well_zbots):
        float_zbots.append(float(wb))
    well_zbots = np.array(float_zbots)
    well_rows, well_cols = records['row'], records['column']
    pump = pandas.read_csv(
        '..\\..\\_pumpage\\dataframes\\pws_filled_zeros.csv',
        index_col=0,
        parse_dates=True)

    #--load lines and active dates
    line_shapename = '..\\..\\_gis\\shapes\\sw_reaches'
    lines = sf.load_shape_list(line_shapename)
    shp = sf.Reader(line_shapename)
    fnames = sf.get_fieldnames(line_shapename, ignorecase=True)
    #for i,fn in enumerate(fnames):
    #    print i,fn
    a_idx = fnames.index('ACTIVE_ST')
    line_active = []
    for i in range(shp.numRecords):
        rec = shp.record(i)
        year = int(rec[a_idx])
        if year < flow.start.year:
            year = flow.start.year
        dt = datetime(year=year, month=1, day=1)
        line_active.append(dt)

    #--head stuff
    #--use bot of Q5 to check for dry cells
    #hds_elev = np.loadtxt(flow.ref_dir+'Q5_bot.ref')
    #hds_layer_idx = 0
    #head_file = flow.root+'.hds'
    #headObj = mfb.MODFLOW_Head(flow.nlay,flow.nrow,flow.ncol,head_file)
    #htimes = headObj.get_time_list()

    #--conc stuff
    conc_lay_idxs = [0, 5, 9, 11]
    conc_file = 'MT3D001.UCN'
    concObj = mfb.MT3D_Concentration(seawat.nlay, seawat.nrow, seawat.ncol,
                                     conc_file)
    ctimes = concObj.get_time_list()

    #--zeta stuff
    #zta_layer_idx = 0
    #zta_elev = np.loadtxt(flow.ref_dir+'Q1_bot.ref')
    #zeta_file = flow.root+'.zta'
    #zetaObj = mfb.MODFLOW_CBB(flow.nlay,flow.nrow,flow.ncol,zeta_file)
    #zta_text = '    ZETAPLANE  1'
    #z1times = zetaObj.get_time_list(zta_text)
    #zeta_file = None

    #-- stress period step
    sp_step = 1
    plt_dir = 'png\\results\\seawat\\'

    #--for ffmpeg - sequentially numbered
    plt_num = 1
    istart = 0
    q_args = []
    for i, [start, end] in enumerate(zip(seawat.sp_start, seawat.sp_end)):
        if i >= istart and i % sp_step == 0:
            print 'building args list for stress period ending on ', end
            #--find the conc output nearest the end of the stress period

            try:
                kper_seekpoints = ctimes[np.where(ctimes[:, 2] == i + 1), -1]
                c_seekpoint = long(kper_seekpoints[0][-1])

            except:
                break

            act_lines = []
            for ldt, line in zip(line_active, lines):
                if ldt <= start:
                    act_lines.append(line)

            act_wells = []
            if i == 0:
                plt_start = start
            else:
                plt_start = seawat.sp_start[i - sp_step]
            plt_end = seawat.sp_end[i]
            pump_plt = pump[plt_start:plt_end]
            pump_plt_sum = pump_plt.sum()
            for wname, wpoint, wrow, wcol, wzbot in zip(
                    well_names, well_points, well_rows, well_cols, well_zbots):
                if wname in pump_plt.keys() and pump_plt_sum[wname] != 0:
                    act_wells.append(wpoint)

            fig_name = plt_dir + 'sp{0:03.0f}_conc.png'.format(plt_num)
            fig_title = 'stress period ' + str(
                i + 1) + ' start date ' + start.strftime('%d/%m/%Y')
            args = [
                fig_name, c_seekpoint, conc_lay_idxs, act_lines, act_wells,
                fig_title
            ]
            q_args.append(args)
            plt_num += 1
            if num_plots != None and i > num_plots:
                break

    jobq = mp.JoinableQueue()

    #--for testing
    if num_plots != None:
        jobq.put_nowait(q_args[0])
        jobq.put_nowait(None)
        plot_worker(jobq, 1, conc_file)
        return

    procs = []
    num_procs = 3

    for i in range(num_procs):
        #--pass the worker function jobq and a PID
        p = mp.Process(target=plot_worker, args=(jobq, i, conc_file))
        p.daemon = True
        print 'starting process', p.name
        p.start()
        procs.append(p)

    for q in q_args:
        jobq.put(q)

    for p in procs:
        jobq.put(None)

    #--block until all finish
    for p in procs:
        p.join()
        print p.name, 'Finished'

    anim_name = 'png\\demo_conc.avi'
    if os.path.exists(anim_name):
        os.remove(anim_name)
    cmd_line = 'ffmpeg.exe -i png\\results\\seawat\\sp%03d_conc.png -r 24 ' + anim_name + ' -y'
    os.system(cmd_line)
    return
#--load the botm's - for the standard wel package
botm = np.zeros((flow.nlay + 1, flow.nrow, flow.ncol)) - 1.0e10
botm[0, :, :] = np.loadtxt(flow.ref_dir + 'top_layering.ref')
for i, prefix in enumerate(flow.layer_botm_names):
    lay_botm = np.loadtxt(flow.ref_dir + prefix + '_bot.ref')
    botm[i + 1, :, :] = lay_botm

botm2 = np.zeros((seawat.nlay + 1, seawat.nrow, seawat.ncol)) - 1.0e10
botm2[0, :, :] = np.loadtxt(seawat.ref_dir + 'top_layering.ref')
for i, prefix in enumerate(seawat.layer_botm_names):
    lay_botm = np.loadtxt(seawat.ref_dir + prefix + '_bot.ref')
    botm2[i + 1, :, :] = lay_botm

pws_shapename = '..\\..\\_gis\\shapes\\pws_combine'
pws_recs = shapefile.load_as_dict(pws_shapename, loadShapes=False)
mnw_ds2 = []
dfs = []
#f = open('test.dat','w')
count = 1
inactive = []
wel_rcl = {}
wel_rcl2 = {}
nwells_flow, nwells_seawat = 0, 0
for dep_name, r, c, top, bot, ibnd in zip(pws_recs['DPEP_NAME'],
                                          pws_recs['row'],
                                          pws_recs['column'],
                                          pws_recs['ztop'],
                                          pws_recs['zbot'],
                                          pws_recs['ibound_CS']):
    if ibnd > 0:
        if dep_name in pump_df.keys():
            #--mnw
            line_2a = (dep_name.ljust(20) + '{0:10.0f}'.format(-1) +
                       '{0:>33s}'.format('#2a') + '\n')
            mnw_ds2.append(line_2a)
            line_2b = ('THIEM'.ljust(19) + '0'.ljust(10) + '0'.ljust(10) +
                       '0'.ljust(10) + '0'.ljust(10) + ' #2b\n')
Example 25
for the area of each polygon.
generate using pixel_grid_union.py
'''

#--name of unioned pixel-grid shapefile
pixelgrid_shapename = '..\\..\\_gis\\scratch\\broward_pixel_grid'
#--name of pixels shape
pixel_shapename = '..\\..\\_gis\\shapes\\NEXRAD_pixels_broward'
#--name of grid shapefile
grid_shapename = '..\\..\\_gis\\shapes\\broward_grid_master'
#--name of new grid shapefile with pixel info attached
new_grid_shapename = '..\\..\\_gis\\scratch\\broward_grid_pixelmap'

#--load the pixel shapes into shapely - need to use centroid attribute later
print 'loading pixel shapefile...'
pixel_shapes,pixel_records = shapefile.load_as_dict(pixel_shapename)
pixel_polys = []
for p_shape in pixel_shapes:
    pixel_polys.append(Polygon(p_shape.points))
print 'done'

print 'loading pixel grid shapefile...'
pg_shapes,pg_records = shapefile.load_as_dict(pixelgrid_shapename,attrib_name_list=['pixel','cellnum','area'])
print pg_records.keys()
#pg_row,pg_col = np.array(pg_records['row']),np.array(pg_records['column'])
pg_pixel,pg_area = np.array(pg_records['pixel']),np.array(pg_records['area'])
pg_cellnum = np.array(pg_records['cellnum'])
#pg_delx,pg_dely = np.array(pg_records['delx']),np.array(pg_records['dely'])
print 'done'
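#--hedged sketch (editor's illustration) of the aggregation the new grid
#--shapefile implies: collect each cell's pixels and area fractions like
#--the space-delimited 'pixels'/'fractions' attributes seen elsewhere on
#--this page; normalizing by total intersected area is an assumption
cell_pixels, cell_fracs = {}, {}
for pix, area, cnum in zip(pg_pixel, pg_area, pg_cellnum):
    cell_pixels.setdefault(cnum, []).append('{0:.0f}'.format(pix))
    cell_fracs.setdefault(cnum, []).append(area)
for cnum, areas in cell_fracs.iteritems():
    tot = sum(areas)
    cell_fracs[cnum] = ['{0:.6f}'.format(a / tot) for a in areas]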

Example 27
def main():

    #--load well locations and pandas dataframe
    well_shapename = '..\\..\\_gis\\shapes\\pws_combine'
    well_points = sf.load_shape_list(well_shapename)
    #shp = sf.reader(well_shapename)
    #print sf.get_fieldnames(well_shapename)
    records = sf.load_as_dict(well_shapename, loadShapes=False)
    well_names = records['DPEP_NAME']
    well_zbots = records['zbot']
    float_zbots = []
    for i, wb in enumerate(well_zbots):
        float_zbots.append(float(wb))
    well_zbots = np.array(float_zbots)
    well_rows, well_cols = records['row'], records['column']
    pump = pandas.read_csv(
        '..\\..\\_pumpage\\dataframes\\pws_filled_zeros.csv',
        index_col=0,
        parse_dates=True)

    #--load lines and active dates
    line_shapename = '..\\..\\_gis\\shapes\\sw_reaches'
    lines = sf.load_shape_list(line_shapename)
    shp = sf.Reader(line_shapename)
    fnames = sf.get_fieldnames(line_shapename, ignorecase=True)
    #for i,fn in enumerate(fnames):
    #    print i,fn
    a_idx = fnames.index('ACTIVE_ST')
    line_active = []
    for i in range(shp.numRecords):
        rec = shp.record(i)
        year = int(rec[a_idx])
        if year < flow.start.year:
            year = flow.start.year
        dt = datetime(year=year, month=1, day=1)
        line_active.append(dt)

    #--head stuff
    #--use bot of Q5 to check for dry cells
    hds_elev = np.loadtxt(flow.ref_dir + 'Q5_bot.ref')
    hds_layer_idx = 0
    head_file = flow.root + '.hds'
    headObj = mfb.MODFLOW_Head(flow.nlay, flow.nrow, flow.ncol, head_file)
    htimes = headObj.get_time_list()

    #--zeta stuff
    zta_layer_idx = 0
    zta_elev = np.loadtxt(flow.ref_dir + 'Q1_bot.ref')
    zeta_file = flow.root + '.zta'
    zetaObj = mfb.MODFLOW_CBB(flow.nlay, flow.nrow, flow.ncol, zeta_file)
    zta_text = '    ZETAPLANE  1'
    z1times = zetaObj.get_time_list(zta_text)
    #zeta_file = None

    #-- stress period step
    sp_step = 1
    plt_dir = 'png\\results\\'

    #--for ffmpeg - sequentially numbered
    plt_num = 1
    istart = 0
    q_args = []
    for i, dt in enumerate(flow.sp_start):
        if i >= istart and i % sp_step == 0:
            print 'building args list for ', dt
            try:
                h_seekpoint = long(htimes[i, 3])
            except:
                break
            if zeta_file:
                z_seekpoint = long(z1times[i, 3])
            else:
                z_seekpoint = None

            act_lines = []
            for ldt, line in zip(line_active, lines):
                if ldt <= dt:
                    act_lines.append(line)

            act_wells = []
            if i == 0:
                plt_start = dt
            else:
                plt_start = flow.sp_start[i - sp_step]
            plt_end = flow.sp_end[i]
            pump_plt = pump[plt_start:plt_end]
            pump_plt_sum = pump_plt.sum()
            for wname, wpoint, wrow, wcol, wzbot in zip(
                    well_names, well_points, well_rows, well_cols, well_zbots):
                if wname in pump_plt.keys() and pump_plt_sum[wname] != 0:
                    act_wells.append(wpoint)

            fig_name = plt_dir + 'sp{0:03.0f}.png'.format(plt_num)
            #fig_title = 'stress period '+str(i+1)+' start date '+dt.strftime('%d/%m/%Y')
            fig_title = str(dt.year)
            args = [
                fig_name, h_seekpoint, z_seekpoint, act_lines, act_wells,
                hds_layer_idx, zta_layer_idx, fig_title
            ]
            q_args.append(args)
            plt_num += 1

    jobq = mp.JoinableQueue()

    #--for testing
    #jobq.put_nowait(q_args[0])
    #jobq.put_nowait(None)
    #plot_worker(jobq,0,head_file,None,hds_elev,zta_elev)
    #return

    procs = []
    num_procs = 6

    for i in range(num_procs):
        #--pass the worker function jobq and a PID
        p = mp.Process(target=plot_worker,
                       args=(jobq, i, head_file, zeta_file, hds_elev,
                             zta_elev))
        p.daemon = True
        print 'starting process', p.name
        p.start()
        procs.append(p)

    for q in q_args:
        jobq.put(q)

    for p in procs:
        jobq.put(None)

    #--block until all finish
    for p in procs:
        p.join()
        print p.name, 'Finished'

    cmd_line = 'ffmpeg.exe -i results\\sp%03d.png -r 24 demo.avi -y'
    os.system(cmd_line)
    return
import shapefile

#--shapenames
sc_shapename = '..\\_gis\\scratch\\pws_K_locations'
apt_shapename = '..\\_gis\\scratch\\apt_K_locations'

sc_shapes,sc_records = shapefile.load_as_dict(sc_shapename)
apt_shapes,apt_records = shapefile.load_as_dict(apt_shapename)

wr = shapefile.Writer()
wr.field('top',fieldType='N',size=10,decimal=1)
wr.field('bot',fieldType='N',size=10,decimal=1)
wr.field('K_ftday',fieldType='N',size=30,decimal=10)

for shape,top,bot,k in zip(sc_shapes,sc_records['top'],sc_records['bot'],sc_records['K']):
    wr.poly([shape.points],shapeType=shape.shapeType)
    wr.record([top,bot,k])

for shape,top,bot,k in zip(apt_shapes,apt_records['top'],apt_records['bot'],apt_records['K']):
    wr.poly([shape.points],shapeType=shape.shapeType)
    wr.record([top,bot,k])

wr.save('..\\_gis\\scratch\\all_K_locations')
def prep():
    upper_layers = [3,4,5,6]
    lower_layers = [10,11,12,13]

    print 'loading pumping well locations'
    shapename = 'shapes\\simple_well_grid_join'
    records = shapefile.load_as_dict(shapename,loadShapes=False)
    rows,cols,ztops,zbots,ids,hydros = \
        records['row'],records['column_'],records['ztop'],\
        records['zbot'],records['Id'],records['hydro']

    upper_flux,lower_flux = -500.0,-1000.0
    upper_std,lower_std = 10.0,10.0
    wel_row,wel_col,wel_lay,wel_name = [],[],[],[]
    wel_flux = []
    pred_name = 'lower_7'
    pred_rate = -2500.0
    for r,c,id,hydro in zip(rows,cols,ids,hydros):
        #--flag the prediction well by name; the original compared the
        #--well id to the pumping rate, which can never match
        if 'lower_' + str(id) == pred_name:
            layers = lower_layers
            name = 'pred_'+str(id)
            flux = pred_rate
        elif hydro == 1:
            layers = upper_layers
            name = 'upper_'+str(id)
            flux = upper_flux
        else:
            layers = lower_layers
            name = 'lower_'+str(id)
            flux = lower_flux
        for l in layers:
            wel_row.append(int(float(r)))
            wel_col.append(int(float(c)))
            wel_lay.append(int(float(l)))
            wel_name.append(name)
            wel_flux.append(flux)


    print 'building ghb and wel lrc lists from ibounds'
    ghb_stage,ghb_cond = 95.0,50000.0
    ghb_rows,ghb_cols,ghb_lays,ghb_stages,ghb_conds,ghb_names = [],[],[],[],[],[]
    flux_rows,flux_cols,flux_lays,flux_names = [],[],[],[]
    ghb_stage_rate = 0.0005
    dwn_j = 204
    for k,iname in enumerate(grid.ibound_names):
        arr = np.loadtxt('_model\\'+iname)
        for i in range(grid.nrow):
            for j in range(grid.ncol):
                if arr[i,j] == 4:
                    ghb_rows.append(i+1)
                    ghb_cols.append(j+1)
                    ghb_lays.append(k+1)
                    if j < dwn_j:
                        ghb_stages.append(ghb_stage + ((dwn_j - j) * ghb_stage_rate))
                    elif j == dwn_j:
                        ghb_stages.append(ghb_stage)
                    elif j > dwn_j:
                        ghb_stages.append(ghb_stage + ((j - dwn_j) * ghb_stage_rate))
                elif arr[i,j] == 2 and grid.lay_key[k] == 'upper':
                    flux_rows.append(i+1)
                    flux_cols.append(j+1)
                    flux_lays.append(k+1) 
                    flux_names.append('flux_'+str(k+1))
                        


    wel_row.extend(flux_rows)
    wel_col.extend(flux_cols)
    wel_lay.extend(flux_lays)
    wel_name.extend(flux_names)
    #--calc the flux on the north
    ncells = len(flux_rows)
    xsec = 5.0 * 100.0
    target_rate = 0.02
    flux = xsec * target_rate
    


    #wel_flux.extend([flux]*len(flux_rows))
    df_wel = pandas.DataFrame({'layer':wel_lay,'row':wel_row,'column':wel_col,'name':wel_name})
    df_wel.to_csv('_misc\\well_locs.csv')

    df_ghb = pandas.DataFrame({'layer':ghb_lays,'row':ghb_rows,'column':ghb_cols,'stage':ghb_stages})
    df_ghb['conductance'] = ghb_cond
    df_ghb.to_csv('_misc\\ghb_locs.csv')

    #--write tpl files for each 
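    #--(a PEST template file starts with 'ptf <delimiter>' and marks each
    #--parameter cell with that delimiter, e.g. ~w0001_0001~)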
    step = relativedelta(months=1)
    day = grid.start
    d_count = 1
    p_count = 1
    wel_pnames = []
    pdict = {}
    while day < grid.end:
        day_entries = []
        last = wel_name[0]
        for i,name in enumerate(wel_name):
            if 'flux' in name:
                name = name.split('_')[0]

            if name != last:
                p_count += 1
                last = name
            pname = 'w{0:04d}_{1:04d}'.format(p_count,d_count)
            if pname not in wel_pnames:
                wel_pnames.append(pname)
            tpl_string = '~{0:20s}~'.format(pname)
            day_entries.append(tpl_string)
        pdict[day.strftime('%Y%m%d')] = day_entries
        day += step
        d_count += 1
    wel_tpl = pandas.DataFrame(pdict,index=wel_name)
    grouped = wel_tpl.groupby(level=0).last()   
    f = open('tpl\\wel.tpl','w')
    f.write('ptf ~\n')
    grouped.to_csv(f,index_label='wel_name')
    f.close()
    wel_tpl = wel_tpl.astype(np.float64)
    
    for i,col in enumerate(wel_tpl.columns):
        wel_tpl[col] = 1.0
    grouped = wel_tpl.groupby(level=0).last()
    grouped.to_csv('par\\wel.dat',index_label='wel_name')
    #--generate markov series for the upper/lower and flux
    #--set the last entry to the max for the prediction
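    #--each step is val_t = val_(t-1) + beta * N(0,sigma), i.e. a damped random walk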
    flux_series = [float(flux)]
    beta = 0.5 
    for i in range(wel_tpl.shape[1]-1):
        innov = np.random.normal(0.0,flux*0.1)

        val = float(flux_series[-1]) + (beta * innov)
        print val
        flux_series.append(val)  
    flux_series[-1] = min(flux_series)
    lower_series = [lower_flux]
    beta = 0.5 
    for i in range(wel_tpl.shape[1]-1):
        innov = np.random.normal(0.0,lower_std)
        val = lower_series[-1] + (beta * innov)
        lower_series.append(val)  
    #lower_series = np.array(lower_series) - lower_flux
    wnames = []
    for wname in wel_tpl.index:
        if wname == pred_name:
            wseries = np.zeros((len(lower_series)))
            wseries[-1] = pred_rate                   
            wel_tpl.ix[wname] = wseries

        elif 'upper' in wname:
            wseries = lower_series + np.random.normal(0.0,10.0,wel_tpl.shape[1])
            wseries[np.where(wseries > 0.0)] = 0.0
            wseries[-1] = np.min(wseries)
            wel_tpl.ix[wname] = wseries 
        elif 'lower' in wname:
            wseries = lower_series + np.random.normal(0.0,10.0,wel_tpl.shape[1])
            wseries[np.where(wseries > 0.0)] = 0.0
            wseries[-1] = np.min(wseries)
            wel_tpl.ix[wname] = wseries - (lower_flux - upper_flux)
        elif 'flux' in wname:
            wel_tpl.ix[wname] = flux_series
        wnames.append(wname)
    grouped = wel_tpl.groupby(level=0).last()
     
    grouped.to_csv('base\\wel.dat',index_label='wel_name')
    #wel_tpl.dtype = np.float64
    
    #wel_tpl.ix['flux_1'].T.plot(legend=False)
    #wel_tpl.ix['upper_1'].T.plot(legend=False)
    #wel_tpl.ix['upper_5'].T.plot(legend=False)
    #wel_tpl.ix['lower_2'].T.plot(legend=False)
    #pylab.show()

    pdict = {}
    day = grid.start
    d_count = 1
    ghb_pnames = []
    while day < grid.end:
        day_entries = []    
        for ptype in ['stg','cnd']:
            pname = '{0:3s}_{1:04d}'.format(ptype,d_count)
            ghb_pnames.append(pname)
            tpl_string = '~{0:20s}~'.format(pname)
            day_entries.append(tpl_string)
        pdict[day.strftime('%Y%m%d')] = day_entries
        day += step
        d_count += 1
    ghb_tpl = pandas.DataFrame(pdict,index=('stage','conductance'))
    f = open('tpl\\ghb.tpl','w')
    f.write('ptf ~\n')
    ghb_tpl.to_csv(f,index_label='ptype')
    f.close()
    for i,col in enumerate(ghb_tpl.columns):
        ghb_tpl[col] = 1

    ghb_tpl.to_csv('par\\ghb.dat',index_label='ptype')
    


    f = open('pst_components\\ghbwel_pars.dat','w',0)
    for pname in wel_pnames:
        f.write('{0:20s}  log   factor  1.0  1.0e-10  1.0e+10  well_mult   1.0   0.0   1\n'.format(pname))
    for pname in ghb_pnames:
        f.write('{0:20s}  log   factor  1.0  1.0e-10  1.0e+10  ghb_mult   1.0   0.0   1\n'.format(pname))
    f.close()
    f = open('pst_components\\ghbwel_grps.dat','w',0)
    f.write('well_mult       relative     1.0000E-02   0.000      switch      2.000      parabolic\n')
    f.write('ghb_mult        relative     1.0000E-02   0.000      switch      2.000      parabolic\n')
    f.close()
Example 31
import sys
import numpy as np
import shapefile

#--an optional command line arg 'R' sets the restart flag
rst = False
try:
    if sys.argv[1].upper() == 'R':
        rst = True
except IndexError:
    pass

arr_prefix = 'nexrad_rech_inch_day\\rech'
pixel_file = 'pixel_data\\nexrad_inc138246_96_12_rainfall_ord.dat'

#arr_prefix = 'mm_day_pet\\pet'
#pixel_file = 'pet_pixel_all_ord.txt'

#--name of model grid shapefile with pixels attached - from make_pixel_map.py
print 'loading grid shapefile...'
shapefile_name = '..\\..\\_gis\\shapes\\broward_grid_master'
shapes,records = shapefile.load_as_dict(shapefile_name,attrib_name_list=['row','column','pixels','fractions'])
nrow,ncol = records['row'].max(),records['column'].max()        
print 'done'
print 'nrow,ncol',nrow,ncol

#--load the pixel timeseries with 
#--pixel, value and ord date index values for the timeseries file
p_idx,v_idx,d_idx = 0,1,2 
print 'loading pixel timeseries file',pixel_file
pixel = np.loadtxt(pixel_file,usecols=[0,2,3],delimiter=',')
print 'done - ',pixel.shape[0],' records loaded'

#--get a list of the unique ordinal days in pixel time series
pixel_days = np.unique(pixel[:,d_idx])

#--process each day    
Example 32
import sys
import math
import shapefile

def dist(point1,point2):
    xx = (point1[0] - point2[0])**2
    yy = (point1[1] - point2[1])**2
    return math.sqrt(xx+yy)
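#--e.g. dist((0.0,0.0),(3.0,4.0)) returns 5.0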

line_shapename = '..\\_gis\\shapes\\sw_reaches'
#shp_lines = shapefile.Reader(line_shapename)
#lines = shp_lines.shapes()
#l_records = shp_lines.records()
#l_name_idx,reach_idx = 0,2
lines,l_records = shapefile.load_as_dict(line_shapename)


#--tolerance distance
warn_dist = 50.0
tol_dist = 0.0

#--set the writer instance
in_shapename = '..\\_gis\\shapes\\sw_structures'
out_shapename = '..\\_gis\\scratch\\sw_structures_reaches'
shp_points = shapefile.Reader(in_shapename)
points = shp_points.shapes()
p_records = shp_points.records()
p_name_idx,dwnstr_idx = 4,6

wr = shapefile.writer_like(in_shapename)
wr.field('upstream',fieldType='N',size=10)
Example 33
#--coordinate information
x0, y0 = 539750.0, 2785750.0
dx, dy = 500., 500.
xcell, ycell = mfd.cell_coordinates(nrow, ncol, dx, dy)
xcell += x0
ycell += y0
xedge, yedge = mfd.edge_coordinates(nrow, ncol, dx, dy)
xedge += x0
yedge += y0
xmin, xmax = xedge.min(), xedge.max()
ymin, ymax = yedge.min(), yedge.max()
#--read shapefile with cross-section data
shape_name = os.path.join('..', 'GIS', 'UMDCrossSections')
print shape_name
xsect_list = sf.load_shape_list(shape_name)
shapes, records = sf.load_as_dict(shape_name)
nxsect = len(shapes)
xsect_name = []
xsect_label = []
for idx in xrange(0, nxsect):
    xsect_name.append(records['XSECT'][idx].replace(' ', ''))
    xsect_label.append(records['Label'][idx])
#--create crossection figures
dxsect = 5.
ifigure = 0
lay_width = [0.5, 0.5, 0.5, 0.5]
lay_color = ['k', 'k', 'k', 'k']
#             fresh     brackish  saltwater
#surf_color = ['#40d3f7','#4E8975','#F76541']
surf_color = ['#40d3f7', '#40d3f7', '#F76541']
for idx, xsect in enumerate(xsect_list):
Example 34
def prep():
    upper_layers = [3, 4, 5, 6]
    lower_layers = [10, 11, 12, 13]

    print 'loading pumping well locations'
    shapename = 'shapes\\simple_well_grid_join'
    records = shapefile.load_as_dict(shapename, loadShapes=False)
    rows,cols,ztops,zbots,ids,hydros = \
        records['row'],records['column_'],records['ztop'],\
        records['zbot'],records['Id'],records['hydro']

    upper_flux, lower_flux = -500.0, -1000.0
    upper_std, lower_std = 10.0, 10.0
    wel_row, wel_col, wel_lay, wel_name = [], [], [], []
    wel_flux = []
    pred_name = 'lower_7'
    pred_rate = -2500.0
    for r, c, id, hydro in zip(rows, cols, ids, hydros):
        if id == pred_rate:
            layers = lower_layers
            name = 'pred_' + str(id)
            flux = pred_rate
        elif hydro == 1:
            layers = upper_layers
            name = 'upper_' + str(id)
            flux = upper_flux
        else:
            layers = lower_layers
            name = 'lower_' + str(id)
            flux = lower_flux
        for l in layers:
            wel_row.append(int(float(r)))
            wel_col.append(int(float(c)))
            wel_lay.append(int(float(l)))
            wel_name.append(name)
            wel_flux.append(flux)

    print 'building ghb and wel lrc lists from ibounds'
    ghb_stage, ghb_cond = 95.0, 50000.0
    ghb_rows,ghb_cols,ghb_lays,ghb_stages,ghb_conds,ghb_names = [],[],[],[],[],[]
    flux_rows, flux_cols, flux_lays, flux_names = [], [], [], []
    ghb_stage_rate = 0.0005
    dwn_j = 204
    for k, iname in enumerate(grid.ibound_names):
        arr = np.loadtxt('_model\\' + iname)
        for i in range(grid.nrow):
            for j in range(grid.ncol):
                if arr[i, j] == 4:
                    ghb_rows.append(i + 1)
                    ghb_cols.append(j + 1)
                    ghb_lays.append(k + 1)
                    if j < dwn_j:
                        ghb_stages.append(ghb_stage +
                                          ((dwn_j - j) * ghb_stage_rate))
                    elif j == dwn_j:
                        ghb_stages.append(ghb_stage)
                    elif j > dwn_j:
                        ghb_stages.append(ghb_stage +
                                          ((j - dwn_j) * ghb_stage_rate))
                elif arr[i, j] == 2 and grid.lay_key[k] == 'upper':
                    flux_rows.append(i + 1)
                    flux_cols.append(j + 1)
                    flux_lays.append(k + 1)
                    flux_names.append('flux_' + str(k + 1))

    wel_row.extend(flux_rows)
    wel_col.extend(flux_cols)
    wel_lay.extend(flux_lays)
    wel_name.extend(flux_names)
    #--calc the flux on the north
    ncells = len(flux_rows)
    xsec = 5.0 * 100.0
    target_rate = 0.02
    flux = xsec * target_rate

    #wel_flux.extend([flux]*len(flux_rows))
    df_wel = pandas.DataFrame({
        'layer': wel_lay,
        'row': wel_row,
        'column': wel_col,
        'name': wel_name
    })
    df_wel.to_csv('_misc\\well_locs.csv')

    df_ghb = pandas.DataFrame({
        'layer': ghb_lays,
        'row': ghb_rows,
        'column': ghb_cols,
        'stage': ghb_stages
    })
    df_ghb['conductance'] = ghb_cond
    df_ghb.to_csv('_misc\\ghb_locs.csv')

    #--write tpl files for each
    step = relativedelta(months=1)
    day = grid.start
    d_count = 1
    p_count = 1
    wel_pnames = []
    pdict = {}
    while day < grid.end:
        day_entries = []
        last = wel_name[0]
        for i, name in enumerate(wel_name):
            if 'flux' in name:
                name = name.split('_')[0]

            if name != last:
                p_count += 1
                last = name
            pname = 'w{0:04d}_{1:04d}'.format(p_count, d_count)
            if pname not in wel_pnames:
                wel_pnames.append(pname)
            tpl_string = '~{0:20s}~'.format(pname)
            day_entries.append(tpl_string)
        pdict[day.strftime('%Y%m%d')] = day_entries
        day += step
        d_count += 1
    wel_tpl = pandas.DataFrame(pdict, index=wel_name)
    grouped = wel_tpl.groupby(level=0).last()
    f = open('tpl\\wel.tpl', 'w')
    f.write('ptf ~\n')
    grouped.to_csv(f, index_label='wel_name')
    f.close()
    wel_tpl = wel_tpl.astype(np.float64)

    for i, col in enumerate(wel_tpl.columns):
        wel_tpl[col] = 1.0
    grouped = wel_tpl.groupby(level=0).last()
    grouped.to_csv('par\\wel.dat', index_label='wel_name')
    #--generate markov series for the upper/lower and flux
    #--set the last entry to the max for the prediction
    flux_series = [float(flux)]
    beta = 0.5
    for i in range(wel_tpl.shape[1] - 1):
        innov = np.random.normal(0.0, flux * 0.1)

        val = float(flux_series[-1]) + (beta * innov)
        print val
        flux_series.append(val)
    flux_series[-1] = min(flux_series)
    lower_series = [lower_flux]
    beta = 0.5
    for i in range(wel_tpl.shape[1] - 1):
        innov = np.random.normal(0.0, lower_std)
        val = lower_series[-1] + (beta * innov)
        lower_series.append(val)
    #lower_series = np.array(lower_series) - lower_flux
    wnames = []
    for wname in wel_tpl.index:
        if wname == pred_name:
            wseries = np.zeros((len(lower_series)))
            wseries[-1] = pred_rate
            wel_tpl.ix[wname] = wseries

        elif 'upper' in wname:
            wseries = lower_series + np.random.normal(0.0, 10.0,
                                                      wel_tpl.shape[1])
            wseries[np.where(wseries > 0.0)] = 0.0
            wseries[-1] = np.min(wseries)
            wel_tpl.ix[wname] = wseries
        elif 'lower' in wname:
            wseries = lower_series + np.random.normal(0.0, 10.0,
                                                      wel_tpl.shape[1])
            wseries[np.where(wseries > 0.0)] = 0.0
            wseries[-1] = np.min(wseries)
            wel_tpl.ix[wname] = wseries - (lower_flux - upper_flux)
        elif 'flux' in wname:
            wel_tpl.ix[wname] = flux_series
        wnames.append(wname)
    grouped = wel_tpl.groupby(level=0).last()

    grouped.to_csv('base\\wel.dat', index_label='wel_name')
    #wel_tpl.dtype = np.float64

    #wel_tpl.ix['flux_1'].T.plot(legend=False)
    #wel_tpl.ix['upper_1'].T.plot(legend=False)
    #wel_tpl.ix['upper_5'].T.plot(legend=False)
    #wel_tpl.ix['lower_2'].T.plot(legend=False)
    #pylab.show()

    pdict = {}
    day = grid.start
    d_count = 1
    ghb_pnames = []
    while day < grid.end:
        day_entries = []
        for ptype in ['stg', 'cnd']:
            pname = '{0:3s}_{1:04d}'.format(ptype, d_count)
            ghb_pnames.append(pname)
            tpl_string = '~{0:20s}~'.format(pname)
            day_entries.append(tpl_string)
        pdict[day.strftime('%Y%m%d')] = day_entries
        day += step
        d_count += 1
    ghb_tpl = pandas.DataFrame(pdict, index=('stage', 'conductance'))
    f = open('tpl\\ghb.tpl', 'w')
    f.write('ptf ~\n')
    ghb_tpl.to_csv(f, index_label='ptype')
    f.close()
    for i, col in enumerate(ghb_tpl.columns):
        ghb_tpl[col] = 1

    ghb_tpl.to_csv('par\\ghb.dat', index_label='ptype')

    f = open('pst_components\\ghbwel_pars.dat', 'w', 0)
    for pname in wel_pnames:
        f.write(
            '{0:20s}  log   factor  1.0  1.0e-10  1.0e+10  well_mult   1.0   0.0   1\n'
            .format(pname))
    for pname in ghb_pnames:
        f.write(
            '{0:20s}  log   factor  1.0  1.0e-10  1.0e+10  ghb_mult   1.0   0.0   1\n'
            .format(pname))
    f.close()
    f = open('pst_components\\ghbwel_grps.dat', 'w', 0)
    f.write(
        'well_mult       relative     1.0000E-02   0.000      switch      2.000      parabolic\n'
    )
    f.write(
        'ghb_mult        relative     1.0000E-02   0.000      switch      2.000      parabolic\n'
    )
    f.close()
def setup():
    '''write an ssm key file and extract stress period nss lists to binary
    '''
    from bro import seawat as cal
    from bro_pred import seawat as pred

    
    
    #--build a swr reach tidal key
    f = open('..\\_BCDPEP\\BCDPEP_reach_conc.dat','r')
    f.readline()
    tidal_conc = {}
    for line in f:
        raw = line.strip().split(',')
        tidal_conc[int(raw[0])] = float(raw[1])
    f.close()

    #--group the tidal source reaches if the conc is the same
    tidal_rc,tidal_names = [],[]  
    concs,groups = [],[]
    for sreach,conc in tidal_conc.iteritems():
        if conc in concs:
            groups[concs.index(conc)].append(sreach)
        else:
            groups.append([sreach])
            concs.append(conc)
            tidal_rc.append([])
            tidal_names.append([])
    
    import shapefile
    shapename = '..\\_gis\\scratch\\sw_reaches_conn_swrpolylines_2'
    recs = shapefile.load_as_dict(shapename,loadShapes=False)
    
    for r,c,sreach,sstruct,sname in zip(recs['ROW'],recs['COLUMN'],recs['SRC_reach'],recs['SRC_struct'],recs['SRC_name']):
        if sstruct == -1:
            for i,group in enumerate(groups):
                if sreach in group:
                    tidal_rc[i].append((r,c))
                    if sname not in tidal_names[i]:
                        tidal_names[i].append(sname)

    f = open('misc\\ssm_riv.key','w',0)
    f.write('group_name,row,col,names\n')
    for i,[tups,names] in enumerate(zip(tidal_rc,tidal_names)):
        name = '_'.join(names).replace(' ','_').replace(',','_')
        for (r,c) in tups:
            f.write('riv_cn_#'+str(i+1)+','+str(r)+','+str(c)+','+name+'\n')
    f.close()        


    #--build intercoastal ghb tidal key
    ibound = np.loadtxt('..\\_model\\bro.03\\seawatref\\ibound_CS.ref',dtype=np.int)
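    #--cells are grouped by their ibound value: each distinct value becomes
    #--one intercoastal ghb concentration group (ghb_cn_#<value>)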
    ic_groups = {}
    for i in range(ibound.shape[0]):
        for j in range(ibound.shape[1]):            
            name = 'ghb_cn_#'+str(ibound[i,j])
            if name in ic_groups.keys():
                ic_groups[name].append((i+1,j+1))
            else:
                ic_groups[name] = [(i+1,j+1)]
    f = open('misc\\ssm_ghb.key','w',0)
    f.write('name,row,col\n')
    for name,tups in ic_groups.iteritems():
        for (r,c) in tups:
            f.write(name+','+str(r)+','+str(c)+'\n')
    f.close()
    
    #--write an ssm template file - monthly
    par_dict = {}
    tpl_entries = {}
    months = calendar.month_abbr
    pnames = []
    tpl_dict = {}        
    for riv_grp in range(len(tidal_rc)):
        tpl_entries = []
        for mn in months[1:]:    
            pname = 'rcn_'+str(riv_grp+1)+'_'+mn
            assert len(pname) <= 10,pname
            pnames.append(pname)
            tpl_entry = '~{0:25s}~'.format(pname)
            tpl_entries.append(tpl_entry)
        tpl_dict['riv_cn_#'+str(riv_grp+1)] = tpl_entries
    par_dict['riv_conc'] = pnames
    pnames = []
    for ghb_grp in ic_groups.keys():
        grp_num = int(ghb_grp.split('#')[1])
        tpl_entries = []
        for mn in months[1:]:
            pname = 'gcn_'+str(grp_num)+'_'+mn
            assert len(pname) <= 10,pname
            pnames.append(pname)
            tpl_entry = '~{0:25s}~'.format(pname)
            tpl_entries.append(tpl_entry)
        tpl_dict['ghb_cn_#'+str(grp_num)] = tpl_entries
    par_dict['ghb_conc'] = pnames

    #--save the template file
    df = pandas.DataFrame(tpl_dict)
    df.index = df.index + 1
    f = open('tpl\\ghbwel_ssm.tpl','w',0)
    f.write('ptf ~\n')
    df.to_csv(f,index_label='month')
    f.close()

    #--save a generic par file for testing
    for col in df.columns:
        df[col] = 1.0
    df.to_csv('par\\ghbwell_ssm.csv',index_label='month')

    #--write the pst components
    f_grp = open('pst_components\\ghbwel_ssm_grps.dat','w',0)
    f_par = open('pst_components\\ghbwel_ssm_pars.dat','w',0)
    pargps = par_dict.keys()
    pargps.sort()
    for pargp in pargps:
        pnames = par_dict[pargp]
        f_grp.write('{0:<20s} factor 0.01  0.001 switch  2.0 parabolic\n'.format(pargp))
        for pname in pnames:

            f_par.write('{0:<20s} log factor  1.0 1.0e-10 1.0e+10 {1:<20s}  1.0 0.0  0\n'.format(pname,pargp))
    f_grp.close()
    f_par.close()

    #--extract sp data and zip to binary   
    #--load the key files into a dict
    itype_dict = {'riv':4,'ghb':5}
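    #--(MT3DMS SSM itype codes: 4 = river, 5 = general-head boundary)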
    files = os.listdir('misc\\')
    key_files = []
    for f in files:
        if 'key' in f and 'ssm' in f:
            key_files.append(f)
    key_dict = {}
    for key_file in key_files:
        ptype = key_file.split('.')[0].split('_')[1]
        itype = itype_dict[ptype]
        f = open('misc\\'+key_file,'r')
        header = f.readline()
        for line in f:
            raw = line.strip().split(',')
            r,c = int(raw[1]),int(raw[2])       
            key_dict[(r,c,itype)] = raw[0]
        f.close()
    
         
    sp_lists = [cal.sp_start,pred.sp_start]    
    for ssm_file,list_dir,sp_list in zip(SSM_FILES,LIST_DIRS,sp_lists):
        f = open(ssm_file,'r')
        #--read the header info
        logicals = f.readline()
        maxssm = int(f.readline().strip())
        #--read the rch,ets junk
        rchets_lines = []
        for i in range(4):
            rchets_lines.append(f.readline().strip())
        #--start the sp loop
        kper = 0
        while True:
            try:
                nss = int(f.readline().strip())
            except:
                break
            lines = []
            #line_str = ''
            for i in range(nss):
                line = parse_ssm_line(f.readline())
                try:
                    line[-1] = key_dict[(line[1],line[2],line[4])]
                except:
                    pass
                #line_str += line                
                lines.append(tuple(line))                                
            arr = np.array(lines,dtype=ssm_dtype_extend) 
            dt = sp_list[kper]
            fname = list_dir+'ssm_'+dt.strftime('%Y%m%d')+'_'+str(nss)+'.dat'
            print 'writing',fname
            #--for testing
            #np.savetxt('test.dat',arr,fmt=' %9d %9d %9d %15.6E %9d %20s')
            arr.tofile(fname)
            #--read the repeat rch and ets lines
            rch = f.readline()
            rch = f.readline()
            kper += 1
import numpy as np
import shapefile

shapename = '..\\_gis\\scratch\\sw_reaches_coastal_conc'
records = shapefile.load_as_dict(shapename,loadShapes=False)
print records.keys()
conc_key = 'max_relcon'
reach_key = 'reach'
tidal_key = 'stage_rec'
f = open('BCDPEP_reach_conc.dat','w',0)
f.write('source_reach,rel_conc\n')
for conc,reach,tidal in zip(records[conc_key],records[reach_key],records[tidal_key]):
    if 'COASTAL' in tidal.upper():
        f.write(str(int(float(reach)))+','+str(conc)+'\n')
f.close()

#--coordinate information
x0, y0  = 539750.0, 2785750.0
dx,dy   = 500., 500.
xcell, ycell = mfd.cell_coordinates(nrow,ncol,dx,dy)
xcell += x0
ycell += y0
xedge, yedge = mfd.edge_coordinates(nrow,ncol,dx,dy)
xedge += x0
yedge += y0
xmin,xmax = xedge.min(),xedge.max()
ymin,ymax = yedge.min(),yedge.max()
#--read shapefile with cross-section data
shape_name = os.path.join( '..','GIS','UMDCrossSections' )
print shape_name
xsect_list = sf.load_shape_list(shape_name)
shapes,records = sf.load_as_dict(shape_name)
nxsect = len(shapes)
xsect_name = []
xsect_label = []
for idx in xrange( 0, nxsect ):
    xsect_name.append( records['XSECT'][idx].replace(' ','') )
    xsect_label.append( records['Label'][idx] )
#--create crossection figures
dxsect = 5.
ifigure = 0
lay_width = [0.5,0.5,0.5,0.5]
lay_color = ['k','k','k','k']
#             fresh     brackish  saltwater
#surf_color = ['#40d3f7','#4E8975','#F76541']
surf_color = ['#40d3f7','#40d3f7','#F76541']
for idx,xsect in enumerate( xsect_list ):
Example 38
                else:
                    groups[i] = [intrv]
                    midpoints[i] = mid
    return midpoints, groups


#--build the geo array
geom = np.zeros((seawat.nlay + 1, seawat.nrow, seawat.ncol))
geom[0, :, :] = np.loadtxt(seawat.top_name)
for i, lay in enumerate(seawat.layer_botm_names):
    arr = np.loadtxt(seawat.ref_dir + lay + '_bot.ref')
    geom[i + 1, :, :] = arr

#--load ftl salt grid shapefile
shapename = '..\\..\\_gis\\scratch\\ftl_salt_grid'
shapes, records = shapefile.load_as_dict(shapename)
wellnums, rows, cols = records['Id'], records['row'], records['column_']
xs, ys = [], []
for s in shapes:
    x, y = s.points[0]
    xs.append(x)
    ys.append(y)

#--load the relconc dataframes
df_dir = '..\\..\\_ftl_salt\\dataframes\\'
files = os.listdir(df_dir)
dfs = {}
for f in files:
    if 'relconc' in f:
        print f
        wellnum = int(f.split('_')[0].split('-')[1])
import numpy as np
import calendar
import shapefile
import pestUtil

#--load the grid shapefile that has the nexrad groups
shapename = 'shapes\\cwm_grid_groups'
records = shapefile.load_as_dict(shapename,['row','column','nex_group'],loadShapes=False)

#--load the grid info
ginfo = pestUtil.load_grid_spec('misc\\grid.spc')

#--fill in the groups array
grp_arr = np.zeros((ginfo['nrow'],ginfo['ncol']))

for r,c,g in zip(records['row'],records['column'],records['nex_group']):
    grp_arr[r-1,c-1] = g
np.savetxt('ref\\UMD_nexrad.ref',grp_arr,fmt=' %2.0f')
num_grps = np.unique(grp_arr).shape[0]

f = open('fac\\nex_fac.dat','w')
f.write('cl_nexpts\n')
f.write('cl_mf_grid\n')
f.write('{0:10.0f}{1:10.0f}\n'.format(1,num_grps))
f.write('{0:10.0f}{1:10.0f}\n'.format(1,ginfo['nrow']*ginfo['ncol']))
f.write('{0:10.0f}{1:10.0f}\n'.format(num_grps,ginfo['nrow']*ginfo['ncol']))
f.write('{0:10.0f}\n'.format(1))

cell_num = 1         
for i in range(ginfo['nrow']):
    for j in range(ginfo['ncol']):
def write():
    #--get the well locations
    shapename = '..\\shapes\\simple_well_grid_join'
    records = shapefile.load_as_dict(shapename,loadShapes=False)
    rows,cols,ztops,zbots,ids,hydros = \
        records['row'],records['column_'],records['ztop'],\
        records['zbot'],records['Id'],records['hydro']


    #--random sampling
    #--first create normal distributions for the wells
    upper_mean,upper_std = -500.0,10.0
    lower_mean,lower_std = -5000.0,50.0
    nper = len(grid.sp_start)
    well_records = []
    for id,hydro in zip(ids,hydros):
        if hydro == 3:
            rec = np.random.normal(lower_mean,lower_std,nper)
        elif hydro == 1:
            rec = np.random.normal(upper_mean,upper_std,nper)
        rec = add_zeros(rec)
        well_records.append(rec)
    well_records = np.array(well_records).transpose()
    well_records[np.where(well_records>0.0)] = 0.0
    mnw_ds2 = []
    names = []
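    #--MNW2 dataset 2, one block per well: 2a WELLID,NNODES (-1 -> one
    #--Ztop/Zbot interval), 2b LOSSTYPE and option flags, 2c Rw for THIEM,
    #--2d-2 Ztop,Zbot,row,col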
    for id,r,c,top,bot,hydro in zip(ids,rows,cols,ztops,zbots,hydros):
        #--mnw 
        if hydro == 1:
            name = 'upper_'+str(id)
        else:
            name = 'lower_'+str(id)
        names.append(name)
        line_2a = '{0:20s}{1:10f}{2:>33s}\n'.format(name,-1,'#2a')
        mnw_ds2.append(line_2a)
        line_2b = '{0:20s}{1:10d}{2:10d}{3:10d}{4:10d} #2b\n'.format('THIEM',0,0,0,0)
        mnw_ds2.append(line_2b)
        line_2c = '{0:10.4f}{1:>53s}\n'.format(1.0,'#2c')
        mnw_ds2.append(line_2c)
        line_2d2 = ' {0:9.4f} {1:9.4f} {2:9.0f} {3:9.0f}{4:>26s}\n'.format(float(top),float(bot),int(float(r)),int(float(c)),'#2d-2')
        mnw_ds2.append(line_2d2) 

    f_mnw = open(grid.modelname+'.mnw','w',0)
    f_mnw.write('# '+sys.argv[0]+' '+str(datetime.now())+'\n')
    f_mnw.write(' {0:9.0f} {1:9.0f} {2:9.0f}\n'.format(len(ids),0,0))    
    for line in mnw_ds2:
        f_mnw.write(line)
    upper_layers = [3,4,5,6]
    lower_layers = [10,11,12,13,14,15,16,17,18]
    f_wel = open(grid.modelname+'.wel','w',0)
    f_wel.write('# '+sys.argv[0]+' '+str(datetime.now())+'\n')
    f_wel.write(' {0:9.0f} {1:9.0f} {2:9.0f}\n'.format(300,0,0))    
    for i,slice in enumerate(well_records):
        lines = []
        for name,rate in zip(names,slice):
            lines.append('{0:20s}{1:15.4G}\n'.format(name,rate))
        f_mnw.write('{0:10d} {1:20s} {2:3d}\n'.format(len(lines),'#3 Stress Period',i+1))
        for line in lines:
            f_mnw.write(line)
        
        lines = []
        for name,rate,row,col in zip(names,slice,rows,cols):
            if 'upper' in name:
                layers = upper_layers
            else:
                layers = lower_layers
            rate /= float(len(layers))
            for lay in layers:
                line = '{0:10d}{1:10d}{2:10d}{3:15.4E}  #{4:20s}\n'\
                    .format(lay,int(float(row)),int(float(col)),rate,name)
                lines.append(line)
        f_wel.write('{0:10d}{1:10d} #{2:20s}{3:4d}\n'.format(len(lines),0,'stress period ',i+1))        
        for line in lines:
            f_wel.write(line)
    f_mnw.close()
    f_wel.close()
import os
from datetime import datetime, timedelta
import numpy as np
from scipy.io.netcdf import netcdf_file as ncdf
import pandas
import shapefile

#--get a list of the eden masterid numbers that are needed
recs = shapefile.load_as_dict('..\\_gis\\scratch\\broward_grid_eden',
                              attrib_name_list=['eden_cells'],
                              loadShapes=False)
masterids = []
for r in recs['eden_cells']:
    raw = r.split()
    for rr in raw:
        if int(rr) not in masterids:
            masterids.append(int(rr))

#--get the x,y of the masterids
recs = shapefile.load_as_dict('..\\_gis\\shapes\\EDEN_grid_poly_Jan_10_sp',
                              loadShapes=False)
xs, ys = [], []
for x, y, m in zip(recs['X_COORD'], recs['Y_COORD'], recs['MASTERID']):
    if int(m) in masterids:
        xs.append(float(x))
        ys.append(float(y))

cdf_dir = 'surface_netcdf\\'
cdf_files = os.listdir(cdf_dir)

#--group files by year