import numpy as np
import shapefile


def load_shape_arrays(shapename, rowname='row', colname='column'):
    #--get the field names and make sure rowname and colname are found
    field_names = shapefile.get_fieldnames(shapename)
    assert rowname in field_names
    assert colname in field_names

    #--get the decimal of each field - to get the array type later
    grid_shp = shapefile.Reader(shapename)
    header = grid_shp.dbfHeader()
    h_dict = {}
    for item in header:
        h_dict[item[0]] = int(item[-1])

    #--load all of the records as a dict
    records = shapefile.load_as_dict(shapename, loadShapes=False)

    #--get nrow and ncol
    nrow = max(records[rowname])
    ncol = max(records[colname])

    #--row and column maps
    row, col = records[rowname], records[colname]

    #--setup a dict for all of the arrays and map the values
    array_dict = {}
    for key, record in records.iteritems():

        decimal = h_dict[key]
        if decimal == 0:
            arr = np.zeros((nrow, ncol), dtype=np.int)
        else:
            arr = np.zeros((nrow, ncol))
        print key, arr.dtype
        try:
            for r, c, val in zip(row, col, record):
                arr[r - 1, c - 1] = val
            array_dict[key] = arr.copy()
        except (IndexError, TypeError, ValueError):
            print 'could not cast ' + str(key) + ' field to array'

    return array_dict
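
#--a minimal usage sketch (assumes the grid shapefile carries the 'row' and
#--'column' fields required by the asserts above):
#arrays = load_shape_arrays('..\\..\\_gis\\shapes\\broward_grid_master')
#ibound = arrays['ibound_CS']
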
import shapefile

shapename = '..\\_gis\\scratch\\all_K_locations_layers'
shp = shapefile.Reader(shapename)
records, shapes = shp.records(), shp.shapes()
fnames = shapefile.get_fieldnames(shapename)
lay_idx = fnames.index('k_layer')
k_idx = fnames.index('K_ftday')
#--loop once to get layer groups
layer_groups = {}
for rec, shape in zip(records, shapes):
    lay = rec[lay_idx]
    k = float(rec[k_idx])
    point = shape.points[0]
    if lay in layer_groups.keys():
        layer_groups[lay][0].append(k)
        layer_groups[lay][1].append(point)
    else:
        layer_groups[lay] = [[k], [point]]

out_dir = 'gslib\\'
for layer, [ks, points] in layer_groups.iteritems():
    print layer
    f = open(out_dir + str(layer) + '.dat', 'w')
    f.write(str(layer) + '\n')
    f.write('3\nX\nY\nK\n')
    for pt, k in zip(points, ks):
        f.write('{0:15.6F}  {1:15.6F}  {2:15.6F}\n'.format(pt[0], pt[1], k))
    f.close()
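#--note: the header written above follows the simple GSLIB/Geo-EAS layout -
#--a title line, the number of columns, then one column name per line
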
def main():

    #--load well locations and pandas dataframe
    well_shapename = '..\\..\\_gis\\shapes\\pws_combine'
    well_points = sf.load_shape_list(well_shapename)
    #shp = sf.reader(well_shapename)
    #print sf.get_fieldnames(well_shapename)
    records = sf.load_as_dict(well_shapename, loadShapes=False)
    well_names = records['DPEP_NAME']
    well_zbots = records['zbot']
    float_zbots = []
    for i, wb in enumerate(well_zbots):
        float_zbots.append(float(wb))
    well_zbots = np.array(float_zbots)
    well_rows, well_cols = records['row'], records['column']
    pump = pandas.read_csv(
        '..\\..\\_pumpage\\dataframes\\pws_filled_zeros.csv',
        index_col=0,
        parse_dates=True)

    #--load lines and active dates
    line_shapename = '..\\..\\_gis\\shapes\\sw_reaches'
    lines = sf.load_shape_list(line_shapename)
    shp = sf.Reader(line_shapename)
    fnames = sf.get_fieldnames(line_shapename, ignorecase=True)
    #for i,fn in enumerate(fnames):
    #    print i,fn
    a_idx = fnames.index('ACTIVE_ST')
    line_active = []
    for i in range(shp.numRecords):
        rec = shp.record(i)
        year = int(rec[a_idx])
        if year < flow.start.year:
            year = flow.start.year
        dt = datetime(year=year, month=1, day=1)
        line_active.append(dt)

    #--head stuff
    #--use bot of Q5 to check for dry cells
    hds_elev = np.loadtxt(flow.ref_dir + 'Q5_bot.ref')
    hds_layer_idx = 0
    head_file = flow.root + '.hds'
    headObj = mfb.MODFLOW_Head(flow.nlay, flow.nrow, flow.ncol, head_file)
    htimes = headObj.get_time_list()

    #--zeta stuff
    zta_layer_idx = 0
    zta_elev = np.loadtxt(flow.ref_dir + 'Q1_bot.ref')
    zeta_file = flow.root + '.zta'
    zetaObj = mfb.MODFLOW_CBB(flow.nlay, flow.nrow, flow.ncol, zeta_file)
    zta_text = '    ZETAPLANE  1'
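    #--the padded label must match the record text stored in the binary zeta file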
    z1times = zetaObj.get_time_list(zta_text)
    #zeta_file = None

    #-- stress period step
    sp_step = 1
    plt_dir = 'png\\results\\'

    #--for ffmpeg - sequentially numbered
    plt_num = 1
    istart = 0
    q_args = []
    for i, dt in enumerate(flow.sp_start):
        if i >= istart and i % sp_step == 0:
            print 'building args list for ', dt
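            #--htimes column 3 holds the byte seekpoint of each saved head record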
            try:
                h_seekpoint = long(htimes[i, 3])
            except IndexError:
                break
            if zeta_file:
                z_seekpoint = long(z1times[i, 3])
            else:
                z_seekpoint = None

            act_lines = []
            for ldt, line in zip(line_active, lines):
                if ldt <= dt:
                    act_lines.append(line)

            act_wells = []
            if i == 0:
                plt_start = dt
            else:
                plt_start = flow.sp_start[i - sp_step]
            plt_end = flow.sp_end[i]
            pump_plt = pump[plt_start:plt_end]
            pump_plt_sum = pump_plt.sum()
            for wname, wpoint, wrow, wcol, wzbot in zip(
                    well_names, well_points, well_rows, well_cols, well_zbots):
                if wname in pump_plt.keys() and pump_plt_sum[wname] != 0:
                    act_wells.append(wpoint)

            fig_name = plt_dir + 'sp{0:03.0f}.png'.format(plt_num)
            #fig_title = 'stress period '+str(i+1)+' start date '+dt.strftime('%d/%m/%Y')
            fig_title = str(dt.year)
            args = [
                fig_name, h_seekpoint, z_seekpoint, act_lines, act_wells,
                hds_layer_idx, zta_layer_idx, fig_title
            ]
            q_args.append(args)
            plt_num += 1

    jobq = mp.JoinableQueue()

    #--for testing
    #jobq.put_nowait(q_args[0])
    #jobq.put_nowait(None)
    #plot_worker(jobq,0,head_file,None,hds_elev,zta_elev)
    #return

    procs = []
    num_procs = 6

    for i in range(num_procs):
        #--pass the worker function the jobq and a PID
        p = mp.Process(target=plot_worker,
                       args=(jobq, i, head_file, zeta_file, hds_elev,
                             zta_elev))
        p.daemon = True
        print 'starting process', p.name
        p.start()
        procs.append(p)

    for q in q_args:
        jobq.put(q)

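    #--one None sentinel per process so every worker gets a shutdown signal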
    for p in procs:
        jobq.put(None)

    #--block until all finish
    for p in procs:
        p.join()
        print p.name, 'Finished'

    cmd_line = 'ffmpeg.exe -i ' + plt_dir + 'sp%03d.png -r 24 demo.avi -y'
    os.system(cmd_line)
    return
    #
    ##--run fac2real
    #fac2real_args[0] = fac_name
    #fac2real_args[2] = klocs_name
    #fac2real_args[-3] = k_name
    #args = '\n'.join(fac2real_args)
    #f = open(facin_name,'w')
    #f.write(args)
    #f.close()
    #os.system('fac2real.exe <'+facin_name+' >'+facout_name)
    ##break

#--add the interpolated K to the grid shapefile
shapename = '..\\_gis\\shapes\\broward_grid_master'
grid = shapefile.Reader(shapename)
names = shapefile.get_fieldnames(shapename, ignorecase=True)
row_idx, col_idx = names.index('ROW'), names.index('COLUMN')

#--load the k arrays
print 'loading k and std arrays'
k_arrays = []
for name in k_names:
    k = pu.load_wrapped_format(bro.nrow, bro.ncol, name)
    #np.savetxt(name,k,fmt=' %15.6E')
    k_arrays.append(k)
std_arrays = []
for name in std_names:
    std = pu.load_wrapped_format(bro.nrow, bro.ncol, name)
    #np.savetxt(name,std,fmt=' %15.6E')
    std_arrays.append(std)
import sys
import numpy as np
import shapefile

import bro

col_name = 'ibound_CS'
grid_shapename = '..\\..\\_gis\\shapes\\broward_grid_master'
shape = shapefile.Reader(grid_shapename)
fieldnames = shapefile.get_fieldnames(grid_shapename)
v_idx = fieldnames.index(col_name)
r_idx, c_idx = fieldnames.index('row'), fieldnames.index('column')
arr = np.zeros((bro.nrow, bro.ncol)) - 1.0e+10
for i in range(shape.numRecords):
    rec = shape.record(i)
    r, c = rec[r_idx], rec[c_idx]
    val = rec[v_idx]
    arr[r - 1, c - 1] = val
np.savetxt('ibound_CS.ref', arr, fmt=' %3.0f')
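#--sanity check (a sketch): the exported grid can be read back with
#--np.loadtxt('ibound_CS.ref') and compared against arr
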
def main(num_plots):

    #--load well locations and pandas dataframe
    well_shapename = '..\\..\\_gis\\shapes\\pws_combine'
    well_points = sf.load_shape_list(well_shapename)
    #shp = sf.reader(well_shapename)
    #print sf.get_fieldnames(well_shapename)
    records = sf.load_as_dict(well_shapename, loadShapes=False)
    well_names = records['DPEP_NAME']
    well_zbots = records['zbot']
    float_zbots = []
    for i, wb in enumerate(well_zbots):
        float_zbots.append(float(wb))
    well_zbots = np.array(float_zbots)
    well_rows, well_cols = records['row'], records['column']
    pump = pandas.read_csv(
        '..\\..\\_pumpage\\dataframes\\pws_filled_zeros.csv',
        index_col=0,
        parse_dates=True)

    #--load lines and active dates
    line_shapename = '..\\..\\_gis\\shapes\\sw_reaches'
    lines = sf.load_shape_list(line_shapename)
    shp = sf.Reader(line_shapename)
    fnames = sf.get_fieldnames(line_shapename, ignorecase=True)
    #for i,fn in enumerate(fnames):
    #    print i,fn
    a_idx = fnames.index('ACTIVE_ST')
    line_active = []
    for i in range(shp.numRecords):
        rec = shp.record(i)
        year = int(rec[a_idx])
        if year < flow.start.year:
            year = flow.start.year
        dt = datetime(year=year, month=1, day=1)
        line_active.append(dt)

    #--head stuff
    #--use bot of Q5 to check for dry cells
    #hds_elev = np.loadtxt(flow.ref_dir+'Q5_bot.ref')
    #hds_layer_idx = 0
    #head_file = flow.root+'.hds'
    #headObj = mfb.MODFLOW_Head(flow.nlay,flow.nrow,flow.ncol,head_file)
    #htimes = headObj.get_time_list()

    #--conc stuff
    conc_lay_idxs = [0, 5, 9, 11]
    conc_file = 'MT3D001.UCN'
    concObj = mfb.MT3D_Concentration(seawat.nlay, seawat.nrow, seawat.ncol,
                                     conc_file)
    ctimes = concObj.get_time_list()
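    #--ctimes column 2 holds the stress period number; the last column is the
    #--byte seekpoint of each saved concentration record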

    #--zeta stuff
    #zta_layer_idx = 0
    #zta_elev = np.loadtxt(flow.ref_dir+'Q1_bot.ref')
    #zeta_file = flow.root+'.zta'
    #zetaObj = mfb.MODFLOW_CBB(flow.nlay,flow.nrow,flow.ncol,zeta_file)
    #zta_text = '    ZETAPLANE  1'
    #z1times = zetaObj.get_time_list(zta_text)
    #zeta_file = None

    #-- stress period step
    sp_step = 1
    plt_dir = 'png\\results\\seawat\\'

    #--for ffmpeg - sequentially numbered
    plt_num = 1
    istart = 0
    q_args = []
    for i, [start, end] in enumerate(zip(seawat.sp_start, seawat.sp_end)):
        if i >= istart and i % sp_step == 0:
            print 'building args list for stress period ending on ', end
            #--find the conc output nearest the end of the stress period

            try:
                kper_seekpoints = ctimes[np.where(ctimes[:, 2] == i + 1), -1]
                c_seekpoint = long(kper_seekpoints[0][-1])
            except IndexError:
                break

            act_lines = []
            for ldt, line in zip(line_active, lines):
                if ldt <= start:
                    act_lines.append(line)

            act_wells = []
            if i == 0:
                plt_start = start
            else:
                plt_start = seawat.sp_start[i - sp_step]
            plt_end = seawat.sp_end[i]
            pump_plt = pump[plt_start:plt_end]
            pump_plt_sum = pump_plt.sum()
            for wname, wpoint, wrow, wcol, wzbot in zip(
                    well_names, well_points, well_rows, well_cols, well_zbots):
                if wname in pump_plt.keys() and pump_plt_sum[wname] != 0:
                    act_wells.append(wpoint)

            fig_name = plt_dir + 'sp{0:03.0f}_conc.png'.format(plt_num)
            fig_title = 'stress period ' + str(
                i + 1) + ' start date ' + start.strftime('%d/%m/%Y')
            args = [
                fig_name, c_seekpoint, conc_lay_idxs, act_lines, act_wells,
                fig_title
            ]
            q_args.append(args)
            plt_num += 1
            if num_plots is not None and i > num_plots:
                break

    jobq = mp.JoinableQueue()

    #--for testing
    if num_plots is not None:
        jobq.put_nowait(q_args[0])
        jobq.put_nowait(None)
        plot_worker(jobq, 1, conc_file)
        return

    procs = []
    num_procs = 3

    for i in range(num_procs):
        #--pass the worker function the jobq and a PID
        p = mp.Process(target=plot_worker, args=(jobq, i, conc_file))
        p.daemon = True
        print 'starting process', p.name
        p.start()
        procs.append(p)

    for q in q_args:
        jobq.put(q)

    for p in procs:
        jobq.put(None)

    #--block until all finish
    for p in procs:
        p.join()
        print p.name, 'Finished'

    anim_name = 'png\\demo_conc.avi'
    if os.path.exists(anim_name):
        os.remove(anim_name)
    cmd_line = 'ffmpeg.exe -i png\\results\\seawat\\sp%03d_conc.png -r 24 ' + anim_name + ' -y'
    os.system(cmd_line)
    return
def main():

    aprefix = 'rch\\rch_'
    #aprefix = 'et\\et_'
    print 'loading forcing dataframe'
    df = pandas.read_csv('NEXRAD.csv', index_col=0, parse_dates=True)
    #df = pandas.read_csv('PET.csv',index_col=0,parse_dates=True)
    df[df < 0.0] = 0.0
    df_keys = list(df.keys())

    print 'loading grid shapefile info'
    grid_shapename = '..\\shapes\\tsala_grid_nexrad'
    grid_shape = shapefile.Reader(grid_shapename)
    fieldnames = shapefile.get_fieldnames(grid_shapename, ignorecase=True)
    row_idx, col_idx = fieldnames.index('ROW'), fieldnames.index('COLUMN_')
    pix_idx, frac_idx = fieldnames.index('NEX_PIX'), fieldnames.index(
        'NEX_FRAC')
    pixel_map = {}
    pixel_numbers = []
    nrow, ncol = -1.0E+10, -1.0E+10
    for i in range(grid_shape.numRecords):
        if i % 500 == 0:
            print i, 'of', grid_shape.numRecords, '\r',
        rec = grid_shape.record(i)
        pix, frac = rec[pix_idx], rec[frac_idx]
        r, c = rec[row_idx], rec[col_idx]
        if r > nrow: nrow = r
        if c > ncol: ncol = c
        idx_tup = (r - 1, c - 1)
        pf_list = []
        if len(pix) > 0:
            for p, f in zip(pix.split(','), frac.split(',')):
                p = str(p)
                f = float(f)
                pf_list.append((p, f))
        pixel_map[idx_tup] = pf_list

    #--check for missing pixels
    missing = []
    for idx_tup, pf_list in pixel_map.iteritems():
        for p, f in pf_list:
            if p not in df_keys and p not in missing:
                missing.append(p)
    if len(missing) > 0:
        print 'missing data for', len(missing), ' pixels'
        print len(df_keys)
        for m in missing:
            print m
        raise Exception('missing NEXRAD pixels in forcing dataframe')

    print 'processing dataframe rows'
    q_args = []
    for dt, pixel_values in df.iterrows():
        print dt
        aname = aprefix + dt.strftime('%Y%m%d') + '.ref'
        q_args.append([aname, pixel_values])

    jobq = mp.JoinableQueue()

    procs = []
    num_procs = 3
    for i in range(num_procs):
        #--pass the worker function the jobq and a PID
        p = mp.Process(target=worker, args=(i, nrow, ncol, pixel_map, jobq))
        p.daemon = True
        print 'starting process', p.name
        p.start()
        procs.append(p)

    for q in q_args:
        jobq.put(q)

    for p in procs:
        jobq.put(None)

    for p in procs:
        p.join()
        print p.name, 'Finished'
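
#--main() above assumes a module-level worker(); a minimal compatible sketch
#--follows (hypothetical - the real worker is defined elsewhere and may treat
#--the pixel weights differently):
def worker(pid, nrow, ncol, pixel_map, jobq):
    #--consume jobs until the None sentinel arrives
    while True:
        args = jobq.get()
        if args is None:
            jobq.task_done()
            break
        aname, pixel_values = args
        #--area-weighted sum of the NEXRAD pixels covering each model cell
        arr = np.zeros((nrow, ncol))
        for (r, c), pf_list in pixel_map.iteritems():
            for p, f in pf_list:
                arr[r, c] += pixel_values[p] * f
        np.savetxt(aname, arr, fmt=' %15.6E')
        jobq.task_done()
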
import os
import numpy as np
import pandas
from shapely.geometry import Point
import shapefile

airport_pt = Point([935003, 632657])

select_radius = 5280.0 * 4.0

#--first get the nwis conc data
nwis_shapename = '..\\_gis\\scratch\\broward_nwis_sites_reclen_gw'
fields = shapefile.get_fieldnames(nwis_shapename)
conc_idx = fields.index('conc_len')
siteno_idx = fields.index('site_no')
sitename_idx = fields.index('station_nm')
hole_idx = fields.index('hole_depth')
wdepth_idx = fields.index('well_depth')
shp = shapefile.Reader(nwis_shapename)
shapes = shp.shapes()
records = shp.records()

wr = shapefile.writer_like(nwis_shapename)

#--only use those conc records that have > 0 length
sel_records, sel_points = [], []
for shape, rec in zip(shapes, records):
    shp_pt = Point(shape.points[0])
    if airport_pt.distance(shp_pt) < select_radius and int(rec[conc_idx]) > 0:
        sel_records.append(rec)
        sel_points.append(shp_pt)
def main():
    adir = 'rch\\'
    out_prefix = 'summary\\rch_'
    field_prefix = 'nex_'
    out_shapename = '..\\shapes\\tsala_nexrad_summary'
    
    #adir = 'et\\'
    #out_prefix = 'summary\\et_'
    #field_prefix = 'pet_'
    #out_shapename = '..\\shapes\\tsala_pet_summary'

    
    nrow,ncol = 384,369
    

    afiles = os.listdir(adir)
    years = {}
    
    dts = []
    for a in afiles:
        dt = datetime.strptime(a.split('.')[0].split('_')[1],'%Y%m%d')
        dts.append(dt)
        if dt.year in years.keys():
            years[dt.year].append(adir+a)
        else:
            years[dt.year] = [adir+a]
    
    
    day = timedelta(days=1)
    dts.sort()
    #for i,today in enumerate(dts[1:]):
    #    if today - dts[i] > day:
    #        print 'missing day',today
    
    start = datetime(year=2000,month=1,day=1)
    end = datetime(year=2011,month=12,day=31)
    today = start
    while today < end:
        if today not in dts:
            print 'missing day',today
        today += day
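    #--NOTE: the early return below means the summary processing that follows
    #--is unreachable until the missing-day check is removed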
    return

    q_args = []
    yr_out = {}
    for yr,alist in years.iteritems():
        aname = out_prefix+str(yr)+'.ref'
        yr_out[yr] = aname
        q_args.append([aname,alist])
    
    jobq = mp.JoinableQueue()  
    resultq = mp.Queue()
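    #--resultq is handed to the workers but is never drained here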
    
    #--for testing
    #jobq.put_nowait(q_args[0])
    #jobq.put_nowait(None) 
    #worker(0,nrow,ncol,jobq,resultq)
    #return
   
    procs = []
    num_procs = 3
    for i in range(num_procs):
        #--pass the worker function the jobq and a PID
        p = mp.Process(target=worker,args=(i,nrow,ncol,jobq,resultq))
        p.daemon = True
        print 'starting process',p.name
        p.start()
        procs.append(p)
    
    for q in q_args:
        jobq.put(q)

    for p in procs:
        jobq.put(None)      
    
    for p in procs:
        p.join() 
        print p.name,'Finished' 
    
    #--add summary arrs to shapefile
    print 'adding summary info to grid shapefile'
    shapename = '..\\shapes\\join_all2'
    shp = shapefile.Reader(shapename)
    shapes = shp.shapes()
    fieldnames = shapefile.get_fieldnames(shapename)
    row_idx,col_idx = fieldnames.index('row'),fieldnames.index('column_')

    wr = shapefile.Writer()
    wr.field('row',fieldType='N',size=10,decimal=0)            
    wr.field('column',fieldType='N',size=10,decimal=0)
    
    yr_list = years.keys()
    yr_list.sort()
    
    yr_avgs = []
    for yr in yr_list:
        wr.field(field_prefix+str(yr),fieldType='N',size=20,decimal=10)
        a = np.fromfile(yr_out[yr],dtype=np.float32)
        a.resize(nrow,ncol)        
        yr_avgs.append(a)    
                
    for i,shape in enumerate(shapes):
        if i % 500 == 0:
            print i,len(shapes),'\r',
        rec = shp.record(i)
        row,col = rec[row_idx],rec[col_idx]
        rec = [row,col]
        for yr_avg in yr_avgs:
            rec.append(yr_avg[row-1,col-1])
        wr.poly([shape.points],shapeType=shape.shapeType)
        wr.record(rec)
    wr.save(out_shapename)  
    return


#--load a list of RIV locs and concentrations
#--into a dict that is keyed in the row-col tuple
f = open('..\\..\\_BCDPEP\\BCDPEP_reach_conc.dat','r')
f.readline()
tidal_conc = {}
for line in f:
    raw = line.strip().split(',')
    tidal_conc[int(raw[0])] = float(raw[1])
f.close()
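#--tidal_conc maps a source reach number to its measured concentration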
print 'loading swr reach - concentration info'
shapename = '..\\..\\_gis\\scratch\\sw_reaches_conn_swrpolylines_2'
records = shapefile.load_as_dict(shapename,loadShapes=False)
fnames = shapefile.get_fieldnames(shapename)
#print fnames

swr_conc = {}
#for r,c,strnum in zip(records['ROW'],records['COLUMN'],records['SRC_struct']):
for reach,strnum,source_reach in zip(records['REACH'],records['SRC_struct'],records['SRC_reach']):
    #--tidal=brackish
    if strnum == -1:
        swr_conc[reach] = tidal_conc[source_reach]
        #swr_conc[reach] = brackish_conc
    #--fresh
    else:
        swr_conc[reach] = fresh_conc

print 'loading SWR-output river info'
riv_filename = flow.root+'.riv'
rch_dts, ets_dts = [], []
for rfile in rch_files:
    month = int(rfile.split('.')[0].split('_')[-1])
    rch_dts.append(month)

for efile in ets_files:
    #dt = datetime.strptime(efile.split('.')[0].split('_')[-1],'%Y%m%d')
    month = int(efile.split('.')[0].split('_')[-1])
    ets_dts.append(month)

rain,evap = {},{}
for sday,eday in zip(flow.sp_start,flow.sp_end):
    rain[eday] = '#DATASET 7B RAIN\nOPEN/CLOSE '+rch_dir+rch_files[rch_dts.index(sday.month)] + ' {0:10.5f} (BINARY)  -1\n'.format(flow.rch_mult)
    evap[eday] = '#DATASET 8B EVAP\nOPEN/CLOSE '+ets_dir+ets_files[ets_dts.index(sday.month)] + ' {0:10.5f} (BINARY)  -1\n'.format(flow.ets_mult)
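#--rain/evap are keyed by stress-period end date; each value is an OPEN/CLOSE
#--line that points the model input at the matching monthly binary array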

#--swrpre polyline shapefile with all the info
swr_shapename = '..\\..\\_gis\\scratch\\sw_reaches_conn_SWRpolylines_2'
shp = shapefile.Reader(swr_shapename)
fieldnames = shapefile.get_fieldnames(swr_shapename)
for i,name in enumerate(fieldnames):
    print i,name

#--map of where attributes are in the dbf - painful
idx = {'reach': 22, 'iroute': 14, 'reachgroup': 23, 'row': 21, 'column': 20,
       'length': 27, 'conn': 25, 'nconn': 24, 'active': 12, 'str_num': 10,
       'ibnd': 16, 'SRC_reach': 3}

#--create reach instances
reaches,shp_records = swr.load_reaches_from_shape(swr_shapename,idx)

#--turn off source reaches 549 and 550 - they are too influential
for i,[r,rec] in enumerate(zip(reaches,shp_records)):
    if rec[idx['SRC_reach']] in [549,550]:
        reaches[i].active_dt = datetime(year=3000,month=1,day=1)

mpl.rcParams['font.monospace']           = 'Courier New'
mpl.rcParams['pdf.compression']          = 0
mpl.rcParams['pdf.fonttype']             = 42

ticksize = 6
mpl.rcParams['legend.fontsize']  = 6
mpl.rcParams['axes.labelsize']   = 8
mpl.rcParams['xtick.labelsize']  = ticksize
mpl.rcParams['ytick.labelsize']  = ticksize


#--load well locations and pandas dataframe
well_shapename = '..\\..\\_gis\\shapes\\pws_combine'
well_points = sf.load_shape_list(well_shapename)
#shp = sf.reader(well_shapename)
print sf.get_fieldnames(well_shapename)
records = sf.load_as_dict(well_shapename,loadShapes=False)
well_names = records['DPEP_NAME']
well_zbots = records['zbot']
float_zbots = []
for i,wb in enumerate(well_zbots):
    float_zbots.append(float(wb))
well_zbots = np.array(float_zbots)
well_rows, well_cols = records['row'],records['column']
pump = pandas.read_csv('..\\..\\_pumpage\\pws_filled_zeros.csv',index_col=0,parse_dates=True)

#--load lines and active dates
line_shapename = '..\\..\\_gis\\shapes\\sw_reaches'
lines = sf.load_shape_list(line_shapename)
shp = sf.Reader(line_shapename)
fnames = sf.get_fieldnames(line_shapename,ignorecase=True)
#for i,fn in enumerate(fnames):
#    print i,fn
a_idx = fnames.index('ACTIVE_ST')
line_active = []
for i in range(shp.numRecords):
    rec = shp.record(i)
    year = int(rec[a_idx])
    if year < bro.start.year:
        year = bro.start.year
    dt = datetime(year=year,month=1,day=1)
    line_active.append(dt)


#--head stuff
#--use bot of layer 1 to check for dry cells
l1_bot = np.loadtxt('ref\\Q5_bot.ref')