Code example #1
# assumed context: ptm_data, ptm_groups, and ntimes are module-level
# variables defined earlier in the enclosing script.
import time
import numpy as np
from stompy import utils

def group_trajectories():
    trajectories = dict()  # (grp, idx in group) => [(t, xy), ...]

    times = range(0, ntimes)

    start_time = time.time()

    # by avoiding keeping references to particle arrays, and
    # sorting stuck particles on the fly, the memory usage is
    # kept in check
    for part_grp in range(len(ptm_groups)):
        print("Particle Group %d" % part_grp)

        # read the last time step to get the max particle
        # count, and final resting place.
        step_t, parts = ptm_data[part_grp].read_timestep(ntimes - 1)
        end_xy = parts['x']
        # a particle is output once when it becomes stuck, then flagged
        # here so it is not processed again
        dead = np.zeros(len(end_xy), np.bool_)
        max_particles = len(parts)

        # use list to speed up references
        grp_trajectories = [[] for i in range(max_particles)]

        last_xy = np.nan * np.ones((max_particles, 2))
        lastlast_xy = np.nan * np.ones((max_particles, 2))

        for ti, t in enumerate(utils.progress(times)):
            step_t, parts = ptm_data[part_grp].read_timestep(t)
            step_t = utils.to_dt64(step_t)
            Nstep = len(parts)
            # move as much referencing outside the loop
            part_xy = parts['x'][:, :]

            # particles which do not move between this step and
            # the end of the simulation are presumed dead and
            # not processed anymore.
            # it might be worth keeping the first dead location --
            # this probably strips out some otherwise useful points.
            for part_idx in np.nonzero(~dead[:Nstep])[0]:  # range(Nstep):
                traj = grp_trajectories[part_idx]
                # avoid any references back to parts
                # assumes that traj[-1] is same location as traj[-2]
                # probably safe.
                rec = [step_t, part_xy[part_idx, 0], part_xy[part_idx, 1]]
                if len(traj) >= 2 and traj[-2][1] == rec[1] and traj[-2][2] == rec[2]:
                    # stuck particles are just updated by the latest time/position
                    traj[-1][0] = step_t
                else:
                    traj.append(rec)
            # if a particle is stuck from here on out, remove it from play
            dead[:Nstep] = np.all(part_xy == end_xy[:Nstep, :], axis=1)

        for part_idx, traj in enumerate(grp_trajectories):
            trajectories[(part_grp, part_idx)] = traj

    return trajectories
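Every snippet on this page funnels timestamps through utils.to_dt64. A minimal sketch of the assumed behavior: it normalizes datetime-like input (here a Python datetime) to numpy.datetime64, after which timedelta arithmetic applies. The exact set of accepted input types is an assumption.

import datetime
import numpy as np
from stompy import utils

# assumption: to_dt64 converts datetime-like values to numpy.datetime64
t = utils.to_dt64(datetime.datetime(2011, 10, 1, 12, 30))
print(t + np.timedelta64(1, 'D'))  # datetime64 arithmetic now works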
Code example #2
File: check_tidal_bc.py  Project: rustychris/csc
# assumed imports for this snippet (module paths are best guesses):
import numpy as np
import matplotlib.pyplot as plt
from stompy import utils
from stompy.spatial import proj_utils
from stompy.model.delft import waq_scenario as waq  # provides HydroFiles
# hydro_utils supplies extract_water_level(); its import is not shown in the source

srv_xy = [615117, 4224383]
pc_ll = [-(122 + 2.4/60.), (38 + 3.3/60.)]
pc_xy = proj_utils.mapper('WGS84', 'EPSG:26910')(pc_ll)

ges_xy = [629223, 4233353]
fpx_xy = [630728, 4257433]

hydro = waq.HydroFiles('/hpcvol1/cascade/WY2011/DFM_DELWAQ_sal+temp/sal+temp.hyd')

g = hydro.grid()

plt.figure()
g.plot_edges(lw=0.3, color='k')
plt.axis('equal')

t0 = utils.to_dt64(hydro.time0)

# a 10-day window starting 170 days into the run
t1 = t0 + np.timedelta64(170, 'D')
tn = t1 + np.timedelta64(10, 'D')

srv = hydro_utils.extract_water_level(hydro, srv_xy, t1, tn).isel(station=0)
pc = hydro_utils.extract_water_level(hydro, pc_xy, t1, tn).isel(station=0)
ges = hydro_utils.extract_water_level(hydro, ges_xy, t1, tn).isel(station=0)
fpx = hydro_utils.extract_water_level(hydro, fpx_xy, t1, tn).isel(station=0)

plt.figure()

plt.plot(srv.time, srv.water_level, label="SRV")
plt.plot(pc.time, pc.water_level, label="PC")
plt.plot(ges.time, ges.water_level, label="GES")
plt.plot(fpx.time, fpx.water_level, label="FPX")
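As an aside, the curried proj_utils.mapper call above can be unpacked into two steps; to_utm and pc_xy2 are illustrative names, not from the original:

# build the WGS84 -> UTM zone 10N (EPSG:26910) transform once, then reuse it
to_utm = proj_utils.mapper('WGS84', 'EPSG:26910')
pc_xy2 = to_utm(pc_ll)  # [easting_m, northing_m]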
Code example #3
File: run_cal_plotter.py  Project: rustychris/csc
def trim_time(mr, da):
    # trim da to the time window covered by the model results;
    # gap-filling with tidal data happens elsewhere in the script
    start_time = utils.to_dt64(mr.htimes[0])
    stop_time = utils.to_dt64(mr.htimes[-1])
    return da.isel(time=(da.time >= start_time) & (da.time <= stop_time))
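A hypothetical usage sketch, assuming stompy's utils is imported as in the snippet: FakeResults stands in for the model-results object mr (only the htimes attribute is needed), and the ten-day DataArray is invented sample data.

import datetime
import numpy as np
import xarray as xr

class FakeResults:  # hypothetical stand-in for 'mr'
    htimes = [datetime.datetime(2012, 1, 3), datetime.datetime(2012, 1, 8)]

times = np.arange(np.datetime64('2012-01-01'), np.datetime64('2012-01-11'))
da = xr.DataArray(np.arange(len(times)), coords=[('time', times)])

trimmed = trim_time(FakeResults(), da)
print(trimmed.time.values)  # only samples from 2012-01-03 through 2012-01-08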
Code example #4
# assumed imports for this snippet:
import os
import xarray as xr
from scipy.io import loadmat
from stompy import utils

ctd_path = "../../../data/M.Williams/data_for_dsepulveda/CTD data"

# This does in fact have 2012-01-18 -- 2012-03-20 data
# s_* variables are salinity at one or more heights
# d_* variables are depth.
ctd_mat_fn = os.path.join(ctd_path, 'janmar2012',
                          'ctd_allstn_same_timeaxis.mat')

ctd_mat = loadmat(ctd_mat_fn)

# Make that into a xr.Dataset

ctd_ds = xr.Dataset()
ctd_ds['t_matlab'] = ('time', ), ctd_mat['t_all'].squeeze()
ctd_ds['time'] = ('time', ), utils.to_dt64(
    utils.dnum_mat_to_py(ctd_ds['t_matlab']))

# s_nm is salinity at station nm, and d_nm is the corresponding depth.
# No temperature?? It must be in the source files.

for station in [
        's_nm', 'd_nm', 's_ac', 'd_ac', 's_dc', 'd_dc', 's_pc', 'd_pc', 's_bc',
        'd_bc'
]:
    value = ctd_mat[station + '_all'].squeeze()

    if value.ndim == 1:
        ctd_ds[station] = ('time',), value
    elif value.shape[1] == 4:
        # assumed completion (the source snippet is truncated here):
        # treat the 4 columns as readings on a second dimension
        ctd_ds[station] = ('time', 'sensor'), value
Code example #5
import pandas as pd
from stompy import utils
import numpy as np
import xarray as xr
##
df = pd.read_excel(
    ("../../data/OneDrive/Data and References/"
     "Marsh Water Level Data/Pescadero_Lagoon_WLs_2010_2016_metadata.xlsx"),
    sheet_name="continuous data",
    skiprows=[0, 1, 2, 3, 4],
    names=["matlab_date", "datestr", "waterlevel_ft_navd88"])
t = utils.to_dt64(utils.dnum_mat_to_py(df['matlab_date']))
##
# round to whole minutes to avoid annoying floating-point date inaccuracy
df['time_pst'] = utils.round_dt64(t, dt=np.timedelta64(60, 's'))
df['time'] = df['time_pst'] + np.timedelta64(8, 'h')  # PST -> UTC

ds = xr.Dataset.from_dataframe(
    df.loc[:, ['time', 'waterlevel_ft_navd88']].set_index('time'))
ds['waterlevel_orig'] = ('time', ), ds['waterlevel_ft_navd88'].values * 0.3048  # ft -> m
ds['time'].attrs['timezone'] = 'UTC'
ds['waterlevel_orig'].attrs['datum'] = 'NAVD88'
ds['waterlevel_orig'].attrs['units'] = 'm'

##

from stompy.io.local import noaa_coops

start = np.datetime64("2010-07-07")
stop = np.datetime64("2016-06-01")
noaa_monterey = 9413450
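The snippet ends before the download itself; the ds_monterey and ds_sf datasets plotted in the next example presumably come from calls along these lines. This is a sketch: coops_dataset and its argument order are assumed from stompy's noaa_coops module, and 9414290 (the San Francisco gauge) is a guess at which second station the script used.

# hedged sketch, not the original code:
ds_monterey = noaa_coops.coops_dataset(noaa_monterey, start, stop,
                                       ['water_level'])
ds_sf = noaa_coops.coops_dataset(9414290, start, stop,  # assumed SF gauge id
                                 ['water_level'])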
Code example #6
##

# Verify timing against the in-lagoon WSE data
# (xr, plt, and utils are assumed imported as in the previous snippets)
lagoon_wse = xr.open_dataset("../../calibration/esa_compiled_waterlevel.nc")

##

# Verify a chunk of Megan's data too.
from scipy.io import loadmat
mat = loadmat(
    "../../../data/M.Williams/data_for_dsepulveda/CTD data/octdec2011/NM_ctds.mat"
)

nm = xr.Dataset()
nm['time'] = ('time', ), utils.to_dt64(
    utils.dnum_mat_to_py(mat['tz_nm1'].squeeze()))
nm['depth'] = ('time', ), mat['da_nm1'].squeeze()

##

plt.figure(2).clf()
fig, ax = plt.subplots(1, 1, num=2)

# ds_monterey / ds_sf: NOAA water-level datasets fetched earlier in the script
ax.plot(ds_monterey.time,
        ds_monterey.water_level.isel(station=0),
        label='NOAA Monterey')
ax.plot(ds_sf.time, ds_sf.water_level.isel(station=0), label='NOAA SF')

ax.plot(lagoon_wse.time, lagoon_wse.waterlevel, label='Lagoon WSE')
ax.plot(nm.time, nm.depth + 0.41, label="MW NM1")  # +0.41: vertical offset, presumably to align datums
ax.legend(loc='upper right')
Code example #7
# this snippet starts mid-script: time, p, prat, and vname (among others)
# are defined earlier; the opening of the dict is implied by the
# pd.DataFrame(d) call below
d = {
    'Date': pd.Series(time),
    vname: pd.Series(p),
    'PO4 mg/L P': pd.Series(p / prat)
}
df6 = pd.DataFrame(d)
#df6.to_csv(outfile)
# plotting
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(time, p, '-x', color="indianred")
ax.set_title("Davis: Total Phosphorus (as P)")
ax.set_ylabel("mg/L")
fig.savefig(outpath + "figures/Davis_Phosphorus.png")

# merge into the running Davis table (outer join on Date)
davis = pd.merge(davis, df6, how='outer', on='Date')
davis['Date'] = utils.to_dt64(davis.Date.values)  # standardize type for Date
# save final concatenated file
davis.to_csv(outpath + "davis.csv")

plt.close('all')  # helps with memory use

### MANTECA1 - CWIQS data
if 0:  # Manteca1.csv is no longer generated.  Using Manteca.csv at bottom of this script instead.
    # ammonia
    filename = inpath + "Manteca_5B390104001_Ammonia_Total_as_N.csv"
    outfile = outpath + "Manteca_Ammonia.csv"
    units = "mg/L"
    vname = "NH3 mg/L N"
    am_df = load_wwtp(filename, tname, tformat, varname, unitsname, units,
                      method, cols, vname)
    ind = day_ind(am_df.Time)