Example #1
def copy_matching_fields(g_src,g_dest,eps=1.0,
                         cell_fields=['depth'],edge_fields=['edge_depth']):
    # match cells by centroid in case centers have been
    # adjusted
    src_cc=g_src.cells_centroid()
    src_cc_kdt=KDTree(data=src_cc)
    
    dest_cc=g_dest.cells_centroid()

    for c in utils.progress(range(g_dest.Ncells())):
        dist,src_cell=src_cc_kdt.query(dest_cc[c],distance_upper_bound=eps)
        if not np.isfinite(dist): continue
        for fld in cell_fields:
            g_dest.cells[fld][c]=g_src.cells[fld][src_cell]

    src_ec=g_src.edges_center()
    src_ec_kdt=KDTree(data=src_ec)
    
    dest_ec=g_dest.edges_center()
    
    for j in utils.progress(range(g_dest.Nedges())):
        dist,src_edge=src_ec_kdt.query(dest_ec[j],distance_upper_bound=eps)
        if not np.isfinite(dist): continue
        for fld in edge_fields:
            g_dest.edges[fld][j]=g_src.edges[fld][src_edge]
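A minimal usage sketch, assuming both grids are stompy UnstructuredGrid objects saved as ugrid netCDF and already carry the listed fields (the file names here are hypothetical):

from scipy.spatial import cKDTree as KDTree
import numpy as np
from stompy import utils
from stompy.grid import unstructured_grid

g_src = unstructured_grid.UnstructuredGrid.read_ugrid('grid_with_bathy.nc')  # hypothetical
g_dest = unstructured_grid.UnstructuredGrid.read_ugrid('grid_edited.nc')     # hypothetical
# copy cell and edge depths onto the edited grid wherever centers match within 1 m
copy_matching_fields(g_src, g_dest, eps=1.0,
                     cell_fields=['depth'], edge_fields=['edge_depth'])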
Example #2
def dem_to_cell_node_bathy(dem,g):
    cell_z=dem_to_cell_bathy(dem,g)
    
    node_z_to_cell_z=sparse.dok_matrix( (g.Ncells(),g.Nnodes()), dtype=np.float64 )
    for c in utils.progress(range(g.Ncells())):
        nodes=g.cell_to_nodes(c)
        node_z_to_cell_z[c,nodes]=1./len(nodes)
    # A x = b
    # A: node_z_to_cell_z
    #  x: node_z
    #    b: cell_z
    # to better allow regularization, change this to a node elevation update.
    # A ( node_z0 + node_delta ) = cell_z
    # A*node_delta = cell_z - A*node_z0 
    
    node_z0=dem(g.nodes['x'])
    bad_nodes=np.isnan(node_z0)
    node_z0[bad_nodes]=0.0 # could come up with something better..
    if np.any(bad_nodes):
        print("%d bad node elevations"%bad_nodes.sum())
    b=cell_z - node_z_to_cell_z.dot(node_z0)

    # damp tries to keep the adjustments to O(2m)
    res=sparse.linalg.lsqr(node_z_to_cell_z.tocsr(),b,damp=0.05)
    node_delta, istop, itn, r1norm  = res[:4]
    print("Adjustments to node elevations are %.2f to %.2f"%(node_delta.min(),
                                                             node_delta.max()))
    final=node_z0+node_delta
    if np.any(np.isnan(final)):
        print("Bad news")
        import pdb
        pdb.set_trace()
    return final
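A usage sketch, assuming the DEM is loaded as a callable stompy field (e.g. field.GdalGrid) and g is an UnstructuredGrid; the file names are hypothetical:

from stompy.spatial import field
from stompy.grid import unstructured_grid

dem = field.GdalGrid('bathy_dem.tif')   # hypothetical DEM file
g = unstructured_grid.UnstructuredGrid.read_ugrid('grid.nc')

node_z = dem_to_cell_node_bathy(dem, g)
# store on the grid, matching the node_z_bed convention used elsewhere in these examples
g.add_node_field('node_z_bed', node_z, on_exists='overwrite')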
Example #3
def extract_particle_snapshot(t):
    # extract particle field at a given time
    snapshot=[]
    for traj_i,traj in enumerate(utils.progress(traj_data)):
        traj_t=traj['traj']['t']
        if traj_t[0]>t:
            continue
        if traj_t[-1]<t:
            continue
        # time index along this trajectory matching up with t
        idx=np.searchsorted(traj_t,t)
        # x,y,Tfill,dTdt,fill_dist
        rec=[traj['traj']['x'][idx],
             traj['traj']['y'][idx],
             traj['Tfill'][idx],
             traj['dTdt'][idx],
             traj['fill_dist'][idx],
             traj_i]
        snapshot.append(rec)

    snap_struct=np.array(snapshot)
    snap_data=dict( x=snap_struct[:,0],
                    y=snap_struct[:,1],
                    Tfill=snap_struct[:,2],
                    dTdt=snap_struct[:,3],
                    fill_dist=snap_struct[:,4],
                    traj_i=snap_struct[:,5].astype(np.int32) )
    return snap_data
Example #4
def group_trajectories():
    trajectories = dict()  # (grp,idx in group) => [ (t,xy), ..]

    times = range(0, ntimes)

    start_time = time.time()

    # by avoiding keeping references to particle arrays, and
    # sorting stuck particles on the fly, the memory usage is
    # kept in check
    for part_grp in range(len(ptm_groups)):
        print("Particle Group %d" % part_grp)

        # read the last time step to get the max particle
        # count, and final resting place.
        step_t, parts = ptm_data[part_grp].read_timestep(ntimes - 1)
        end_xy = parts['x']
        # we output particles once when stuck, but then record
        # them here to avoid any further processing
        dead = np.zeros(len(end_xy), np.bool8)
        max_particles = len(parts)

        # use list to speed up references
        grp_trajectories = [[] for i in range(max_particles)]

        last_xy = np.nan * np.ones((max_particles, 2))
        lastlast_xy = np.nan * np.ones((max_particles, 2))

        for ti, t in enumerate(utils.progress(times)):
            step_t, parts = ptm_data[part_grp].read_timestep(t)
            step_t = utils.to_dt64(step_t)
            Nstep = len(parts)
            # move as much referencing outside the loop
            part_xy = parts['x'][:, :]

            # particles which do not move between this step and
            # the end of the simulation are presumed dead and
            # not processed anymore.
            # it might be worth keeping the first dead location.
            # this is probably stripping out some otherwise useful
            # points.
            for part_idx in np.nonzero(~dead[:Nstep])[0]:  # range(Nstep):
                traj = grp_trajectories[part_idx]
                # avoid any references back to parts
                # assumes that traj[-1] is same location as traj[-2]
                # probably safe.
                rec = [step_t, part_xy[part_idx, 0], part_xy[part_idx, 1]]
                if len(traj) >= 2 and (traj[-2][1] == rec[1]) and (traj[-2][2]
                                                                   == rec[2]):
                    # stuck particles are just updated by the latest time/position
                    traj[-1][0] = step_t
                else:
                    traj.append(rec)
            # if a particle is stuck from here on out, remove it from play
            dead[:Nstep] = np.all(part_xy == end_xy[:Nstep, :], axis=1)

        for part_idx, traj in enumerate(grp_trajectories):
            trajectories[(part_grp, part_idx)] = traj

    return trajectories
Example #5
def fig_dist(C,num=2,log=False,title="",local_max=False, direction=False):
    fig=plt.figure(num)
    fig.clf()
    fig.set_size_inches([6,9],forward=True)
    ax=fig.add_subplot(1,1,1)
    cax=fig.add_axes([0.05,0.6,0.03,0.35])
    fig.subplots_adjust(left=0,right=1,top=1,bottom=0)
    if log:
        C=np.log10(C.clip(1e-10,np.inf))
        label='log$_{10}$'
    else:
        label='linear'
        
    ccoll=g.plot_cells(values=C,ax=ax,cmap=cmap)
    ccoll.set_lw(0.05)
    ccoll.set_edgecolor('face')
    
    # plt.colorbar(ccoll,cax=cax,label=label)
    plot_utils.cbar(ccoll,cax=cax,label=label)
    
    if local_max:
        is_local_max=np.ones(g.Ncells(),np.bool8)
        e2c=g.edge_to_cells()
        internal=e2c.min(axis=1)>=0
        c1=e2c[internal,0]
        c2=e2c[internal,1]
        c1_less=C[c1]<C[c2]
        is_local_max[ c1[c1_less] ]=False
        c2_less=C[c2]<C[c1]
        is_local_max[ c2[c2_less] ]=False
        cc=g.cells_center()
        ax.plot(cc[is_local_max,0],cc[is_local_max,1],'ko')

    if direction:
        nbrhood=4
        idxs=np.arange(g.Ncells())
        np.random.shuffle(idxs)
        samp_cells=idxs[:1000]
        cc=g.cells_center()
        XY=cc[samp_cells]
        UV=np.nan*XY
        for i,c in utils.progress(enumerate(samp_cells)):
            cells=[c]
            for _ in range(nbrhood): cells=np.unique(list(cells) + [c for c0 in cells for c in g.cell_to_cells(c0) if c>=0] )
            #y=C[cells]
            #X=np.c_[cc[cells]-cc[cells].mean(axis=0),
            #        np.ones(len(cells))] # location and bias
            #beta_hat=np.linalg.lstsq(X,y,rcond=None)[0]
            #UV[i,:]=beta_hat[:2] # [gradx,grady]
            low=cells[ np.argmin(C[cells]) ]
            high=cells[ np.argmax(C[cells]) ]
            UV[i,:]=(cc[high]-cc[low])/(C[high]-C[low])
        ax.quiver( XY[:,0], XY[:,1], UV[:,0], UV[:,1],pivot='tip',scale=60,width=0.005)
        
    ax.xaxis.set_visible(0)
    ax.yaxis.set_visible(0)
    ax.axis('equal')
    ax.text(0.5,0.98,title,transform=ax.transAxes,va='top',ha='center')
    return fig
Example #6
def find_all_pairs(g, max_segs=7):
    """
    Automatically pull channel cross sections up to 7 segments wide.
    This does not use any hydro data, so relies on grid boundaries
    alone. For the CSC grid, it's right about 95% of the time, so
    don't assume that every channel section will be found, or that
    every node pair returned is a channel section.
    """

    e2c = g.edge_to_cells()
    e_boundary = np.any(e2c < 0, axis=1)
    boundary_nodes = np.unique(g.edges['nodes'][e_boundary])
    boundary_mask = np.zeros(g.Nnodes(), np.bool8)
    boundary_mask[boundary_nodes] = True
    ##
    node_marks = np.zeros(g.Nnodes(), np.bool8)
    all_pairs = []

    def boundary_weight(j):
        if e_boundary[j]:
            return 1.0
        else:
            return np.nan

    def internal_weight(j):
        if e_boundary[j]:
            return np.nan
        else:
            return 1.0

    for n in utils.progress(boundary_nodes,
                            msg="find_all_pairs %s boundary nodes"):
        if node_marks[n]:
            continue
        node_marks[n] = True

        # search only boundary edges to rule out along-boundary neighbors
        my_nbrs = g.shortest_path(n,
                                  n2=boundary_nodes,
                                  edge_weight=boundary_weight,
                                  max_return=2 * max_segs)
        my_nbrs = [mn[0] for mn in my_nbrs]
        other_n2 = np.setdiff1d(boundary_nodes, my_nbrs)
        tran_path = g.shortest_path(n,
                                    n2=other_n2,
                                    edge_weight=internal_weight,
                                    max_return=1)
        if len(tran_path) == 0:
            continue
        n2 = tran_path[0][1][0]
        all_pairs.append([n, n2])
        # this isn't a strictly commutative property, but close enough
        # for our purposes
        node_marks[n2] = True
    return all_pairs
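A usage sketch, assuming g is the UnstructuredGrid of the model domain; the plotting is just a quick visual check:

import matplotlib.pyplot as plt

pairs = find_all_pairs(g, max_segs=7)
print("Found %d candidate cross-sections" % len(pairs))

fig, ax = plt.subplots()
g.plot_edges(ax=ax, color='0.7', lw=0.4)
for n1, n2 in pairs:
    ax.plot(g.nodes['x'][[n1, n2], 0], g.nodes['x'][[n1, n2], 1], 'r-')
ax.axis('equal')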
Example #7
def never_stuck(ptm_out, ntimes):
    t_a, state = ptm_out.read_timestep(ts=0)
    last_x = state['x'][:, :2]
    stuck_count = np.zeros(len(last_x), np.int32)

    for ti in utils.progress(range(1, ntimes)):
        t_a, state = ptm_out.read_timestep(ts=ti)
        this_x = state['x'][:, :2]
        stuck = np.all(last_x == this_x, axis=1)
        stuck_count[stuck] += 1
        last_x = this_x
    return stuck_count == 0
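A usage sketch, assuming ptm_out is a PTM binary reader exposing read_timestep()/count_timesteps() as in the other examples here:

ntimes = ptm_out.count_timesteps()
moving = never_stuck(ptm_out, ntimes)
print("%d of %d particles never got stuck" % (moving.sum(), len(moving)))

# e.g. restrict further analysis to particles that kept moving
_, state = ptm_out.read_timestep(ts=ntimes - 1)
final_xy = state['x'][moving, :2]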
Example #8
        def phase_map(val,fit_cos=fit_cos,fit_sin=fit_sin,small=1e-6):
            phases=np.zeros(Ncells,np.float64)

            for c in utils.progress(range(Ncells)):
                c_val = val[:,c]
                C=np.cov( np.c_[fit_cos,fit_sin,c_val].T )

                if C[2,2] > small: # some signal at this cell
                    phase=np.arctan2(C[0,2],C[1,2]) * 180/np.pi
                else:
                    phase=np.nan
                phases[c]=phase
            return phases
Example #9
def track_cloud(cloud_particles):
    # track cloud.  this ti is an index into the time steps already extracted
    track_vars = []

    for ti in utils.progress(time_steps, 1.0, "processing %s timesteps"):
        cloud_xy = particles[ti, cloud_particles, :]
        cloud_cells = np.array([g.select_cells_nearest(xy) for xy in cloud_xy])
        pair_dists = pairwise_grid_distance(cloud_cells)
        variance_per_cell = (pair_dists**2).mean(axis=1)
        best_variance = variance_per_cell.min()
        track_vars.append(best_variance)

    track_vars = np.array(track_vars)

    mb = np.polyfit(track_time_s, track_vars, 1)
    return mb[0]
Example #10
def get_particle_mass(pb,inflow,rel_stride=2):
    """
    pb: a PtmBin object.
    inflow: dataset with Q, dnum
    rel_stride: how many ptm outputs go by for each release period.
    returns an array mapping particle id to particle mass (nan for ids that were never assigned)
    """
    ptm_out_dt_s=pb.dt_seconds()
    nsteps=pb.count_timesteps()

    # Associate weights with particle ids
    # At this stage, we just weight based on unit concentration
    # in the flow.
    part_mass={} # particle id => 'mass'
    for step in utils.progress(range(0,nsteps-1,rel_stride)):
        t,parts=pb.read_timestep(step)
        dnum=utils.to_dnum(t)
        new_ids=[ p['id'] for p in parts if p['id'] not in part_mass]
        # kludge - the quarter hour offset here gives more constant
        # particle mass.  It's close enough, so I'm not going to worry
        # about replicating the integration further.
        Qnow=np.interp(dnum+ptm_out_dt_s*0.5/86400,
                       inflow.dnum.values,inflow.Q)
        if len(new_ids)==0:
            if Qnow>0:
                print(f"WARNING: {t.strftime('%Y-%m-%d %H:%M')}: {len(new_ids):6d} new particles, Q={Qnow:9.2f} m3/s")
            continue
        
        mass_per_particle=max(0,Qnow*ptm_out_dt_s / len(new_ids))
        if step%20==0:
            print(f"{t.strftime('%Y-%m-%d %H:%M')}: {len(new_ids):6d} new particles, Q={Qnow:9.2f} m3/s, mass/part {mass_per_particle:9.2f}")
        for i in new_ids:
            part_mass[i]=mass_per_particle

    if len(part_mass)==0:
        # like petaluma, has no flow in this period.
        return np.nan*np.ones(1)
    # And convert to array for faster indexing.
    max_id=np.max(list(part_mass.keys()))
    print("max_id: ",max_id)
    # leave unset values nan to detect issues later.
    part_mass_a=np.nan*np.zeros(max_id+1,np.float64)
    for k in part_mass:
        part_mass_a[k]=part_mass[k]
    return part_mass_a
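A usage sketch, assuming pb is a PtmBin object as described in the docstring, and that a flow record flow_df (hypothetical) provides the time and discharge used to build the inflow dataset:

import xarray as xr
from stompy import utils

inflow = xr.Dataset()
inflow['dnum'] = ('time',), utils.to_dnum(flow_df.time.values)  # flow_df is hypothetical
inflow['Q'] = ('time',), flow_df.flow_m3s.values                # discharge in m3/s

part_mass = get_particle_mass(pb, inflow, rel_stride=2)
# part_mass[pid] is the mass for particle id pid; nan means it was never assigned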
Example #11
def dem_to_cell_bathy(dem,g,fill_iters=20):
    cell_means=np.zeros(g.Ncells(),np.float64)
    for c in utils.progress(range(g.Ncells()),msg="dem_to_cell_bathy: %s"):
        #msk=dem.polygon_mask(g.cell_polygon(c))
        #cell_means[c]=np.nanmean(dem.F[msk])
        cell_means[c]=np.nanmean(dem.polygon_mask(g.cell_polygon(c),return_values=True))
    
    for _ in range(fill_iters):
        missing=np.nonzero(np.isnan(cell_means))[0]
        if len(missing)==0:
            break
        new_depths=[]
        print("filling %d missing cell depths"%len(missing))
        for c in missing:
            new_depths.append( np.nanmean(cell_means[g.cell_to_cells(c)]) )
        cell_means[missing]=new_depths
    else:
        print("Filling still left %d nan cell elevations"%len(missing))
    return cell_means
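A usage sketch with the same dem and g assumptions as the node-bathymetry example above:

cell_z = dem_to_cell_bathy(dem, g, fill_iters=20)
# quick visual check of the per-cell mean elevations
ccoll = g.plot_cells(values=cell_z)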
Example #12
def particle_to_density(particles, grid, normalize='area'):
    """
    particles: struct array with 'x' and 'mass'
    normalize: 'area' normalize by cell areas to get a mass/area
               'volume': Not implemented yet.
    """

    mass = np.zeros(grid.Ncells(), np.float64)

    for i in utils.progress(range(len(particles))):
        cell = grid.select_cells_nearest(particles['x'][i, :2])
        if cell is None: continue
        mass[cell] += particles['mass'][i]

    if normalize == 'area':
        mass /= grid.cells_area()
    elif normalize == 'mass':
        pass
    else:
        raise Exception(f"Not ready for normalize={normalize}")
    return mass
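A usage sketch; parts would normally come from a PTM reader plus the per-particle masses computed earlier, but a small synthetic struct array shows the expected layout:

import numpy as np

parts = np.zeros(3, dtype=[('x', np.float64, 3), ('mass', np.float64)])
parts['x'][:, :2] = [[615000., 4224500.],
                     [615050., 4224550.],
                     [616000., 4224100.]]
parts['mass'] = 1.0

conc = particle_to_density(parts, g, normalize='area')  # mass per unit area
ccoll = g.plot_cells(values=conc)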
Example #13
def fill_by_divergence(gtri, Qtri):
    """
    Update nan entries in Qtri based on divergence in adjacent
    cells.
    """
    bad_cells = []

    e2c = gtri.edge_to_cells()
    valid = np.isfinite(Qtri)
    for j in utils.progress(np.nonzero(~valid)[0]):
        c1, c2 = e2c[j]

        # flow out of each cell.
        # positive flow on an edge means out of c1, into c2.
        Qcs = []
        Qabs = []  # to get a sense of roundoff scale

        # csgn relates the net flow out of that cell to what should
        # be put on j.  so a net flow out of c1, Qc>0, means that
        # j should put flow into c1, which is a negative Q on edge
        # j.
        for csgn, c in [(-1, c1), (1, c2)]:
            if c < 0:
                continue
            Qc = cell_divergence(gtri, c, Qtri)
            Qcs.append(Qc * csgn)
        if len(Qcs) > 1:
            eps = 1e-3
            abs_scale = 1e-2
            abs_err = np.abs(Qcs[0] - Qcs[1])
            rel_err = abs_err / (np.mean(np.abs(Qcs)) + eps)
            if (rel_err > eps) and (abs_err > abs_scale):
                print("Flows Q[c=%d]=%f Q[c=%d]=%f don't match" %
                      (c1, Qcs[0], c2, Qcs[1]))
                bad_cells.append(c1)
                bad_cells.append(c2)

        Qtri[j] = np.mean(Qcs)
        assert np.isfinite(Qtri[j])
Example #14
def track_cloud(cloud_particles):
    # track cloud.  this ti is an index into the time steps already extracted
    track_vars=[]

    for ti in utils.progress(time_steps,1.0,"processing %s timesteps"):
        cloud_xy=particles[ti,cloud_particles,:]
        cloud_cells=np.array( [g.select_cells_nearest(xy) for xy in cloud_xy] )
        if np.any( dead_cells[cloud_cells] ):
            # print("Particles hit boundary - truncate time")
            break
        pair_dists=pairwise_grid_distance(cloud_cells)
        variance_per_cell=(pair_dists**2).mean(axis=1)
        best_variance=variance_per_cell.min()
        track_vars.append(best_variance)

    track_vars=np.array(track_vars)
    t_s=track_time_s[:len(track_vars)]

    # give up if there is less than 12h or 5 data points.
    if (len(t_s)>5) and (t_s[-1]-t_s[0])>43200:
        mb=np.polyfit(t_s,track_vars,1)
        return mb[0]
    else:
        return np.nan
Example #15
fig, ax = plt.subplots(num=1)

ax.plot(parts['x'][:, 0], parts['x'][:, 1], 'g.')
ax.plot(parts['x'][cloud_idxs, 0], parts['x'][cloud_idxs, 1], 'm.', ms=10)

plot_wkb.plot_polygon(grid_poly, ax=ax, fc='0.8', ec='k', lw=0.5)

ax.axis('equal')
ax.axis(zoom)

##

# build up just the 2D coordinates for all of the initial release particles
part_locs = []
part_ts = []
for ti in utils.progress(range(0, 1000, 4)):
    t, parts = ptm_group.read_timestep(ti)
    part_locs.append(parts['x'][:, :2].copy())
    part_ts.append(t)

# This will fail if there are additional releases in this group
part_locs = np.array(part_locs)
part_ts = np.array([utils.to_dt64(t) for t in part_ts])

##

cloud_t_xy = part_locs[:, cloud_idxs, :]

cloud_cc = cloud_t_xy.mean(axis=1)

# In this case it remains more or less in the channel.
Example #16
        if 0:  # Simplest option:
            #   Put bathy on nodes, just direct sampling.
            z_node = dem(g.nodes['x'])
        if 1:  # Bias deep
            name += "_deep"
            # Maybe a good match with bedlevtype=5.
            # BLT=5: edges get shallower node, cells get deepest edge.
            # So extract edge depths (min,max,mean), and nodes get deepest
            # edge.

            alpha = np.linspace(0, 1, 5)
            edge_data = np.zeros((g.Nedges(), 3), np.float64)

            # Find min/max/mean depth of each edge:
            for j in utils.progress(range(g.Nedges())):
                pnts = (alpha[:, None] * g.nodes['x'][g.edges['nodes'][j, 0]] +
                        (1 - alpha[:, None]) *
                        g.nodes['x'][g.edges['nodes'][j, 1]])
                z = dem(pnts)
                edge_data[j, 0] = z.min()
                edge_data[j, 1] = z.max()
                edge_data[j, 2] = z.mean()

            z_node = np.zeros(g.Nnodes())
            for n in utils.progress(range(g.Nnodes())):
                # This is the most extreme bias: nodes get the deepest
                # of the deepest points along adjacent edges
                z_node[n] = edge_data[g.node_to_edges(n), 0].min()

        g.add_node_field('node_z_bed', z_node, on_exists='overwrite')
Example #17
ntimes=ptm_data[0].count_timesteps()

run_start=ptm_data[0].read_timestep(0)[0]
run_stop =ptm_data[0].read_timestep(ntimes-1)[0]

##

ptm_group=ptm_data[0] # initial release

##

# build up just the 2D coordinates for all of the initial release particles
part_locs=[]
part_ts=[]
for ti in utils.progress(range(0,1000,4)):
    t,parts=ptm_group.read_timestep(ti)
    part_locs.append( parts['x'][:,:2].copy() )
    part_ts.append(t)

# This will fail if there are additional releases in this group
part_locs=np.array(part_locs)
part_ts=np.array([utils.to_dt64(t) for t in part_ts])

##

from scipy import sparse
from scipy.sparse import csgraph

def grid_to_graph(g):
    # use cell-to-cell connections to make it easier to avoid
Example #18
    ('AM2',AM2),
    ('AM3',AM3),
    ('AM4',AM4),
    ('AM6',AM6),
    ('AM7',AM7),
    ('AM8',AM8),
    ('AM9',AM9),
    ('SM1',SM1),
    ('SM2',SM2),
    ('SM3',SM3),
    ('SM4',SM4),
    ('SM7',SM7),
    ('SM8',SM8)
]

all_detects=[pt.parse_tek(fn,name=name) for name,fn in utils.progress(all_receiver_fns)]
##

all_detects_nomp=[pt.remove_multipath(d) for d in all_detects]

##


# First cut: 
# Limit to 1 hour of detections. 3388 detections over this 1 hour.

# one good tag - C56B (just 4 fixes)
#t_clip=[np.datetime64("2019-03-25 02:00"),
#        np.datetime64("2019-03-25 03:00")]

# one good tag - C535 (but just 4 fixes)
Example #19
def parse_txts(txt_fns,
               pressure_range=[110e3, 225e3],
               name=None,
               beacon='auto',
               split_on_clock_change=True):
    """
    Parse a collection of txt files, grab detections and optionally
    clock resync events.
    beacon: 'auto' set beacon tag id from most commonly received tag id.
       None: don't set a beacon tag id
       else: use the provided value as the beacon id.
    """
    txt_fns = list(txt_fns)
    txt_fns.sort()
    dfs = []
    for fn in utils.progress(txt_fns):
        df = parse_txt(fn)
        df['fn'] = fn
        dfs.append(df)
    df = pd.concat(dfs, sort=True)  # sort columns to get alignment
    df = df.reset_index()  # will use this for slicing later

    n_unknown = (df['type'] == 'unknown').sum()
    if n_unknown > 0:
        # So far, this is just corrupt lines.  Might be able to
        # salvage the second part of corrupt lines, but it's
        # such a small number, and the second part is usually
        # just a NODE bootup msg anyway.
        log.warning("%d unknown line types in txt files" % n_unknown)

    # Do we need epoch? yes, it's used downstream
    epoch = utils.to_unix(df['time'].values)
    # It will get usec added, so be sure it's just the integer portion.
    assert np.all((epoch % 1.0)[np.isfinite(epoch)] == 0)
    df['epoch'] = epoch

    # Add microseconds to timestamps when t_usec is available
    sel = np.isfinite(df.t_usec.values)
    df.loc[sel, 'time'] = df.loc[
        sel, 'time'] + (df.loc[sel, 't_usec'].values * 1e6).astype(
            np.int32) * np.timedelta64(1, 'us')

    df_det = df[df['type'] == 'DET']

    # clean up time:
    bad_time = (df_det.time < np.datetime64('2018-01-01')) | (
        df_det.time > np.datetime64('2022-01-01'))
    df2 = df_det[~bad_time].copy()

    # clean up temperature:
    df2.loc[df2.temp < -5, 'temp'] = np.nan
    df2.loc[df2.temp > 35, 'temp'] = np.nan

    # clean up pressure
    if pressure_range is not None:
        df2.loc[df2.pressure < pressure_range[0], 'pressure'] = np.nan
        df2.loc[df2.pressure > pressure_range[1], 'pressure'] = np.nan

    # trim to first/last valid pressure
    valid_idx = np.nonzero(np.isfinite(df2.pressure.values))[0]
    df3 = df2.iloc[valid_idx[0]:valid_idx[-1] + 1, :].copy()

    df3['tag'] = [s.strip() for s in df3.tag.values]

    # narrow that to the fields we actually care about:
    fields = [
        'rx_serial', 'tag', 'time', 't_usec', 'epoch', 'corrQ', 'nbwQ',
        'pressure', 'temp', 'datetime_str', 'fn'
    ]

    ds = xr.Dataset.from_dataframe(df3.loc[:, fields])
    ds['usec'] = ds['t_usec']

    ds['cf2_filename'] = None

    if beacon == 'auto':
        beacon_id = df3.groupby('tag').size().sort_values().index[-1]
        ds['beacon_id'] = beacon_id
        ds['beacon_id'].attrs['source'] = 'received tags'
        print("auto_beacon: beacon_id inferred as ", beacon_id)
        # import pdb
        # pdb.set_trace()
    elif beacon is not None:
        ds['beacon_id'] = beacon
        ds['beacon_id'].attrs['source'] = 'Specified to parse_txts'

    ds.attrs['pressure_range'] = pressure_range

    if name is not None:
        ds['name'] = (), name

    if split_on_clock_change:
        # dice_by_clock_resets got crazy complicated.  Rather than
        # re-living that experience, try something simple here,
        # but know that we may have to come back and refactor this
        # with dice_by_clock_resets.
        sync_sel = df['sync_status'].values == 1.0
        sync_idxs = df.index.values[
            sync_sel]  # of course these won't actually be in df3!

        nonmono_sel = np.diff(df3.time.values) < np.timedelta64(0)
        # Should  mean that each item is the first index of a new chunk.
        nonmono_idxs = df3.index.values[1:][nonmono_sel]

        all_breaks = np.unique(np.concatenate((sync_idxs, nonmono_idxs)))
        all_breaks = np.r_[0, all_breaks, len(df)]

        # as indices into ds.index
        ds_breaks = np.searchsorted(ds.index.values, all_breaks)

        diced = []
        for start, stop in zip(ds_breaks[:-1], ds_breaks[1:]):
            if stop > start:
                diced.append(ds.isel(index=slice(start, stop)))
            else:
                # often will have a slice that's empty.
                pass
        return diced
    else:
        return ds
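A usage sketch; the receiver directory is hypothetical:

import glob

chunks = parse_txts(glob.glob('rx_SM1/*.txt'),   # hypothetical path
                    name='SM1',
                    beacon='auto',
                    split_on_clock_change=True)
# with split_on_clock_change=True a list of datasets is returned,
# one per clock-continuous chunk
for i, ds in enumerate(chunks):
    print(i, ds.time.size, ds.time.values[0], ds.time.values[-1])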
Example #20
# g=unstructured_grid.UnstructuredGrid.read_ugrid(grid_fn)
# g.renumber()

##

g = unstructured_grid.UnstructuredGrid.read_ugrid(
    'pesca_butano_existing_deep_bathy.nc')

##

alpha = np.linspace(0, 1, 5)

edge_data = np.zeros((g.Nedges(), 3), np.float64)

# Find min/max/mean depth of each edge:
for j in utils.progress(range(g.Nedges())):
    pnts = (alpha[:, None] * g.nodes['x'][g.edges['nodes'][j, 0]] +
            (1 - alpha[:, None]) * g.nodes['x'][g.edges['nodes'][j, 1]])
    z = dem(pnts)
    edge_data[j, 0] = z.min()
    edge_data[j, 1] = z.max()
    edge_data[j, 2] = z.mean()
##

# This is bedlevtype=5
z_edge = (g.nodes['node_z_bed'][g.edges['nodes']]).max(axis=1)

plt.figure(1).clf()

plt.plot(z_edge, edge_data[:, 0], 'g.')
plt.plot(z_edge, edge_data[:, 1], 'b.')
Example #21
def node_depths_edge_mean_opt(g,
                              target_edge_depths,
                              target_node_depths,
                              section_weight=1.0,
                              node_weight=0.1,
                              nonsection_weight=0.0,
                              max_segments_per_section=7):
    """
    return per-node elevations such that the average of
    the endpoints per edge are close to the target edge depth.
    Edges which form channel cross-sections are given precedence.

    nodes per-edge to match the mean edge elevation from the DEM.
    """
    # construct a linear system where we try to solve good elevations for
    # all of the nodes at once

    # First, do this but ignore boundary edges:
    e2c = g.edge_to_cells()
    boundary_edges = e2c.min(axis=1) < 0
    boundary_nodes = np.unique(g.edges['nodes'][boundary_edges])

    weight_by_pairs = True

    if section_weight == nonsection_weight:
        log.info(
            "Cross-section and non-cross-section weights the same.  No tracing needed"
        )
        edge_weights = section_weight * np.ones(g.Nedges(), np.float64)
    else:
        all_pairs = find_all_pairs(g, max_segments_per_section)
        cross_edges = []
        for n1, n2 in utils.progress(all_pairs,
                                     msg="Extracting section edges %s"):
            cross_edges.append(g.shortest_path(n1, n2, return_type='edges'))
        cross_edges = np.concatenate(cross_edges)
        edge_weights = nonsection_weight * np.ones(g.Nedges(), np.float64)
        edge_weights[cross_edges] = section_weight

    rows = []
    cols = []
    data = []
    rhs = []

    for j in utils.progress(range(g.Nedges())):
        row = len(rhs)
        if boundary_edges[j]:
            continue
        if edge_weights[j] == 0:
            continue
        n1, n2 = g.edges['nodes'][j]
        rows.append(row)
        cols.append(n1)
        data.append(0.5 * edge_weights[j])
        rows.append(row)
        cols.append(n2)
        data.append(0.5 * edge_weights[j])
        rhs.append(target_edge_depths[j] * edge_weights[j])

    if node_weight > 0:
        node_weights = node_weight * np.ones(g.Nnodes(), np.float64)
        # boundary nodes we consider free
        node_weights[boundary_nodes] = 0

        for n in range(g.Nnodes()):
            if node_weights[n] != 0:
                rows.append(len(rhs))
                cols.append(n)
                data.append(node_weights[n])
                rhs.append(node_weights[n] * target_node_depths[n])

    M = sparse.coo_matrix((data, (rows, cols)), shape=(len(rhs), g.Nnodes()))

    node_depths_sparse, status, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var_ = sparse.linalg.lsqr(
        M, rhs)

    return node_depths_sparse
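A usage sketch, tying this to the per-edge DEM sampling shown earlier (edge_data[:,2] is the mean DEM elevation along each edge; dem and g as in those examples):

target_edge = edge_data[:, 2]      # mean DEM elevation per edge
target_node = dem(g.nodes['x'])    # direct DEM sample per node

node_z = node_depths_edge_mean_opt(g,
                                   target_edge_depths=target_edge,
                                   target_node_depths=target_node,
                                   section_weight=1.0,
                                   node_weight=0.1)
g.add_node_field('node_z_bed', node_z, on_exists='overwrite')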
Example #22
            # if a particle is stuck from here on out, remove it from play
            dead[:Nstep]=np.all(part_xy==end_xy[:Nstep,:],axis=1)

        for part_idx,traj in enumerate(grp_trajectories):
            trajectories[ (part_grp,part_idx) ] = traj

    return trajectories

trajectories=group_trajectories()

##

trimmed=dict()

# Trim each trajectory, and convert to numpy array
for k in utils.progress(trajectories.keys()):
    traj=trajectories[k]
    if len(traj)>2:
        t,x,y = zip(*traj)
        traj_np=np.zeros(len(traj),dtype=[('t','M8[us]'),('x',np.float64),('y',np.float64)])
        traj_np['t']=t
        traj_np['x']=x
        traj_np['y']=y
        trimmed[k]=traj_np

trimmed_l=list(trimmed.values())

##

# Detect when those particles pass through a footprint
import matplotlib.path as mpltPath
Example #23
    def disp_array(self):
        self.hydro.infer_2d_elements()
        self.hydro.infer_2d_links()

        # first calculate all time steps, just in 2D.

        Q = np.zeros((len(self.hydro.t_secs), self.hydro.n_2d_links),
                     np.float64)
        A = np.zeros((len(self.hydro.t_secs), self.hydro.n_2d_links),
                     np.float64)

        for ti in utils.progress(range(len(self.hydro.t_secs))):
            t_sec = self.hydro.t_secs[ti]
            flows = [
                hydro.flows(t_sec) for hydro in [self.hydro_tidal, self.hydro]
            ]
            flow_hp = flows[0] - flows[1]
            # depth-integrate
            flow_hor = flow_hp[:self.hydro_tidal.n_exch_x]
            link_flows = np.bincount(
                self.hydro.exch_to_2d_link['link'],
                self.hydro.exch_to_2d_link['sgn'] * flow_hor)

            Q[ti, :] = link_flows**2
            A[ti, :] = np.bincount(
                self.hydro.exch_to_2d_link['link'],
                self.hydro.areas(t_sec)[:self.hydro.n_exch_x])

        dt_s = np.median(np.diff(self.hydro.t_secs))

        winsize = int(self.lowpass_days * 86400 / dt_s)
        # These are a little slow.  10s?
        # could streamline this some since we later only use a fraction of the values.

        # clip here is because in some cases the values are very low
        # and some roundoff is creating negatives.
        Qlp = filters.lowpass_fir(Q, winsize=winsize, axis=0).clip(0)
        Alp = filters.lowpass_fir(A, winsize=winsize, axis=0).clip(0)

        rms_flows = np.sqrt(Qlp)
        mean_A = Alp

        Lexch = self.hydro.exchange_lengths.sum(axis=1)[:self.hydro.n_exch_x]
        L = [
            Lexch[exchs[0]] for l, exchs in utils.enumerate_groups(
                self.hydro.exch_to_2d_link['link'])
        ]

        # This is just a placeholder. A proper scaling needs to account for
        # cell size. rms_flows has units of m3/s. probably that should be normalized
        # by dividing by average flux area, and possibly multiplying by the distance
        # between cell centers. that doesn't seem quite right.
        link_K = self.K_scale * rms_flows * L / mean_A

        # this is computed for every time step, but we can trim that down
        # it's lowpassed at winsize.  Try stride of half winsize.
        # That was used for the first round of tests, but it looks a bit
        # sparse.
        K_stride = winsize // 4
        K2D = link_K[::K_stride, :]
        K_t_secs = self.hydro.t_secs[::K_stride]

        if self.amp_factor != 1.0:
            Kbar = K2D.mean(axis=0)
            K2D = (Kbar[None, :] + self.amp_factor *
                   (K2D - Kbar[None, :])).clip(0)

        K = np.zeros((len(K_t_secs), self.hydro.n_exch), np.float64)

        # and then project to 3D
        K[:, :self.hydro.n_exch_x] = K2D[:, self.hydro.exch_to_2d_link['link']]

        if 0:  # DEBUGGING
            # verify that I can get back to the previous, constant in time
            # run.
            log.warning("Debugging K")
            Kconst = super(KautoUnsteady, self).disp_array()
            K[:, :] = Kconst[None, :]

        log.info("Median dispersion coefficient: %g" % (np.median(K)))

        return K_t_secs, K
Example #24
def set_ic_from_hycom(model,
                      hycom_ll_box,
                      cache_dir,
                      default_s=None,
                      default_T=None):
    """
    Update model.ic_ds with salinity and temperature from hycom.
    hycom_ll_box is like [-124.9, -121.7, 35.9, 39.0], and is specified here
    to make best use of cached data (so slight misalignment between models doesn't require
    refetching all of the hycom files).
    cache_dir: where to save/find cached hycom files
    default_s, default_T: when a grid point does not intersect valid hycom data, what
     value to use.  leave as None in order to leave the value in ic_ds alone.

    In the past used: 
      default_s=33.4
      default_T=10.0
    """
    fns = hycom.fetch_range(
        hycom_ll_box[:2],
        hycom_ll_box[2:],
        [model.run_start, model.run_start + np.timedelta64(1, 'D')],
        cache_dir=cache_dir)
    hycom_ic_fn = fns[0]

    hycom_ds = xr.open_dataset(hycom_ic_fn)
    if 'time' in hycom_ds.dims:
        hycom_ds = hycom_ds.isel(time=0)
    cc = model.grid.cells_center()
    cc_ll = model.native_to_ll(cc)

    # Careful - some experiments (such as 92.8) have lon in [0,360],
    # while later ones have lon in [-180,180]
    # this forces all to be [-180,180]
    hycom_ds.lon.values[:] = (hycom_ds.lon.values + 180) % 360.0 - 180.0

    dlat = np.median(np.diff(hycom_ds.lat.values))
    dlon = np.median(np.diff(hycom_ds.lon.values))
    lat_i = utils.nearest(hycom_ds.lat.values, cc_ll[:, 1], max_dx=1.2 * dlat)
    lon_i = utils.nearest(hycom_ds.lon.values, cc_ll[:, 0], max_dx=1.2 * dlon)

    # make this positive:down to match hycom and make the interpolation
    sun_z = -model.ic_ds.z_r.values

    assert ('time', 'Nk',
            'Nc') == model.ic_ds.salt.dims, "Workaround is fragile"

    for scal, hy_var, sun_var, default in [
        ('s', 'salinity', 'salt', default_s),
        ('T', 'water_temp', 'temp', default_T)
    ]:
        if scal == 's' and float(model.config['beta']) == 0.0:
            continue
        if scal == 'T' and float(model.config['gamma']) == 0.0:
            continue

        for c in utils.progress(range(model.grid.Ncells()),
                                msg="HYCOM initial condition %s %%s" % scal):
            sun_val = default

            if lat_i[c] < 0 or lon_i[c] < 0:
                print("Cell %d does not overlap HYCOM grid" % c)
            else:
                # top to bottom, depth positive:down
                val_profile = hycom_ds[hy_var].isel(lon=lon_i[c],
                                                    lat=lat_i[c]).values
                valid = np.isfinite(val_profile)
                if not np.any(valid):
                    # print("Cell %d is dry in HYCOM grid"%c)
                    pass
                else:
                    # could add bottom salinity if we really cared.
                    sun_val = np.interp(sun_z, hycom_ds.depth.values[valid],
                                        val_profile[valid])
            if sun_val is not None:
                model.ic_ds[sun_var].values[0, :, c] = sun_val
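A usage sketch following the docstring; model is assumed to have ic_ds already populated, and the cache location is hypothetical:

set_ic_from_hycom(model,
                  hycom_ll_box=[-124.9, -121.7, 35.9, 39.0],
                  cache_dir='cache',   # hypothetical cache location
                  default_s=33.4,
                  default_T=10.0)
# model.ic_ds['salt'] and model.ic_ds['temp'] now hold HYCOM-derived profiles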
Example #25
poly_rio = np.array([[615061., 4224762.], [616046., 4224084.],
                     [615810., 4223645.], [614768., 4224230.]])

# fast lookups via matplotlib:
dead_polys = [poly_geo, poly_rio]
from matplotlib import path
dead_paths = [path.Path(poly) for poly in dead_polys]

# dead_cells |= .contains_points(ctrs)

##

data_dir = "csvs"
os.path.exists(data_dir) or os.mkdir(data_dir)

for step in utils.progress(range(1000)):
    fn = os.path.join(data_dir, "combined_%04d.csv" % step)
    if os.path.exists(fn): continue

    dfs = []
    for src_i, src in enumerate(ptm_data):
        t, parts = src.read_timestep(step)
        sel = np.ones(len(parts), np.bool8)
        for dp in dead_paths:
            sel &= ~dp.contains_points(parts['x'][:, :2])

        df = pd.DataFrame()
        df['x'] = parts['x'][sel, 0]
        df['y'] = parts['x'][sel, 1]
        df['group'] = src_i
        dfs.append(df)
Example #26
    if np.sum(valid) == 0:
        return np.nan

    gam = LinearGAM(
        s(0, n_splines=4) + s(1, n_splines=5) +
        te(0, 1, n_splines=4)).gridsearch(sample_st, sample_z)
    z_pred = gam.predict(np.array([[0, 0]]))[0]
    return z_pred


meth = 'gam'

if meth == 'invdist':
    meth_pretty = "Inverse Distance\np=%g, $\\alpha$=%g" % (power, aniso)
    params = "p%g_aniso%g" % (-power, aniso)
    z_result = [interp_invdist(data) for data in utils.progress(all_data)]
if meth == 'invdist_plane':
    meth_pretty = "IDW plane\np=%g, $\\alpha$=%g" % (power, aniso)
    params = "p%g_aniso%g" % (-power, aniso)
    z_result = [
        interp_invdist_plane(data) for data in utils.progress(all_data)
    ]
if meth == 'krige':
    meth_pretty = "Gaussian Process"
    params = ""
    z_result = [interp_krige(data) for data in utils.progress(all_data)]
if meth == 'gam':
    meth_pretty = "GAM"
    params = "v2"
    z_result = [interp_gam(data) for data in utils.progress(all_data)]
Example #27
def add_wind_coamps_sfei(model,
                         cache_dir,
                         pad=np.timedelta64(3 * 3600, 's'),
                         coamps_buffer=30e3,
                         air_temp=False):
    """
    model: A HydroModel instance
    cache_dir: path for caching wind data
    pad: how far before/after the simulation the wind dataset should extend
    
    Combine SFEI interpolated winds and COAMPS winds.
    coamps_buffer: coamps samples within this distance of SFEI data are omitted.

    This method does not work so well with SUNTANS.  The available interpolation
    methods (inverse distance and kriging) do not deal well with having two 
    distinct, densely sampled datasets with a gap in between.

    air_temp: if 'coamps', fill in air temperature samples from coamps data.
    """
    g = model.grid

    period_start = model.run_start - pad
    period_stop = model.run_stop + pad

    fields = ['wnd_utru', 'wnd_vtru', 'pres_msl']
    if air_temp == 'coamps':
        # may add sol_rad at some point...
        fields += ['air_temp', 'rltv_hum']
    coamps_ds = coamps.coamps_dataset(g.bounds(),
                                      period_start,
                                      period_stop,
                                      cache_dir=cache_dir,
                                      fields=fields)

    sfei_ds = xr.open_dataset('wind_natneighbor_WY2017.nc')
    # SFEI data is PST
    logging.info(sfei_ds.time.values[0])
    sfei_ds.time.values[:] += np.timedelta64(8 * 3600, 's')
    logging.info(sfei_ds.time.values[0])  # just to be sure it took.
    sfei_ds.time.attrs['timezone'] = 'UTC'
    # Not ready for other years
    assert sfei_ds.time.values[
        0] <= period_start, "FIX: SFEI wind only setup for 2017"
    assert sfei_ds.time.values[
        -1] >= period_stop, "FIX: SFEI wind only setup for 2017"

    # buffer out the model domain a bit to get a generous footprint
    g_poly = geometry.Polygon(g.boundary_polygon().exterior).buffer(3000)

    # For each of the sources, which points will be included?
    # Add a mask variable for each dataset
    exclude_poly = None
    for src_name, src_ds in [('SFEI', sfei_ds), ('COAMPS', coamps_ds)]:
        fld = field.SimpleGrid(extents=[
            src_ds.x.values[0], src_ds.x.values[-1], src_ds.y.values[0],
            src_ds.y.values[-1]
        ],
                               F=src_ds.wind_u.isel(time=0).values)
        logging.info("%s: gridded resolution: %.2f %.2f" %
                     (src_name, fld.dx, fld.dy))
        mask = fld.polygon_mask(g_poly)
        logging.info("%s: %d of %d samples fall within grid" %
                     (src_name, mask.sum(), mask.size))
        if exclude_poly is not None:
            omit = fld.polygon_mask(exclude_poly)
            mask = mask & (~omit)
            logging.info(
                "%s: %d of %d samples fall within exclusion poly, will use %d"
                % (src_name, omit.sum(), omit.size, mask.sum()))

        src_ds['mask'] = src_ds.wind_u.dims[1:], mask

        # Add these points to the exclusion polygon for successive sources
        X, Y = fld.XY()
        xys = np.c_[X[mask], Y[mask]]
        pnts = [geometry.Point(xy[0], xy[1]) for xy in xys]
        poly = cascaded_union([p.buffer(coamps_buffer) for p in pnts])
        if exclude_poly is None:
            exclude_poly = poly
        else:
            exclude_poly = exclude_poly.union(poly)

    #  Trim to the same period
    # SFEI
    time_slc = (sfei_ds.time.values >= period_start) & (sfei_ds.time.values <=
                                                        period_stop)
    sfei_sub_ds = sfei_ds.isel(time=time_slc)

    # COAMPS
    time_slc = (coamps_ds.time.values >= period_start) & (coamps_ds.time.values
                                                          <= period_stop)
    coamps_sub_ds = coamps_ds.isel(time=time_slc)

    # make sure that worked:
    assert np.all(sfei_sub_ds.time.values == coamps_sub_ds.time.values)

    times = sfei_sub_ds.time.values

    # Now we start to break down the interface with model, as wind is not really
    # ready to go.

    met_ds = model.zero_met(times=times)

    srcs = [sfei_sub_ds, coamps_sub_ds]
    src_counts = [src.mask.values.sum() for src in srcs]
    n_points = np.sum(src_counts)

    xcoords = []
    ycoords = []
    for src in srcs:
        X, Y = np.meshgrid(src.x.values, src.y.values)
        xcoords.append(X[src.mask.values])
        ycoords.append(Y[src.mask.values])
    xcoords = np.concatenate(xcoords)
    ycoords = np.concatenate(ycoords)

    # Replace placeholder coordinates for wind variables.
    for name in ['Uwind', 'Vwind']:
        del met_ds["x_" + name]
        del met_ds["y_" + name]
        del met_ds["z_" + name]
        del met_ds[name]

        met_ds["x_" + name] = ("N" + name), xcoords
        met_ds["y_" + name] = ("N" + name, ), ycoords
        met_ds["z_" + name] = ("N" + name, ), 10.0 * np.ones_like(xcoords)

    Uwind_t = []
    Vwind_t = []
    for ti in utils.progress(range(len(times)), msg="Compiling wind: %s"):
        Uwind = []
        Vwind = []
        for src in srcs:
            Uwind.append(src.wind_u.isel(time=ti).values[src.mask])
            Vwind.append(src.wind_v.isel(time=ti).values[src.mask])
        Uwind = np.concatenate(Uwind)
        Vwind = np.concatenate(Vwind)
        Uwind_t.append(Uwind)
        Vwind_t.append(Vwind)
    met_ds['Uwind'] = ('nt', "NUwind"), np.stack(Uwind_t)
    met_ds['Vwind'] = ('nt', "NVwind"), np.stack(Vwind_t)

    logging.info("New Met Dataset:")
    logging.info(str(met_ds))
    model.met_ds = met_ds
    if int(model.config['metmodel']) not in [0, 4]:
        logging.warning("Adding wind, will override metmodel %s => %d" %
                        (model.config['metmodel'], 4))
    model.config['metmodel'] = 4  # wind only
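A usage sketch; model is a HydroModel instance per the docstring, and the cache directory is hypothetical:

add_wind_coamps_sfei(model,
                     cache_dir='cache/coamps',
                     coamps_buffer=30e3,
                     air_temp='coamps')
# model.met_ds now carries the combined wind points, and metmodel is forced to 4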
Example #28
def add_wind_preblended(model, cache_dir, pad=np.timedelta64(3 * 3600, 's')):
    """
    model: A HydroModel instance
    cache_dir: path for caching wind data
    pad: how far before/after the simulation the wind dataset should extend
    
    Add wind data from pre-blended netcdf output
    """
    g = model.grid

    period_start = model.run_start - pad
    period_stop = model.run_stop + pad

    # note that this is a bit different than the sfei data
    #  - already in UTC
    #  - larger footprint, coarser grid
    #  - natural neighbors run on the observed + COAMPS (thinned).
    blended_ds = blended_dataset(period_start, period_stop)

    # Not ready for other years
    assert blended_ds.time.values[
        0] <= period_start, "FIX: pre-blended wind only set up for some of 2017"
    assert blended_ds.time.values[
        -1] >= period_stop, "FIX: pre-blended wind only set up for some of 2017"

    # buffer out the model domain a bit to get a generous footprint
    g_poly = geometry.Polygon(g.boundary_polygon().exterior).buffer(3000)

    # For each of the sources, which points will be included?
    # Add a mask variable for each dataset
    exclude_poly = None
    for src_name, src_ds in [('BLENDED', blended_ds)]:
        fld = field.SimpleGrid(extents=[
            src_ds.x.values[0], src_ds.x.values[-1], src_ds.y.values[0],
            src_ds.y.values[-1]
        ],
                               F=src_ds.wind_u.isel(time=0).values)
        logging.info("%s: gridded resolution: %.2f %.2f" %
                     (src_name, fld.dx, fld.dy))
        mask = fld.polygon_mask(g_poly)
        logging.info("%s: %d of %d samples fall within grid" %
                     (src_name, mask.sum(), mask.size))
        if exclude_poly is not None:
            omit = fld.polygon_mask(exclude_poly)
            mask = mask & (~omit)
            logging.info(
                "%s: %d of %d samples fall within exclusion poly, will use %d"
                % (src_name, omit.sum(), omit.size, mask.sum()))

        src_ds['mask'] = src_ds.wind_u.dims[1:], mask

    #  Trim to the same period
    time_slc = (blended_ds.time.values >=
                period_start) & (blended_ds.time.values <= period_stop)
    blended_sub_ds = blended_ds.isel(time=time_slc)

    times = blended_sub_ds.time.values

    # Now we start to break down the interface with model, as wind is not really
    # ready to go.

    met_ds = model.zero_met(times=times)

    srcs = [blended_sub_ds]
    src_counts = [src.mask.values.sum() for src in srcs]
    n_points = np.sum(src_counts)

    xcoords = []
    ycoords = []
    for src in srcs:
        X, Y = np.meshgrid(src.x.values, src.y.values)
        xcoords.append(X[src.mask.values])
        ycoords.append(Y[src.mask.values])
    xcoords = np.concatenate(xcoords)
    ycoords = np.concatenate(ycoords)

    # Replace placeholder coordinates for wind variables.
    for name in ['Uwind', 'Vwind']:
        del met_ds["x_" + name]
        del met_ds["y_" + name]
        del met_ds["z_" + name]
        del met_ds[name]

        met_ds["x_" + name] = ("N" + name), xcoords
        met_ds["y_" + name] = ("N" + name, ), ycoords
        met_ds["z_" + name] = ("N" + name, ), 10.0 * np.ones_like(xcoords)

    Uwind_t = []
    Vwind_t = []
    for ti in utils.progress(range(len(times)), msg="Compiling wind: %s"):
        Uwind = []
        Vwind = []
        for src in srcs:
            Uwind.append(src.wind_u.isel(time=ti).values[src.mask])
            Vwind.append(src.wind_v.isel(time=ti).values[src.mask])
        Uwind = np.concatenate(Uwind)
        Vwind = np.concatenate(Vwind)
        Uwind_t.append(Uwind)
        Vwind_t.append(Vwind)
    met_ds['Uwind'] = ('nt', "NUwind"), np.stack(Uwind_t)
    met_ds['Vwind'] = ('nt', "NVwind"), np.stack(Vwind_t)

    logging.info("New Met Dataset:")
    logging.info(str(met_ds))
    model.met_ds = met_ds
    if int(model.config['metmodel']) not in [4, 5]:
        logging.warning("While adding wind, noticed metmodel %s" %
                        (model.config['metmodel']))
Example #29
    
    if len(track)<2:
        results.append(None)
        continue 

    track=track.copy()
    for fld in ['x','y','tnum']:
        fld_m=0.5*( track[fld].values[:-1] +
                   track[fld].values[1:] )
        track[fld+'_m']=np.r_[ fld_m, np.nan ]
        
    track['time_m']=utils.unix_to_dt64(track['tnum_m'].values)
    
    new_records=[]
    
    for i,seg in utils.progress( track.iloc[:-1,:].iterrows() ):
        rec={} # new fields to be added to segments.
        t=seg['time_m'].to_datetime64()
        run_i,t_i=quantize_time(t)

        rec['index']=i

        for slice_name,slice_def in z_slices:
            Uint=interpolator(run_i,t_i,**slice_def)
        
            # vorticity at centers
            x_samp=np.array( [ [seg['x_m']    , seg['y_m']      ],
                               [seg['x_m']-eps, seg['y_m']      ],
                               [seg['x_m']+eps, seg['y_m']      ],
                               [seg['x_m']    , seg['y_m'] - eps],
                               [seg['x_m']    , seg['y_m'] + eps]
Example #30
def agg_decay_tracers(model, hydro_orig, agg_grid, force='auto'):
    def reckless_nanmean(*a, **kw):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return np.nanmean(*a, **kw)

    # pre-calculated the data on the original grid,
    # aggregate, write to netcdf.
    precalc_fn = os.path.join(model.base_path, 'decay_summary_v00.nc')

    age_orig_agg2ds = []

    if os.path.exists(precalc_fn):
        if force == 'auto':
            # compare precalc_fn to map_fn
            force = utils.is_stale(precalc_fn, [model.map_ds_path()])
        if force:
            os.unlink(precalc_fn)

    if not os.path.exists(precalc_fn):
        model_grid = model.load_hydro().grid()

        if ((agg_grid.Ncells() == model_grid.Ncells()) and (np.allclose(
                model_grid.cells_centroid(), agg_grid.cells_centroid()))):
            needs_aggregation = False
        else:
            needs_aggregation = True

        orig_map_ds = model.map_ds()
        layers = orig_map_ds.dims['layer']

        # These could get large -- try to build it iteratively.
        ds = xr.Dataset()
        ds['time'] = orig_map_ds['time']
        ds['t_sec'] = orig_map_ds['t_sec']
        agg_grid.write_to_xarray(ds)
        ds.to_netcdf(precalc_fn)
        ds.close()

        nc = netCDF4.Dataset(precalc_fn, mode='r+')

        def condense(scal):
            """
            take a 3D, potentially unaggregated scalar field,
            aggregate, nanmean, and condense to 2D.
            """
            # then aggregate but avoid nan contamination
            if needs_aggregation:
                scal_agg = agger(hydro_orig, agg_grid).segment_aggregator(
                    t_sec, scal.ravel(), nan_method='ignore').reshape(
                        (layers, -1))
            else:
                scal_agg = scal
            scal_agg2d = reckless_nanmean(scal_agg, axis=0)
            return scal_agg2d

        for tracer in range(5):  # max number of decay tracers
            cTr = "cTr%d" % (tracer + 1)
            dTr = "dTr%d" % (tracer + 1)
            if cTr not in orig_map_ds:
                break
            log.info("%s: Processing tracers %s, %s" % (precalc_fn, cTr, dTr))

            nc_var = nc.createVariable('age%d' % (tracer + 1),
                                       np.float32, ('time', 'face'),
                                       zlib=True,
                                       complevel=2)
            nc_var_cons = nc.createVariable(cTr,
                                            np.float32, ('time', 'face'),
                                            zlib=True,
                                            complevel=2)
            nc_var_decay = nc.createVariable(dTr,
                                             np.float32, ('time', 'face'),
                                             zlib=True,
                                             complevel=2)

            for t_idx in utils.progress(range(orig_map_ds.dims['time'])):
                t_sec = orig_map_ds['t_sec'].isel(time=t_idx).values
                cTr_orig = orig_map_ds[cTr].isel(time=t_idx).values
                dTr_orig = orig_map_ds[dTr].isel(time=t_idx).values
                age_orig = recalc_age(cTr_orig, dTr_orig)

                nc_var[t_idx, :] = condense(age_orig)
                nc_var_cons[t_idx, :] = condense(cTr_orig)
                nc_var_decay[t_idx, :] = condense(dTr_orig)
            nc.sync()
        nc.close()

    return xr.open_dataset(precalc_fn)
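A usage sketch; model, hydro_orig and agg_grid follow the argument names above, and age1/cTr1 are the variable names created in the netCDF:

ds = agg_decay_tracers(model, hydro_orig, agg_grid)
age_final = ds['age1'].isel(time=-1).values    # aggregated age on the last output step
conc_final = ds['cTr1'].isel(time=-1).values
ccoll = agg_grid.plot_cells(values=age_final)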