Example 1
def splice_shore(roms_pnt, dfm_pnt):
    # May have to do some smoothing on the shoreline before this step
    # Need to extract a linestring from the shoreline which joins
    # those two points in the grid.
    shore_string = np.array(land_smooth.exterior)

    roms_best = np.argmin(utils.dist(shore_string, roms_pnt))
    dfm_best = np.argmin(utils.dist(shore_string, dfm_pnt))
    shore_substring = shore_string[min(roms_best, dfm_best
                                       ):max(roms_best, dfm_best) + 1]

    # Do this before adding the new nodes
    roms_node = g_merge.select_nodes_nearest(roms_pnt)
    dfm_node = g_merge.select_nodes_nearest(dfm_pnt)

    new_nodes = [g_merge.add_node(x=x) for x in shore_substring]
    new_edges = [
        g_merge.add_edge(nodes=[a, b])
        for a, b in zip(new_nodes[:-1], new_nodes[1:])
    ]

    # careful of the order!
    if roms_best < dfm_best:
        g_merge.add_edge(nodes=[new_nodes[0], roms_node])
        g_merge.add_edge(nodes=[new_nodes[-1], dfm_node])
    else:
        g_merge.add_edge(nodes=[new_nodes[-1], roms_node])
        g_merge.add_edge(nodes=[new_nodes[0], dfm_node])
Example 2
def test_distance_away2():
    # Towards a smarter Curve::distance_away(), which understands
    # piecewise linear geometry
    island = np.array([[200, 200], [600, 200], [200, 600]])
    curve = front.Curve(island)

    anchor_f = 919.3
    signed_distance = 50.0
    res = curve.distance_away(anchor_f, signed_distance)
    assert res[0] > anchor_f
    anchor_pnt = curve(anchor_f)

    rel_err = np.abs(utils.dist(anchor_pnt - res[1]) -
                     abs(signed_distance)) / abs(signed_distance)
    assert np.abs(rel_err) <= 0.05

    anchor_f = 440
    signed_distance = -50.0
    res = curve.distance_away(anchor_f, signed_distance)

    anchor_pnt = curve(anchor_f)

    rel_err = np.abs(utils.dist(anchor_pnt - res[1]) -
                     abs(signed_distance)) / abs(signed_distance)
    assert res[0] < anchor_f
    assert np.abs(rel_err) <= 0.05
Example 3
    def set_edge_cell_spacings(self):
        """
        Set dc[edge] = cell-to-cell distance per edge
        and dist[i,l] = cell-to-cellface distance
        and alpha[j,2] = cell to edge interpolation weights

        calculated as euclidean distance
        """
        ii = self.edge_to_cells_reflect
        cc = self.grd.cells_center()

        # this implies some decisions about how to handle non
        # orthogonal cells. could calculate edge-normal signed
        # distance, with or without clipping,
        self.dcj = utils.dist(self.exy[:, None, :] - cc[ii])
        self.dc = self.dcj.sum(axis=1)

        dist = np.zeros((self.ncells, max_sides), np.float64)
        for i in range(self.ncells):
            for l in range(self.ncsides[i]):
                j = self.grd.cells[i]['edges'][l]
                dist[i, l] = utils.dist(self.exy[j], cc[i])

        self.dist = dist
        # default interpolation weight of 0.5; internal edges are overwritten below
        self.alpha = 0.5 * np.ones((self.nedges, 2), np.float64)
        self.alpha[self.intern, :] = self.dcj[self.intern] / self.dc[
            self.intern, None]
        assert np.all(self.dc[self.intern] > 0)
Example 4
def data_to_hyperbola(data,a=0,b=1):
    s=np.linspace(-5,5,100)

    # Draw hyperbolas from data:
    pa=np.r_[ data['rx_x'][a], data['rx_y'][a] ]
    pb=np.r_[ data['rx_x'][b], data['rx_y'][b] ]

    f0=0.5*(pa+pb)

    dab=pb-pa
    theta=np.arctan2(dab[1],dab[0]) # for the rotation
    focus_dist=utils.dist(dab)

    delta_ab=(data['rx_t'][b] - data['rx_t'][a])*0.5*(data['rx_c'][a]+data['rx_c'][b])

    # well, I can get the eccentricity, right?
    hyp_c=0.5*utils.dist(pa-pb) # center to one focus
    # (distance from a point to the other focus)
    #   - (distance to this focus):
    # (c+a) - (c-a) = delta_ab
    hyp_a=-delta_ab/2

    ecc=hyp_c/hyp_a

    # ecc = sqrt(1+b^2/a^2)
    # ecc^2-1 = b^2/a^2
    # take a^2=1
    # b^2=ecc^2-1
    # b=sqrt( ecc^2-1)
    B=np.sqrt(ecc**2-1)

    hxy_sim=np.c_[np.cosh(s),
                  B*np.sinh(s)]
    if 0:
        hxy_sim_ref=np.c_[-np.cosh(s),
                          B*np.sinh(s)]
        # while working through this, include the reflection
        hxy_sim=np.concatenate( [hxy_sim,
                                 [[np.nan,np.nan]],
                                 hxy_sim_ref])

    # so a point on the hyperbola is at [1,0]
    # and then the focus is at [ecc,0]

    f1=np.array([ecc,0])
    f2=-f1

    deltas=utils.dist( hxy_sim-f1) - utils.dist(hxy_sim-f2)

    # scale it - the focus is currently at ecc, and I want it at
    # hyp_c, so scale by hyp_c/ecc = hyp_a
    hxy_cong=hxy_sim * hyp_a

    hxy_final=utils.rot(theta,hxy_cong) + f0 + data['xy0']

    return hxy_final
Example 5
def one_point_quad_cost(x0, edge_scales, quads, para_scale, perp_scale):
    """
    edge_scales: [N,3] of x, y, and target scale; used for the scale cost below.
    quads: [N,4,2] coordinates for the quads.  the node being moved appears
      first for each.
    """
    quads[:, 0, :] = x0
    cc_cost = 0

    del ccs[:]
    del radii[:]

    for quad_i in range(quads.shape[0]):
        # This leads to really bad orthogonality
        #tri_cc=utils.poly_circumcenter(quads[quad_i,:,:])
        # much better
        tri_cc = utils.poly_circumcenter(quads[quad_i, :3, :])
        ccs.append(tri_cc)

        this_quad_cc_cost = 0
        radius = 0

        for side in range(4):
            sidep1 = (side + 1) % 4

            p1 = quads[quad_i, side]
            p2 = quads[quad_i, sidep1]

            deltaAB = tri_cc - p1

            radius += utils.dist(deltaAB)

            AB = p2 - p1
            magAB = math.sqrt(AB[0] * AB[0] + AB[1] * AB[1])
            vecAB = AB / magAB
            leftAB = vecAB[0] * deltaAB[1] - vecAB[1] * deltaAB[0]

            cc_fac = -4.  # not bad
            # clip to 100, to avoid overflow in math.exp
            this_edge_cc_cost = math.exp(min(100, cc_fac * leftAB / magAB))
            this_quad_cc_cost += this_edge_cc_cost

        radii.append(radius / 4.)

        cc_cost += this_quad_cc_cost

    dists = utils.dist(x0 - edge_scales[:, :2])
    tgts = edge_scales[:, 2]
    scale_cost = np.sum((dists - tgts)**2 / tgts**2)

    # With even weighting between these, some edges are pushed long rather than
    # having nice angles.
    # 3 is a shot in the dark.
    # 50 is more effective at avoiding a non-orthogonal cell
    # 50 was good for triangles.
    # trying more on the scale here
    return 5 * cc_cost + scale_cost
Example 6
def update(ti):
    del ax.collections[1:]
    del ax.lines[:]
    t_a, parts_a = dist_bspp.read_timestep(ts=ti)
    t_b, parts_b = dist_nobspp.read_timestep(ts=ti)

    segs = np.concatenate(
        [parts_a['x'][:, None, :2], parts_b['x'][:, None, :2]], axis=1)

    mode = 'dist_at_a'
    validate = 'never_stuck'

    valid = np.ones(len(parts_a), np.bool8)

    if validate == 'depth':
        time = utils.to_dt64(dist_bspp.time[ti])
        depths_a = particle_water_depth(parts_a['x'][:, :2], time)
        depths_b = particle_water_depth(parts_b['x'][:, :2], time)
        valid = valid & (depths_a > 0.05) & (depths_b >= 0.05)
    if validate == 'never_stuck':
        valid = valid & valid_never_stuck

    del ax.lines[:]

    segs = segs[valid, :, :]

    items = []
    if mode == 'segs':
        lcoll = collections.LineCollection(segs, color='m', lw=1.0)
        ax.add_collection(lcoll)
        items.append(lcoll)
    elif mode == 'dist_at_a':
        separations = utils.dist(segs[:, 0, :], segs[:, 1, :])
        order = np.argsort(separations)
        scat = ax.scatter(segs[order, 0, 0],
                          segs[order, 0, 1],
                          20,
                          separations[order],
                          cmap='jet')
        items.append(scat)
    elif mode == 'dist_at_b':
        separations = utils.dist(segs[:, 0, :], segs[:, 1, :])
        order = np.argsort(separations)
        scat = ax.scatter(segs[order, 1, 0],
                          segs[order, 1, 1],
                          20,
                          separations[order],
                          cmap='jet')
        items.append(scat)

    if mode != 'segs':
        cax.cla()
        plt.colorbar(items[0], cax=cax)
        items[0].set_clim([0, 500])

    return items
Example 7
def cost(vec):
    shifts, ping_xyt = vec_to_params(vec)

    adj_times = matrix.copy()
    adj_times[:, 1:] += shifts[None, :]

    transit_times = adj_times - ping_xyt[:, 2, None]
    c = 1500  # constant speed of sound for the moment.
    transit_dist = transit_times * c

    #         (2344, 2)       (12,2)
    geo_dists = utils.dist(ping_xyt[:, None, :2] - rx_xy[None, :])

    errors = (transit_dist - geo_dists)

    mse = np.nanmean(errors**2)  # m^2 errors
    cost = mse

    # So being off by 100m is 1e4 error, same as having
    # a -10ms travel time.

    # bad_transit=transit_times[np.isfinite(transit_times)]
    # neg_transit_cost=(bad_transit.clip(None,0)**2 * 1e8).sum()
    # cost+=neg_transit_cost

    #print("%.2f"%mse)
    return cost
Example 8
def test_distance_away():
    crv = hex_curve()

    if plt:
        plt.clf()
        crv.plot()
        plt.axis('equal')

    rtol = 0.05

    for f00, tgt, style in [(0, 10, 'g-'), (3.4, 20, 'r-'), (3.4, -20, 'r--')]:
        for f0 in np.linspace(f00, crv.distances[-1], 20):
            x0 = crv(f0)
            f, x = crv.distance_away(f0, tgt, rtol=rtol)
            d = utils.dist(x - x0)
            assert np.abs((d - np.abs(tgt)) / tgt) <= rtol
            if plt:
                plt.plot([x0[0], x[0]], [x0[1], x[1]], style)

    try:
        f, x = crv.distance_away(0.0, 50, rtol=0.05)
        raise Exception("That was supposed to fail!")
    except crv.CurveException:
        #print "Okay"
        pass
Example 9
    def profile(x, s, perp):
        probe_left = geometry.LineString([x, x + L * perp])
        probe_right = geometry.LineString([x, x - L * perp])

        left_cross = smooth_bound.exterior.intersection(probe_left)
        right_cross = smooth_bound.exterior.intersection(probe_right)

        assert left_cross.type == 'Point', "Fix this for multiple intersections"
        assert right_cross.type == 'Point', "Fix this for multiple intersections"

        pnt_left = np.array(left_cross)
        pnt_right = np.array(right_cross)
        d_left = utils.dist(x, pnt_left)
        d_right = utils.dist(x, pnt_right)

        return np.interp(np.linspace(-1, 1, M), [-1, 0, 1],
                         [-d_right, 0, d_left])
Example 10
def steady_streamline_oneway(g, Uc, x0, max_t=3600, max_dist=np.inf):
    # trace some streamlines
    x0 = np.asarray(x0)
    t0 = 0.0

    c = g.select_cells_nearest(x0, inside=True)
    t = 0.0  # steady field, start the counter at 0.0
    #edge_norm=g.edges_normals()
    #edge_ctr=g.edges_center()
    x = x0.copy()
    pnts = [x.copy()]
    cells = [c]  # for debugging track the past cells
    last_cell = c
    last_path = g.cell_path(c)

    # e2c=g.edges['cells']

    def veloc(time, x):
        """ velocity at location x[0],x[1] """
        if last_path.contains_point(x):
            c = last_cell
        else:
            c = g.select_cells_nearest(x, inside=True)
        if c is not None:
            return Uc[c]
        else:
            return np.zeros(2)
        #t=finder(x[0],x[1])
        #u=u_coeffs[t,0]*x[0]+u_coeffs[t,1]*x[1]+u_coeffs[t,2]
        #v=v_coeffs[t,0]*x[0]+v_coeffs[t,1]*x[1]+v_coeffs[t,2]
        #return [u,v]

    ivp = scipy.integrate.RK45(veloc, t0=t0, y0=x0, t_bound=max_t, max_step=1)
    d = 0.0
    while ivp.status == 'running':
        ivp.step()
        d += utils.dist(pnts[-1], ivp.y)
        pnts.append(ivp.y.copy())
        if c is not None:
            cells.append(c)
        else:
            cells.append(-1)

        if not last_path.contains_point(ivp.y):
            c = g.select_cells_nearest(ivp.y, inside=True)
            if c is not None:
                last_cell = c
                last_path = g.cell_path(c)

        if d >= max_dist:
            break

    pnts = np.array(pnts)
    cells = np.array(cells)
    ds = xr.Dataset()
    ds['x'] = ('time', 'xy'), pnts
    ds['cell'] = ('time', ), cells
    return ds
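
A minimal usage sketch (not part of the original script): `g`, `Uc`, and the seed point below are assumptions purely for illustration; the returned Dataset exposes the traced points as `ds['x']`.

# hypothetical usage of steady_streamline_oneway(); g and Uc are assumed to be
# an UnstructuredGrid and a per-cell velocity array of shape [Ncells, 2]
import matplotlib.pyplot as plt

x_start = [550000.0, 4185000.0]  # made-up seed point
ds = steady_streamline_oneway(g, Uc, x_start, max_t=1800, max_dist=5000.0)

# ds['x'] has dims (time, xy); plot the traced streamline
plt.plot(ds['x'].values[:, 0], ds['x'].values[:, 1], 'b-')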
Example 11
def cost2(xy,data):
    rx_xy=np.c_[ data['rx_x'], data['rx_y']]
    dists=utils.dist(xy - rx_xy)
    transits=dists/data['rx_c']
    d_transits=np.diff(transits)
    d_times=np.diff(data['rx_t'])
    
    err=( (d_transits-d_times)**2 ).sum()
    return err
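
A hedged usage sketch: cost2 reads like a position-fit objective over receiver time differences, so something like scipy.optimize.minimize could drive it. The initial guess and the fields assumed in `data` (rx_x, rx_y, rx_c, rx_t) follow the function body above; everything else is illustrative.

# sketch only: assumes `data` carries rx_x, rx_y, rx_c, rx_t as used by cost2()
import numpy as np
from scipy.optimize import minimize

xy0 = np.r_[np.mean(data['rx_x']), np.mean(data['rx_y'])]  # start near the receivers
best = minimize(cost2, xy0, args=(data,), method='Nelder-Mead')
print("estimated position:", best.x)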
Example 12
def filter_fixes_by_speed(fish,max_speed=5.0):
    # pre-allocate, in case there are indices that
    # have no fixes.
    posns=[ [] for _ in range(fish.dims['index']) ] # each index needs its own list
    fix_xy=np.c_[ fish.fix_x.values,
                  fish.fix_y.values ]
    for key,grp in utils.enumerate_groups(fish.fix_idx.values):
        posns[key].append(fix_xy[grp])

    for idx in range(fish.dims['index']):
        if len(posns[idx])<2: continue # only filtering out multi-fixes
        
        for pxy in posns[idx]:
            for pxy_previous in posns[idx-1]:
                dist=utils.dist(pxy-pxy_previous)
                # unfinished: dt=HERE - but use YAPS instead
Example 13
def plot_pnt_pair(pnt_pair, node_depths):
    m = eval_pnt_pair(pnt_pair, node_depths=node_depths)

    ctr = np.array(pnt_pair).mean(axis=0)
    dist = utils.dist(pnt_pair[0], pnt_pair[1])

    zoom = [
        ctr[0] - 2 * dist, ctr[0] + 2 * dist, ctr[1] - 2 * dist,
        ctr[1] + 2 * dist
    ]

    # analyze a pair of points
    plt.figure(11).clf()
    fig, axs = plt.subplots(1, 2, num=11)

    ax_g, ax_z = axs

    g.plot_edges(ax=ax_g, clip=zoom)

    ax_g.plot(m['path_xy'][:, 0], m['path_xy'][:, 1], 'g-o')
    ax_g.axis('equal')
    ax_g.axis(zoom)

    ax_z.plot(m['path_dist'], m['path_z'], 'g-o', label='node elevations')

    ax_z.plot(m['resamp_dist'], m['resamp_z'], 'k-', label='DEM')
    ax_z.plot(m['resamp_dist'], m['resamp_z_typ3'], 'b-', label='typ 3')

    ax_z.axhline(m['ref_eta'],
                 color='gray',
                 lw=0.5,
                 label='Ref eta=%.1f' % m['ref_eta'])

    ax_z.legend()

    lines = [
        "XS area error\n%.1f m2\n%.1f%%" %
        (m['A_err'], 100 * m['A_err'] / m['A_dem']),
        "Length\n%.1f m\n%.1f%%" % (m['L_err'], 100 * m['L_err'] / m['L_dem'])
    ]
    ax_z.text(0.01, 0.15, "\n".join(lines), transform=ax_z.transAxes)
    return m, ax_g, ax_z
Example 14
def grid_to_graph(g):
    # use cell-to-cell connections to make it easier to avoid
    # hopping over land
    e2c=g.edge_to_cells()
    internal=np.all(e2c>=0,axis=1)
    c1=e2c[internal,0]
    c2=e2c[internal,1]
    cc=g.cells_center()
    lengths=utils.dist(cc[c1],cc[c2])
    bi_lengths=np.concatenate([lengths,lengths])
    bi_lengths=bi_lengths.astype(np.float32) # more than enough precision.
    A=np.concatenate((c1,c2))
    B=np.concatenate((c2,c1)) # reversed order so each internal edge is bidirectional
    graph=sparse.csr_matrix( (bi_lengths,(A,B)),
                             shape=(g.Ncells(),g.Ncells()) )
    # use scipy graph algorithms to find the connections
    # dists=csgraph.shortest_path(graph,directed=False,indices=cloud_nodes)
    # that ends up being (32,Nnodes), where 32 is the number of unique
    # nodes we started with
    return graph
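
A small sketch of how the returned matrix could be used, along the lines of the commented-out call above; `g` and the seed cell indices are assumptions for illustration.

# sketch: shortest in-grid distances from a few seed cells, using the
# cell-to-cell graph built by grid_to_graph()
from scipy.sparse import csgraph

graph = grid_to_graph(g)
seed_cells = [0]  # hypothetical seed cell indices
dists = csgraph.shortest_path(graph, directed=False, indices=seed_cells)
# dists has shape (len(seed_cells), g.Ncells())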
Example 15
    def init_exchanges(self):
        # Build metadata for exchanges:

        self.meta_exchange = np.zeros(
            self.Nexchange,
            [
                ('src_seg', 'i4'),
                ('dst_seg', 'i4'),
                ('orient', 'i4'),
                # length scale constant
                ('L0', 'f8'),
                # length scale as factor * depth
                ('Lfac', 'f8')
            ])

        self.meta_exchange['orient'][:self.Nexchange_hor] = self.HOR
        self.meta_exchange['orient'][self.Nexchange_hor:] = self.VER
        e2c = self.g.edge_to_cells()

        centers = self.g.cells_center()  # can use circumcenters in testing at least

        # Fill in the horizontal exchanges:
        for j in range(self.Nlink):
            c0, c1 = e2c[j]
            for k in range(self.n_layer):
                j3d = self.exch3d_hor(j, k)
                self.meta_exchange['src_seg'][j3d] = self.seg3d(c0, k)
                self.meta_exchange['dst_seg'][j3d] = self.seg3d(c1, k)
                self.meta_exchange['L0'][j3d] = utils.dist(centers[c0] -
                                                           centers[c1])
                self.meta_exchange['Lfac'][j3d] = 0.0
        # Fill in the vertical exchanges:
        for c in range(self.Nelement):
            for k in range(self.n_layer - 1):
                j3d = self.exch3d_ver(c, k)
                self.meta_exchange['src_seg'][j3d] = self.seg3d(c, k)
                self.meta_exchange['dst_seg'][j3d] = self.seg3d(c, k + 1)
                self.meta_exchange['L0'][j3d] = 0.0
                self.meta_exchange['Lfac'][j3d] = 0.5 * (self.d_sigmas[k] +
                                                         self.d_sigmas[k + 1])
Example 16
    def grad2d_full_op(self):
        """ gradient operator in 2D as a sparse array 
        """
        g = self.g
        N = g.Ncells()
        M = g.Nedges()

        # construct the matrix from a sequence of indices and values
        # Gradient operator G is (M,N)
        # F = G.P

        g.edge_to_cells()  # ensure edges['cells'] is populated

        centers = g.cells_center()  # can use circumcenters in testing at least

        ij = []
        values = []  # successive values for the same (i,j) will be summed

        for j in range(g.Nedges()):
            e = g.edges[j]
            ic1, ic2 = e['cells']

            if ic1 < 0 or ic2 < 0:
                continue  # boundary edge

            L = utils.dist(centers[ic1] - centers[ic2])

            # Assumes the sign convention of UnstructuredGrid.edges_normals(),
            # which is positive towards c2
            #
            ij.append((j, ic1))
            values.append(-1 / L)
            ij.append((j, ic2))
            values.append(1 / L)

        ijs = np.array(ij, dtype=np.int32)
        data = np.array(values, dtype=np.float64)
        G = sparse.coo_matrix((data, (ijs[:, 0], ijs[:, 1])), shape=(M, N))
        return G
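
A short sketch of applying the operator, following the F = G.P note in the docstring; `solver` stands in for an instance of the class that defines grad2d_full_op(), and the cell scalar is arbitrary.

# hypothetical usage: per-edge normal differences from a per-cell scalar
# (boundary edges stay zero, since they were skipped when building G)
G = solver.grad2d_full_op()          # (Nedges, Ncells) sparse operator
P = solver.g.cells_center()[:, 0]    # e.g. cell-center x coordinate as a test field
F = G.dot(P)                         # per-edge difference, positive towards cell c2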
Example 17
    def centerline_to_node_coordinates(self,center,max_width=50.0):
        """
        Select nodes within max_width/2 of the centerline, 
        and approximate an along/across channel coordinate system.
        Trims the ends to approximate the distance as perpendicular to the
        line. Used in add_mouth_as_bathy()

        center: linestring geometry
        max_width: full width of the swath to consider
        returns node_indexes, d_along, d_across
        node_indexes is in sorted order.
        """
        # Find the collection of nodes that may be relevant.
        region=center.buffer(max_width/2.0)
        node_sel=np.nonzero( self.grid.select_nodes_intersecting(region) )[0]

        # For those nodes calculate longitudinal and lateral coordinates
        node_long=np.r_[ [center.project( geometry.Point(p) )
                          for p in self.grid.nodes['x'][node_sel]] ]

        # And ignore nodes closest to the ends
        good=(node_long>0.0) & (node_long<center.length)
        node_sel=node_sel[good]
        node_long=node_long[good]
        projected=np.array( [np.array(center.interpolate(nlong)) for nlong in node_long] )
        pnts=self.grid.nodes['x'][node_sel]

        node_lat=utils.dist(projected-pnts) # fyi, not really a coordinate, since it's nonnegative
        good=node_lat<=max_width/2.
        node_sel=node_sel[good]
        node_long=node_long[good]
        node_lat=node_lat[good]
        pnts=self.grid.nodes['x'][node_sel]

        # This makes it easier for downstream code to use searchsorted
        assert np.all(np.diff(node_sel)>0)
        # Now I have a local coordinate system (ish)
        return node_sel,node_long,node_lat
Example 18
def maybe_refine_edge(j):
    centroids = cdt.cells_centroid()

    a, b = cdt.edges['nodes'][j]

    for cell in cdt.edges['cells'][j]:
        if poly.contains(geometry.Point(centroids[cell])):
            break
    else:
        assert False

    for n in cdt.cells['nodes'][cell]:
        if n not in [a, b]:
            c = n
            break
    else:
        assert False
    ab_len = utils.dist(cdt.nodes['x'][a] - cdt.nodes['x'][b])

    circum = cdt.cells_center()[cell]
    ab_circum_len = utils.point_line_distance(circum, cdt.nodes['x'][[a, b]])

    if (ab_len > l_min) and (ab_len > l_max_frac * ab_circum_len):
        subdivide(j)
Example 19
data['offset_idx'] = offset_idx  # already 1-based b/c of t_min
data['ss_idx'] = ss_idx

data['toa_offset'] = toa_offset

data['sigma_toa'] = 0.0001

# --

# Transformed data:
xdata = dict(data)
dist_mat = np.zeros((data['nh'], data['nh']), np.float64)
for h1 in range(data['nh']):
    for h2 in range(data['nh']):
        dist_mat[h1, h2] = utils.dist(data['H'][h1], data['H'][h2])
xdata['dist_mat'] = dist_mat
xdata['off_mask'] = np.arange(xdata['nh']) != (xdata['tk'] - 1)
xdata['mean_toa_offset'] = np.nanmean(xdata['toa_offset'], axis=1)

off_mask = xdata['off_mask']
mean_toa_offset = xdata['mean_toa_offset']

dist_mat = xdata['dist_mat']
sync_tag_idx_vec = xdata['sync_tag_idx_vec'] - 1
ss_idx0 = xdata['ss_idx'] - 1
offset_idx0 = xdata['offset_idx'] - 1
toa_offset = xdata['toa_offset']
valid = np.isfinite(toa_offset)

# Attempt a direct matrix solve
Example 20
def figure_usgs_salinity_time_series(station, station_name):
    mod_lp_win = usgs_lp_win = 40  # 40h lowpass

    # Gather USGS data:
    ds = usgs_salinity_time_series(station)
    usgs_dt_s = np.median(np.diff(ds.time)) / np.timedelta64(1, 's')
    usgs_stride = slice(None, None, max(1, int(3600. / usgs_dt_s)))
    if 'salinity_01' in ds:
        obs_salt_davg = np.c_[ds.salinity.values[usgs_stride],
                              ds.salinity_01.values[usgs_stride]]
        obs_salt_davg = np.nanmean(obs_salt_davg, axis=1)
    else:
        obs_salt_davg = ds.salinity.values[usgs_stride]

    dists = utils.dist(his_xy, [ds.x, ds.y])
    station_idx = np.argmin(dists)
    print("Nearest model station is %.0f m away from observation" %
          (dists[station_idx]))
    print(station_idx)

    def low_high(d, winsize):
        high = percentile_filter(d, 95, winsize)
        low = percentile_filter(d, 5, winsize)
        high = filters.lowpass_fir(high, winsize)
        low = filters.lowpass_fir(low, winsize)
        return low, high

    obs_salt_range = low_high(obs_salt_davg, usgs_lp_win)

    mod_salt_davg = his.salinity.isel(stations=station_idx).mean(dim='laydim')
    mod_salt_range = low_high(mod_salt_davg, mod_lp_win)

    # Try picking out a reasonable depth in the model
    surf_label = "Surface"
    bed_label = "Bed"
    if ds.salinity.attrs['elev_mab'] is not None:
        z_mab = ds.salinity.attrs['elev_mab']
        surf_label = "%.1f mab" % z_mab
        mod_salt_surf = extract_at_zab(his,
                                       "salinity",
                                       z_mab,
                                       stations=station_idx)
    else:
        mod_salt_surf = his.salinity.isel(stations=station_idx, laydim=-1)

    if ('salinity_01' in ds) and (ds.salinity_01.attrs['elev_mab']
                                  is not None):
        z_mab = ds.salinity_01.attrs['elev_mab']
        bed_label = "%.1f mab" % z_mab
        mod_salt_bed = extract_at_zab(his,
                                      "salinity",
                                      z_mab,
                                      stations=station_idx)
    else:
        mod_salt_bed = his.salinity.isel(stations=station_idx, laydim=0)

    mod_deltaS = mod_salt_bed - mod_salt_surf

    if 'salinity_01' in ds:
        if ds.site_no == '375607122264701':
            ds = ds.rename({
                "salinity": "salinity_01",
                "salinity_01": "salinity"
            })

    if 'salinity_01' in ds:
        obs_deltaS = ds.salinity_01.values - ds.salinity.values
    else:
        obs_deltaS = None

    if 1:  # plotting time series
        plt.figure(1).clf()
        fig, ax = plt.subplots(num=1)
        fig.set_size_inches([10, 4.75], forward=True)

        # These roughly mimic the style of water level plots in the validation report.
        obs_color = 'cornflowerblue'
        obs_lw = 1.5
        mod_color = 'k'
        mod_lw = 0.8

        if 'salinity_01' in ds:
            ax.plot(utils.to_dnum(ds.time)[usgs_stride],
                    filters.lowpass_fir(ds.salinity[usgs_stride], usgs_lp_win),
                    label='Obs. Upper',
                    lw=obs_lw,
                    color=obs_color)

            ax.plot(utils.to_dnum(ds.time)[usgs_stride],
                    filters.lowpass_fir(ds.salinity_01[usgs_stride],
                                        usgs_lp_win),
                    label='Obs. Lower',
                    lw=obs_lw,
                    color=obs_color,
                    ls='--')
        else:
            ax.plot(utils.to_dnum(ds.time)[usgs_stride],
                    filters.lowpass_fir(ds.salinity[usgs_stride], usgs_lp_win),
                    label='Obs.',
                    lw=obs_lw,
                    color=obs_color)

        ax.plot(utils.to_dnum(his.time),
                filters.lowpass_fir(mod_salt_surf, 40),
                label='Model %s' % surf_label,
                lw=mod_lw,
                color=mod_color)

        ax.plot(utils.to_dnum(his.time),
                filters.lowpass_fir(mod_salt_bed, 40),
                label='Model %s' % bed_label,
                lw=mod_lw,
                color=mod_color,
                ls='--')

        if 1:  # is it worth showing tidal variability?

            ax.fill_between(utils.to_dnum(ds.time)[usgs_stride],
                            obs_salt_range[0],
                            obs_salt_range[1],
                            color=obs_color,
                            alpha=0.3,
                            zorder=-1,
                            lw=0)

            ax.fill_between(utils.to_dnum(his.time),
                            mod_salt_range[0],
                            mod_salt_range[1],
                            color='0.3',
                            alpha=0.3,
                            zorder=-1,
                            lw=0)

        ax.set_title(station_name)
        ax.xaxis.axis_date()
        fig.autofmt_xdate()
        ax.set_ylabel('Salinity (ppt)')
        ax.legend(fontsize=10, loc='lower left')

        ax.axis(xmin=utils.to_dnum(t_spunup), xmax=utils.to_dnum(t_stop))

        fig.tight_layout()

        safe_station = station_name.replace(' ', '_')
        fig.savefig(os.path.join(savepath, "%s.png" % safe_station), dpi=100)
        fig.savefig(os.path.join(savepath, "%s.pdf" % safe_station))

    if tex_fp is not None:  # metrics
        target_time_dnum = utils.to_dnum(his.time.values)

        obs_time_dnum = utils.to_dnum(ds.time.values)

        obs_salt_davg_intp = utils.interp_near(target_time_dnum,
                                               obs_time_dnum[usgs_stride],
                                               obs_salt_davg, 1.5 / 24)
        valid = np.isfinite(mod_salt_davg * obs_salt_davg_intp).values
        valid = (valid
                 & (target_time_dnum >= utils.to_dnum(t_spunup))
                 & (target_time_dnum <= utils.to_dnum(t_stop)))
        dnum = target_time_dnum[valid]
        mod_values = mod_salt_davg[valid].values
        obs_values = obs_salt_davg_intp[valid]

        bias = np.mean(mod_values - obs_values)
        ms = utils.model_skill(mod_values, obs_values)
        r2 = np.corrcoef(mod_values, obs_values)[0, 1]
        rmse = utils.rms(mod_values - obs_values)
        tex_fp.write((
            "%-16s  "  # station name
            " & %7.3f"  # skill
            " & %11.2f"  # bias
            " & %7.3f"  # r2
            " & %10.2f"  # rmse
            " \\\\ \\hline \n") % (station_name, ms, bias, r2, rmse))
Example 21
 def mask_near_point(xy):
     dists = utils.dist(xy, coamps_xy)
     return (dists > buffer_dist)
Example 22
z_cell = g_csc.interp_node_to_cell(g_csc.nodes['depth'])

cc = g_csc.cells_center()
thresh = 400  # 500 includes one bad node at the DCC.  400 is good for the current grid.

for n in valid_dcd_nodes:
    dsm_x = dsm_grid.nodes['x'][n]
    c_near = g_csc.select_cells_nearest(dsm_x)

    # And then find the deepest cell nearby, subject
    # to distance from DSM point
    c_nbrs = [c_near]
    for _ in range(5):
        c_nbrs = np.unique([
            nbr for c in c_nbrs for nbr in g_csc.cell_to_cells(c)
            if nbr >= 0 and utils.dist(dsm_x, cc[nbr]) < thresh
        ])
    if len(c_nbrs) == 0:
        bad_pairs.append([dsm_x, cc[c_near]])
    else:
        match = c_nbrs[np.argmin(z_cell[c_nbrs])]
        pairs.append([dsm_x, cc[match]])

if 1:

    plt.figure(1).clf()
    dsm_grid.plot_nodes()
    ecoll = dsm_grid.plot_edges(centerlines=True)

    g_csc.plot_cells(alpha=0.5, zorder=-1, color='0.7')
Example 23
 def calc_distances(self):
     dist_mat=np.zeros( (self.nh,self.nh), np.float64)
     for h1 in range(self.nh):
         for h2 in range(self.nh):
             dist_mat[h1,h2]=utils.dist(self.H[h1],self.H[h2])
     return dist_mat
Example 24
    txys = []
    for diff in [diff_fwd, diff_rev]:
        ivp = scipy.integrate.RK45(diff,
                                   t0=t0,
                                   y0=y0[:2],
                                   t_bound=max_t,
                                   max_step=10)

        d = 0.0
        output = []
        rec = lambda: output.append((ivp.t, ivp.y[0], ivp.y[1]))
        rec()
        while ivp.status == 'running':
            ivp.step()
            rec()
            d += utils.dist(output[-2][1:], output[-1][1:])
            if d >= max_dist:
                # print(".",end="",flush=True)
                break
        #if d<max_dist:
        #    print("-",end="",flush=True)
        txys.append(np.array(output))
    # concatenate forward and backward
    fwd, rev = txys
    rev[:, 0] *= -1  # negative time for reverse
    rev = rev[::-1]
    track = np.concatenate((rev[:-1], fwd[:]), axis=0)
    tracks.append(track)

Example 25
##
        ivp = scipy.integrate.RK45(diff,
                                   t0=t0,
                                   y0=y0[:2],
                                   t_bound=max_t,
                                   max_step=10)

        d = 0.0
        output = []
        if stream_dim == 'time':
            rec = lambda: output.append((ivp.t, ivp.y[0], ivp.y[1]))
        else:
            rec = lambda: output.append((d, ivp.y[0], ivp.y[1]))
        rec()
        while ivp.status == 'running':
            ivp.step()
            d += utils.dist(output[-1][1:], ivp.y)
            rec()
            if d >= max_dist:
                break
        txys.append(np.array(output))
    # concatenate forward and backward
    fwd, rev = txys
    rev[:, 0] *= -1  # negate time for reverse
    rev = rev[::-1]
    track = np.concatenate((rev[:-1], fwd[:]), axis=0)
    tracks.append(track)

##

if 1:
    plt.figure(2).clf()
Example 26
##

# And what does magnitude of distance gradient look like?
from stompy.model.stream_tracer import U_perot

e2c=g.edge_to_cells().copy()
cc=g.cells_center()
ec=g.edges_center()

c1=e2c[:,0]
c2=e2c[:,1]
c1[c1<0]=c2[c1<0]
c2[c2<0]=c1[c2<0]
dg=np.where(c1==c2,
            1.0,  # cell differences will always be zero, so this doesn't matter.
            utils.dist(cc[c1] - cc[c2]))

def dist_ratio(D):
    # calculate per-edge gradients
    dDdn=(D[c2]-D[c1])/dg
    gradD=U_perot(g,g.edges_length()*dDdn,g.cells_area())
    gradmag=utils.mag(gradD)
    return gradmag

##

from matplotlib.colors import LogNorm


for v in ds.data_vars:
    if not v.startswith('dist'): continue
Example 27
 def cost(c):
     return utils.dist(xy,cc[c])
Example 28
def add_sfbay_potw(mdu,
                   rel_src_dir, # added rel_src_dir alliek dec 2020
                   potw_dir,
                   adjusted_pli_fn,
                   grid,dredge_depth,
                   all_flows_unit=False,
                   time_offset=None,
                   write_salt=True,write_temp=True):
    """
    time_offset: shift all dates by the given offset.  To run 2016 
    with data from 2015, specify np.timedelta64(-365,'D')

    write_salt: leave as True for older DFM; for newer DFM, set True only
    when the simulation includes salinity.

    write_temp: same, but for temperature
    """
    run_base_dir=mdu.base_path
    ref_date,run_start,run_stop = mdu.time_range()
    old_bc_fn=mdu.filepath(["external forcing","ExtForceFile"])

    if time_offset is not None:
        run_start = run_start + time_offset
        run_stop = run_stop + time_offset
        ref_date = ref_date + time_offset
        
    potws=xr.open_dataset(os.path.join(potw_dir,'outputs','sfbay_delta_potw.nc'))
    adjusted_features=dio.read_pli(adjusted_pli_fn)

    # select a time subset of the flow data, starting just before the
    # simulation period, and running beyond the end:
    time_pnts = np.searchsorted(potws.time, [run_start-DAY,run_stop+DAY])
    time_pnts = time_pnts.clip(0,len(potws.time)-1)
    time_idxs=range(time_pnts[0],time_pnts[1]) # enumerate them for loops below

    with open(old_bc_fn,'at') as fp:
        for site in potws.site.values:
            # NB: site is bytes at this point
            potw=potws.sel(site=site)
            try:
                site_s=site.decode()
            except AttributeError:
                site_s=site # py2

            if site_s in ['false_sac','false_sj']:
                six.print_("(skip %s) "%site_s,end="")
                continue

            if potw.utm_x.values.mean() > 610200:
                # Delta POTWs are in this file, too, but not in this
                # grid.  Luckily they are easy to identify based on
                # x coordinate.
                six.print_("(skip %s -- too far east) "%site_s,end="")
                continue
            
            six.print_("%s "%site_s,end="")

            fp.write( ("QUANTITY=discharge_salinity_temperature_sorsin\n"
                       "FILENAME=%s/%s.pli\n"
                       "FILETYPE=9\n"
                       "METHOD=1\n"
                       "OPERAND=O\n"
                       "AREA=0 # no momentum\n"
                       "\n")%(rel_src_dir,site_s) ) # added rel_src_dir alliek dec 2020

            # Write the location - writing a single point appears to work,
            # based on how it shows up in the GUI.  Otherwise we'd have to
            # manufacture a point outside the domain.
            with open(os.path.join(run_base_dir,rel_src_dir,'%s.pli'%site_s),'wt') as pli_fp: # added rel_src_dir alliek dec 2020
                # Scan adjusted features for a match to use instead
                # This is handled slightly differently with POTWs - use the

                # put the depth at -50, should be at the bed
                feat=[site_s,
                      np.array([[potw.utm_x.values,potw.utm_y.values,-50.0]]),
                      ['']]

                for adj_feat in adjusted_features:
                    if adj_feat[0] == site_s:
                        # Merge them if the adjusted feature is more than 10 m away
                        # (to allow for some rounding in the ascii round-trip.)
                        offset=utils.dist( adj_feat[1][-1][:2] - feat[1][-1][:2] )
                        if offset > 10.0:
                            # Just add on the extra point - but may have to promote one 
                            # or the other to 3D.
                            old_geo=feat[1]
                            new_geo=adj_feat[1][-1:]
                            if old_geo.shape[1] != new_geo.shape[1]:
                                if old_geo.shape[1]<3:
                                    old_geo=np.concatenate( (old_geo,0*old_geo[:,:1]), axis=1)
                                else:
                                    # copy depth from old_geo
                                    new_geo=np.concatenate( (new_geo,
                                                             old_geo[-1,-1]+0*new_geo[:,:1]),
                                                            axis=1)

                            # if the original feature was outside the grid, then all is well,
                            # and it will show up in the GUI as a line from the original location
                            # outside the domain to the new location in the domain.
                            if grid.select_cells_nearest(old_geo[-1,:2],inside=True) is None:
                                feat[1]=np.concatenate( (old_geo,new_geo),axis=0 )
                                if len(feat)==3: # includes labels, but they don't matter here, right?
                                    feat[2].append('')
                            else:
                                # but if the original location is inside the grid, this will be interpreted
                                # as a sink-source pair, so we instead just put the single, adjusted
                                # location in.  This is done after potentially copying z-coordinate
                                # data from the original location.
                                feat[1]=new_geo
                        break

                dio.write_pli(pli_fp,[feat])

                dredge_grid.dredge_discharge(grid,feat[1],dredge_depth)

            with open(os.path.join(run_base_dir,rel_src_dir,'%s.tim'%site_s),'wt') as tim_fp: # added rel_src_dir alliek dec 2020
                for tidx in time_idxs:
                    tstamp_minutes = (potw.time[tidx]-ref_date) / np.timedelta64(1,'m')

                    if all_flows_unit:
                        flow_cms=1.0
                    else:
                        flow_cms=potw.flow[tidx]

                    items=[tstamp_minutes,flow_cms]
                    if write_salt:
                        items.append(0.0)
                    if write_temp:
                        items.append(20.0)

                    tim_fp.write(" ".join(["%g"%v for v in items])+"\n")

    six.print_("Done with POTWs")
Example 29
def triangulate_hole(grid,seed_point,max_nodes=5000):
    # manually tell it where the region to be filled is.
    # 5000 ought to be plenty of nodes to get around this loop
    nodes=grid.enclosing_nodestring(seed_point,max_nodes)
    xy_shore=grid.nodes['x'][nodes]

    # Construct a scale based on existing spacing
    # But only do this for edges that are part of one of the original grids
    grid.edge_to_cells() # update edges['cells']
    sample_xy=[]
    sample_scale=[]
    ec=grid.edges_center()
    el=grid.edges_length()

    for na,nb in utils.circular_pairs(nodes):
        j=grid.nodes_to_edge([na,nb])
        if np.any( grid.edges['cells'][j] >= 0 ):
            sample_xy.append(ec[j])
            sample_scale.append(el[j])

    sample_xy=np.array(sample_xy)
    sample_scale=np.array(sample_scale)

    apollo=field.PyApolloniusField(X=sample_xy,F=sample_scale)

    # Prepare that shoreline for grid generation.

    grid_to_pave=unstructured_grid.UnstructuredGrid(max_sides=6)

    AT=front.AdvancingTriangles(grid=grid_to_pave)

    AT.add_curve(xy_shore)
    # This should be safe about not resampling existing edges
    AT.scale=field.ConstantField(50000)

    AT.initialize_boundaries()

    AT.grid.nodes['fixed'][:]=AT.RIGID
    AT.grid.edges['fixed'][:]=AT.RIGID

    # Old code compared nodes to original grids to figure out RIGID
    # more general, if it works, to see if a node participates in any cells.
    # At the same time, record original nodes which end up HINT, so they can
    # be removed later on.
    src_hints=[]
    for n in AT.grid.valid_node_iter():
        n_src=grid.select_nodes_nearest(AT.grid.nodes['x'][n])
        delta=utils.dist( grid.nodes['x'][n_src], AT.grid.nodes['x'][n] )
        assert delta<0.1 # should be 0.0

        if len(grid.node_to_cells(n_src))==0:
            # It should be a HINT
            AT.grid.nodes['fixed'][n]=AT.HINT
            src_hints.append(n_src)
            # And any edges it participates in should not be RIGID either.
            for j in AT.grid.node_to_edges(n):
                AT.grid.edges['fixed'][j]=AT.UNSET

    AT.scale=apollo
    
    if AT.loop():
        AT.grid.renumber()
    else:
        print("Grid generation failed")
        return AT # for debugging -- need to keep a handle on this to see what's up.

    for n in src_hints:
        grid.delete_node_cascade(n)
        
    grid.add_grid(AT.grid)

    # Surprisingly, this works!
    grid.merge_duplicate_nodes()

    grid.renumber()

    return grid
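
A hedged usage sketch: the seed point is any location inside the hole to be re-triangulated. The grid name and coordinates below are assumptions for illustration only.

# sketch: fill one hole in a merged grid; g_merge and the seed coordinates are
# hypothetical, with the seed chosen inside the unmeshed region
g_merge = triangulate_hole(g_merge, seed_point=[552000.0, 4184000.0])
g_merge.plot_edges()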
Example 30
def set_ic_from_usgs_sfbay(model,
                           search_pad=np.timedelta64(20,'D'),
                           scalar='salt',
                           usgs_scalar='Salinity',
                           clip=None,
                           ocean_surf=None,ocean_grad=None,
                           ocean_xy=[534000,4181000],
                           cache_dir=None):
    df=usgs_sfbay.query_usgs_sfbay(period_start=model.run_start-search_pad,
                                   period_end=model.run_start+search_pad,
                                   cache_dir=cache_dir)
    xy=model.ll_to_native( df.loc[:, ['longitude','latitude'] ].values )
    df['x']=xy[:,0]
    df['y']=xy[:,1]

    # make the scalar name generic to simplify code below
    df.rename({usgs_scalar:'scalar'},axis=1,inplace=True)

    # Condense vertical information into a surface value and a linear vertical gradient
    df_by_profile=df.groupby(['Date','Station Number'])[ 'Depth','scalar'].apply( calc_z_gradient )

    # Condense multiple cruises by linearly interpolated that profiles
    df_time_interp=df_by_profile.reset_index().groupby('Station Number').apply( time_interp, target=model.run_start)

    # Get the x/y data back in there
    station_locs=df.groupby('Station Number')['x','y'].first()
    scal_data=pd.merge(df_time_interp,station_locs,left_index=True,right_index=True)

    # Okay - ready for spatial extrapolation

    # This doesn't do so well around ggate, where it doesn't really figure out
    # which sample should dominate.  So force an ocean salinity:
    closest_station=np.argmin(utils.dist(ocean_xy, scal_data[ ['x','y']].values ))

    # default to the value nearest the ggate, but allow caller to override
    if ocean_surf is None:
        ocean_surf=scal_data['scalar_surf'].iloc[closest_station]
    if ocean_grad is None:
        ocean_grad=scal_data['scalar_grad'].iloc[closest_station]

    scal_data_ocean=scal_data.append( {'scalar_surf':ocean_surf,
                                       'scalar_grad':ocean_grad,
                                       'x':ocean_xy[0], # point outside Golden Gate.
                                       'y':ocean_xy[1]},
                                      ignore_index=True )
    scal_surf_2d=interp_4d.weighted_grid_extrapolation(model.grid,scal_data_ocean,
                                                       value_col='scalar_surf',
                                                       alpha=1e-5,
                                                       weight_col=None)
    scal_grad_2d=interp_4d.weighted_grid_extrapolation(model.grid,scal_data_ocean,
                                                       value_col='scalar_grad',
                                                       alpha=1e-5,
                                                       weight_col=None)

    # Set salinity in the ic file:
    # salt has dimensions [time,layer,cell], use isel and transpose
    # to be sure
    depth_Nk=np.cumsum(model.ic_ds.dz.values) # positive down
    ic_values=scal_surf_2d[None,:] + scal_grad_2d[None,:]*depth_Nk[:,None]
    if clip is not None:
        ic_values=ic_values.clip(*clip)
    model.ic_ds[scalar].isel(time=0).transpose('Nk','Nc').values[...]=ic_values