Example #1
    def nudge_node_orthogonal(self, n):
        g = self.g
        n_cells = g.node_to_cells(n)

        centers = g.cells_center(refresh=n_cells, mode='sequential')

        targets = []  # list of (x,y) which fit the individual cell circumcenters
        for n_cell in n_cells:
            cell_nodes = g.cell_to_nodes(n_cell)
            # could potentially skip n_cell==n, since we can move that one.
            if len(cell_nodes) <= 3:
                continue  # no orthogonality constraints from triangles at this point.

            offsets = g.nodes['x'][cell_nodes] - centers[n_cell, :]
            dists = mag(offsets)
            radius = np.mean(dists)

            # so for this cell, we would like n to be a distance of radius away
            # from centers[n_cell]
            n_unit = to_unit(g.nodes['x'][n] - centers[n_cell])

            good_xy = centers[n_cell] + n_unit * radius
            targets.append(good_xy)
        if len(targets):
            target = np.mean(targets, axis=0)
            g.modify_node(n, x=target)
            return True
        else:
            return False
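
The nudge itself is plain vector arithmetic: pull the node onto the circle of mean radius around the cell's circumcenter, along its current bearing from that center. A minimal, self-contained sketch of that single-cell step, using stand-in mag/to_unit helpers in place of the grid library's versions and made-up coordinates:

import numpy as np

def mag(v):
    # vector length(s) along the last axis
    return np.sqrt((np.asarray(v) ** 2).sum(axis=-1))

def to_unit(v):
    # scale a vector to unit length
    return np.asarray(v) / mag(v)

# hypothetical inputs: a cell circumcenter, the cell's node coordinates, and the node to nudge
center = np.array([0.0, 0.0])
cell_nodes_xy = np.array([[1.0, 0.1], [0.0, 1.2], [-1.1, 0.0], [0.0, -0.9]])
node_xy = np.array([0.5, -0.6])

radius = mag(cell_nodes_xy - center).mean()            # mean distance of the cell's nodes
target = center + to_unit(node_xy - center) * radius   # same bearing, corrected distance
print(target)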
Example #2
def vec_ll2utm(utm_xy):
    ll0=utm2ll(utm_xy)

    # go a bit more brute force:
    dlat=dlon=0.0001
    ll_east=[ll0[0]+dlon,ll0[1]]
    ll_north=[ll0[0],ll0[1]+dlat]
    utm_east=ll2utm(ll_east) - utm_xy
    utm_north=ll2utm(ll_north) - utm_xy

    utm_to_true_distance_n=1000*utils.haversine(ll0,ll_north) / utils.mag(utm_north)
    utm_to_true_distance_e=1000*utils.haversine(ll0,ll_east) / utils.mag(utm_east)
    print("utm_to_true_distance e=%.5f n=%.5f"%(utm_to_true_distance_e,utm_to_true_distance_n))

    east_norm=utils.to_unit(utm_east)
    # a matrix which left-multiplies a true east/north vector to
    # get an easting/northing vector.  The distance correction has
    # not been applied yet.
    rot=np.array([ [east_norm[0], -east_norm[1]],  #  u_east
                   [east_norm[1], east_norm[0]]])  #  v_north

    # e.g. if 100m in utm is 101m in reality, then an east/north vector gets rotated
    # to align with utm, but then we scale it up, too.
    # temporarily disabled to see the magnitude of its effect.
    # rot*=utm_to_true_distance
    # gets tricky... top row rot[0,:] yields a utm easting component, which will
    # later be multiplied by a northing distance of the flux face.  northing distances
    # should be adjusted to true with utm_to_true_distance_n,
    # but that made the results slightly worse...
    # it should be multiplication, but trying division...
    rot[0,:] /= utm_to_true_distance_n
    rot[1,:] /= utm_to_true_distance_e
    
    # The good news is that the angle error in x is the same as in y, about 1 degree for this point.
    #print("East is %.3f deg, vs 0"%(np.arctan2(utm_east[1],utm_east[0])*180/np.pi) )
    #print("North is %.3f deg, vs 90"%(np.arctan2(utm_north[1],utm_north[0])*180/np.pi) )
    return rot
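
The matrix above is the standard 2x2 rotation determined by where true east lands in UTM: if the unit easting vector is (c, s), the matrix is [[c, -s], [s, c]]. A small sketch, assuming a hypothetical 1-degree grid convergence, of how left-multiplication maps a true east/north vector onto easting/northing components:

import numpy as np

theta = np.deg2rad(1.0)                               # hypothetical grid convergence angle
east_norm = np.array([np.cos(theta), np.sin(theta)])  # unit vector of true east in UTM

rot = np.array([[east_norm[0], -east_norm[1]],
                [east_norm[1],  east_norm[0]]])

true_vec = np.array([1.0, 0.0])   # 1 m/s due (true) east
utm_vec = rot @ true_vec          # the same vector expressed on the UTM grid
print(utm_vec)                    # ~[0.99985, 0.01745]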
Example #3
for transect in transects:
    tran_fig_dir = get_tran_fig_dir(transect)
    os.path.exists(tran_fig_dir) or os.mkdir(tran_fig_dir)

    tran_dss = read_transect(transect)

    # Plan view quiver for individual repeats:
    zoom_overview = (646804, 647544, 4185572, 4186124)
    ds0 = tran_dss[0]
    zoom_tight = tran_zoom(ds0)

    # Choose a coordinate system from the first transect:
    xy = np.c_[ds0.x_utm.values, ds0.y_utm.values]
    xy0 = xy[0]
    across_unit = utils.to_unit(xy[-1] - xy[0])
    # Roughly force to point river right
    mean_u = ds0.Ve.mean()
    mean_v = ds0.Vn.mean()

    # resolve ambiguity to make mean flow positive, roughly assuming
    # ebb/river flow
    if np.dot(across_unit, [mean_v, -mean_u]) < 0:
        across_unit *= -1
        xy0 = xy[-1]
    # Then this is downstream:
    along_unit = np.array([-across_unit[1], across_unit[0]])

    def ds_to_linear(ds):
        xy = np.c_[ds.x_utm.values, ds.y_utm.values]
        # pull dimensions from ds, distances from vector product
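
The coordinate setup above relies on two small tricks: a dot product against the 90-degree-rotated mean flow to resolve the sign of the transect direction, and a counter-clockwise 90-degree rotation to turn the across-channel unit vector into the along-channel (downstream) one. A short sketch with made-up vectors:

import numpy as np

def to_unit(v):
    v = np.asarray(v, np.float64)
    return v / np.sqrt((v ** 2).sum())

across_unit = to_unit([3.0, 1.0])   # hypothetical transect direction
mean_uv = np.array([0.2, -0.6])     # hypothetical mean (east, north) velocity

# (v, -u) is the mean flow rotated 90 degrees clockwise; a negative dot product
# means the eventual along-channel vector would oppose the mean flow, so flip.
if np.dot(across_unit, [mean_uv[1], -mean_uv[0]]) < 0:
    across_unit = -across_unit

# along-channel (downstream) is across-channel rotated 90 degrees counter-clockwise
along_unit = np.array([-across_unit[1], across_unit[0]])
print(across_unit, along_unit)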
Example #4
def adjust_text_position(ax, max_iter=200):
    texts = ax.texts

    bboxes = []
    for txt in texts:
        ext = txt.get_window_extent()
        bboxes.append([[ext.xmin, ext.ymin], [ext.xmax, ext.ymax]])
    bboxes = np.array(bboxes)

    # each iteration moves overlapping texts by about this many pixels
    dx = 2.0
    # come up with pixel offsets for each label.
    pix_offsets = np.zeros((len(texts), 1, 2), np.float64)
    for _ in range(max_iter):
        changed = False

        new_bboxes = bboxes + pix_offsets
        for a in range(len(texts)):
            for b in range(a + 1, len(texts)):
                # is there any force between these two labels?
                # check overlap

                int_min = np.maximum(new_bboxes[a, 0, :], new_bboxes[b, 0, :])
                int_max = np.minimum(new_bboxes[a, 1, :], new_bboxes[b, 1, :])

                if np.all(int_min < int_max):
                    #print("Collision %s - %s"%(texts[a].get_text(),
                    #                           texts[b].get_text()))

                    # This could probably be faster and less verbose.
                    # The separating axis is taken from the overlapping region
                    # and its direction.

                    # Choose the direction that most quickly eliminates the overlap
                    # area; could also just choose the least-overlapping direction.
                    opt = utils.to_unit(
                        np.array(
                            [int_max[1] - int_min[1],
                             int_max[0] - int_min[0]]))

                    ab = new_bboxes[b].mean(axis=0) - new_bboxes[a].mean(
                        axis=0)
                    if np.dot(opt, ab) < 0:
                        opt *= -1

                    pix_offsets[a, 0, :] -= dx * opt / 2
                    pix_offsets[b, 0, :] += dx * opt / 2

                    changed = True
        if not changed:
            break

    # Update positions of the texts:
    deltas = np.zeros((len(texts), 2), np.float64)
    for i in range(len(texts)):
        txt = texts[i]
        xform = txt.get_transform()
        ixform = xform.inverted()
        p = bboxes[i, 0, :]
        pos0 = ixform.transform_point(p)
        pos_new = ixform.transform_point(p + pix_offsets[i, 0, :])
        deltas[i] = delta = pos_new - pos0
        txt.set_position(np.array(txt.get_position()) + delta)
    return deltas
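
The repulsion step reduces to an axis-aligned bounding-box intersection test plus a push direction derived from the shape of the overlap. A self-contained sketch of that test for two hypothetical pixel-space boxes stored as [[xmin, ymin], [xmax, ymax]]:

import numpy as np

def to_unit(v):
    v = np.asarray(v, np.float64)
    return v / np.sqrt((v ** 2).sum())

box_a = np.array([[0.0, 0.0], [10.0, 4.0]])   # hypothetical label bounding boxes, pixels
box_b = np.array([[8.0, 1.0], [18.0, 5.0]])

int_min = np.maximum(box_a[0], box_b[0])      # lower-left corner of the intersection
int_max = np.minimum(box_a[1], box_b[1])      # upper-right corner of the intersection

if np.all(int_min < int_max):                 # the boxes overlap
    # weight the push toward the axis where the overlap is thinnest:
    # x component = overlap height, y component = overlap width
    opt = to_unit([int_max[1] - int_min[1], int_max[0] - int_min[0]])
    ab = box_b.mean(axis=0) - box_a.mean(axis=0)
    if np.dot(opt, ab) < 0:
        opt = -opt                            # make sure b is pushed away from a
    print("push b by", opt, "and a by", -opt)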
Example #5
def subdivide():
    # what does cdt need to provide for this to work?
    vcenters = cdt.cells_center(refresh=True)

    n_edges = cdt.Nedges()
    to_subdivide = []

    min_edge_length = 10.0

    for j_g in g.valid_edge_iter():
        a, b = g.edges['nodes'][j_g]
        j = cdt.nodes_to_edge([a, b])
        cells = cdt.edges['cells'][j]

        assert cells.max() >= 0

        for ci, c in enumerate(cells):
            if c < 0:
                continue

            # Need the signed distance here:
            pntV = vcenters[c]
            pntA = cdt.nodes['x'][a]
            pntB = cdt.nodes['x'][b]
            AB = pntB - pntA
            AV = pntV - pntA
            # unit normal pointing to the left of A->B; flip it for the
            # cell on the other side of the edge
            left = utils.to_unit(np.array([-AB[1], AB[0]]))
            if ci == 1:
                left *= -1
            line_clearance = np.dot(left, AV)
            v_radius = utils.mag(AV)

            if utils.mag(AB) < min_edge_length:
                continue
            # line_clearance=utils.point_line_distance(vcenters[c],
            #                                         cdt.nodes['x'][ [a,b] ] )
            # v_radius=utils.dist( vcenters[c], cdt.nodes['x'][a] )

            if (v_radius > 1.2 * line_clearance) and (v_radius >
                                                      min_edge_length):
                # second check - make sure that neither AC nor BC are also on the
                # boundary
                c_j = cdt.cell_to_edges(c)
                count = cdt.edges['constrained'][c_j].sum()

                if count == 1:
                    to_subdivide.append(j_g)
                    break
                elif count == 0:
                    print("While looking at edge %d=(%d,%d)" % (j_g, a, b))
                    raise Exception(
                        "We should have found at least 1 boundary edge")
                elif count == 3:
                    print(
                        "WARNING: Unexpected count of boundary edges in one element: ",
                        count)

    for j in to_subdivide:
        sys.stdout.write(str(j) + ",")
        sys.stdout.flush()
        g.split_edge(j)

    return len(to_subdivide)
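
The test above compares the circumradius against the signed distance from the circumcenter to the constrained edge, computed via the left-hand perpendicular of the edge vector. A minimal sketch of that geometry with made-up points:

import numpy as np

def mag(v):
    return np.sqrt((np.asarray(v) ** 2).sum(axis=-1))

def to_unit(v):
    return np.asarray(v) / mag(v)

pntA = np.array([0.0, 0.0])     # hypothetical constrained edge A-B
pntB = np.array([10.0, 0.0])
pntV = np.array([5.0, 1.0])     # circumcenter of the adjacent triangle

AB = pntB - pntA
AV = pntV - pntA
left = to_unit([-AB[1], AB[0]])      # unit normal pointing to the left of A->B
line_clearance = np.dot(left, AV)    # signed distance of the circumcenter from the edge
v_radius = mag(AV)                   # circumradius (A lies on the circumcircle)

# a radius much larger than the clearance flags a skinny triangle whose
# circumcircle reaches across the constrained edge
print(v_radius > 1.2 * line_clearance)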
Example #6
# All 'to' indices are real elements.
# 'from' indices include boundaries.

from_xy = np.zeros((len(FlowLink_from), 2), 'f8')
to_xy = np.zeros((len(FlowLink_from), 2), 'f8')
is_bc = FlowLink_from >= len(elt_xy)
from_xy[~is_bc] = elt_xy[FlowLink_from[~is_bc]]

to_xy[:, :] = elt_xy[FlowLink_to]
from_xy[is_bc] = to_xy[is_bc]

link_xy = 0.5 * (from_xy + to_xy)
link_xy[is_bc] += (np.random.random((is_bc.sum(), 2)) - 0.5) * 1000

link_norms = utils.to_unit(to_xy - from_xy)
link_norms[np.isnan(link_norms)] = 0

# Not a robust way to discern normals to go with the boundary-condition unorm values,
# but we can at least scatter plot them.

#

if sign == 'reverse':
    num = 1
else:
    num = 2

plt.figure(num).clf()
fig, ax = plt.subplots(num=num)
fig.suptitle('%s BCs' % sign)
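
The normalization yields NaN components for zero-length links (boundary links whose 'from' point was just copied from the 'to' point), which is why the code zeros them afterward. A small sketch of that pattern with a stand-in row-wise normalizer:

import numpy as np

def to_unit(vecs):
    # normalize each row; zero-length rows come out as NaN
    vecs = np.asarray(vecs, np.float64)
    with np.errstate(invalid='ignore', divide='ignore'):
        return vecs / np.sqrt((vecs ** 2).sum(axis=-1, keepdims=True))

# hypothetical link endpoints; the second link is a boundary link with from == to
from_xy = np.array([[0.0, 0.0], [5.0, 5.0]])
to_xy = np.array([[3.0, 4.0], [5.0, 5.0]])

link_norms = to_unit(to_xy - from_xy)
link_norms[np.isnan(link_norms)] = 0   # zero out the undefined boundary normals
print(link_norms)                      # [[0.6, 0.8], [0.0, 0.0]]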
Example #7
##

differ.calc_fluxes()
differ.calc_flux_vectors_and_grad()
differ.flux_vector_c

##

plt.figure(1).clf()
fig, ax = plt.subplots(1, 1, num=1)

g_sub.plot_cells(values=np.log(differ.C_solved), cmap='jet')
ax.axis('equal')

unit_flux_vec = utils.to_unit(differ.flux_vector_c)

ax.quiver(g_sub.cells_center()[:, 0],
          g_sub.cells_center()[:, 1], unit_flux_vec[:, 0], unit_flux_vec[:, 1])

##

# That's not too bad...
# See how it traces using just the one boundary; we'll come back later to layer in
# additional boundaries.
six.moves.reload_module(stream_tracer)
stream_tracer.prepare_grid(g_sub)

alongs = []

for i in utils.progress(range(len(cutoff_xyz))):
Example #8
    cc = self.cells_center()
    for c in range(self.Ncells()):
        pnts = []
        for j in self.cell_to_edges(c):
            pnts.append(
                [ec[j, 0] - cc[c, 0], ec[j, 1] - cc[c, 1], edge_vals[j]])
        pnts = np.array(pnts)
        cell_side_gradients[c, 0], _ = np.polyfit(pnts[:, 0], pnts[:, 2], 1)
        cell_side_gradients[c, 1], _ = np.polyfit(pnts[:, 1], pnts[:, 2], 1)
    return cell_side_gradients


grad_dzf = cell_gradient_of_edges(g, ds.dzf.values[ti, 0, :])

u_unit = np.c_[ds.uc[ti, 0, :], ds.vc[ti, 0, :]]
u_unit = utils.to_unit(u_unit)
u_unit[np.isnan(u_unit)] = 0.0
dzf_dot_u = (grad_dzf[:, 0] * u_unit[:, 0] + grad_dzf[:, 1] * u_unit[:, 1])

#Qgrad=ax.quiver(cc[sel,0],cc[sel,1],grad_dzf[sel,0],grad_dzf[sel,1],
#                color='r')
ccoll = g.plot_cells(ax=ax, values=dzf_dot_u)
ccoll.set_zorder(-5)
ccoll.set_cmap('seismic')
ccoll.set_clim([-0.1, 0.1])
##
if 0:
    gg = unstructured_grid.UnstructuredGrid.read_suntans(run_dir)
    depth = np.loadtxt(os.path.join(run_dir, "depths.dat-voro"))
    ccoll = gg.plot_cells(values=depth[:, 2], zorder=-2)
    cmap = scmap.load_gradient('ncview_banded.cpt')
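
The gradient fit above leans on np.polyfit with degree 1 returning [slope, intercept], so unpacking gives the least-squares slope of the edge values against the x (then y) offsets. A tiny sketch of that unpacking with made-up numbers:

import numpy as np

# hypothetical edge-center offsets from the cell center, and edge values on a line
dx = np.array([-1.0, 0.0, 1.0, 0.5])
vals = np.array([0.9, 1.0, 1.1, 1.05])

slope, intercept = np.polyfit(dx, vals, 1)   # degree-1 fit returns [slope, intercept]
print(slope, intercept)                      # ~0.1 per unit offset, ~1.0 at the cell center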
Example #9
    def summary_figure(self, num=3):
        fig = plt.figure(num)
        fig.clf()

        ax_map = fig.add_subplot(1, 2, 1)
        ax_V = fig.add_subplot(1, 2, 2)

        xyxy = self.region_poly.bounds
        clip = [xyxy[0], xyxy[2], xyxy[1], xyxy[3]]
        hyp = self.hyp

        # hyp.g.plot_edges(clip=clip,ax=ax_map,color='k',lw=0.4,alpha=0.4)
        plot_wkb.plot_wkb(hyp.g_poly,
                          fc='0.8',
                          ec='none',
                          ax=ax_map,
                          zorder=-4)
        hyp.g.plot_cells(mask=self.region_cells, ax=ax_map, color='orange')

        # Show the location of the reference station
        ax_map.plot([
            self.hyp.his.station_x_coordinate.isel(
                stations=self.region_station)
        ], [
            self.hyp.his.station_y_coordinate.isel(
                stations=self.region_station)
        ], 'go')
        # bound_xs=self.bounding_cross_sections()
        xys = []
        uvs = []
        for k in self.bound_xs.keys():
            sgn = self.bound_xs[k]
            sec_x = self.hyp.his.cross_section_x_coordinate.isel(
                cross_section=k).values
            sec_y = self.hyp.his.cross_section_y_coordinate.isel(
                cross_section=k).values
            sec_xy = np.c_[sec_x, sec_y]
            sec_xy = sec_xy[sec_xy[:, 0] < 1e10]
            ax_map.plot(sec_xy[:, 0], sec_xy[:, 1], 'r-')
            uvs.append(sgn * (sec_xy[-1] - sec_xy[0]))
            xys.append(sec_xy.mean(axis=0))
        xys = np.array(xys)
        uvs = np.array(uvs)
        uvs = utils.rot(-np.pi / 2, utils.to_unit(uvs))
        ax_map.quiver(xys[:, 0], xys[:, 1], uvs[:, 0], uvs[:, 1])

        ax_map.axis('equal')

        order = np.argsort(self.eta)
        ax_V.plot(self.eta[order], self.vol_and_Q[order], label="model")
        ax_V.plot(self.dem_etas,
                  self.dem_volumes,
                  label='DEM',
                  color='k',
                  lw=0.6)

        # and what we expect from the model, assuming nonlin2d=0
        cell_etas, cell_vols = self.calculate_cell_volumes()
        ax_V.plot(cell_etas, cell_vols, label='Cell depth', color='r', lw=0.6)

        ax_V.set_ylabel('Vol')
        ax_V.set_xlabel('eta')

        ax_V.legend()
        return fig
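
The quiver arrows come from rotating the unit vector along each cross-section by -90 degrees so it points across the section. A minimal stand-in for the rotation used here, assuming it acts on rows of (x, y) pairs and rotates counter-clockwise for positive angles:

import numpy as np

def rot(angle, pnts):
    # rotate (x, y) rows counter-clockwise by angle radians
    R = np.array([[np.cos(angle), -np.sin(angle)],
                  [np.sin(angle),  np.cos(angle)]])
    return np.asarray(pnts) @ R.T

def to_unit(vecs):
    vecs = np.asarray(vecs, np.float64)
    return vecs / np.sqrt((vecs ** 2).sum(axis=-1, keepdims=True))

# hypothetical section end-to-end vectors, already signed by the flux convention
uvs = np.array([[10.0, 0.0], [0.0, -4.0]])
arrows = rot(-np.pi / 2, to_unit(uvs))   # unit vectors turned 90 degrees clockwise
print(arrows)                            # ~[[0, -1], [-1, 0]]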