def nudge_node_orthogonal(self, n):
    g = self.g
    n_cells = g.node_to_cells(n)
    centers = g.cells_center(refresh=n_cells, mode='sequential')

    targets = []  # list of (x,y) which fit the individual cell circumcenters
    for n_cell in n_cells:
        cell_nodes = g.cell_to_nodes(n_cell)
        # could potentially skip n_cell==n, since we can move that one.
        if len(cell_nodes) <= 3:
            continue  # no orthogonality constraints from triangles at this point.
        offsets = g.nodes['x'][cell_nodes] - centers[n_cell, :]
        dists = mag(offsets)
        radius = np.mean(dists)
        # so for this cell, we would like n to be a distance of radius away
        # from centers[n_cell]
        n_unit = to_unit(g.nodes['x'][n] - centers[n_cell])
        good_xy = centers[n_cell] + n_unit * radius
        targets.append(good_xy)

    if len(targets):
        target = np.mean(targets, axis=0)
        g.modify_node(n, x=target)
        return True
    else:
        return False
def eta_fn(self, xy, t):
    """
    Analytical solution for freesurface, used for initial condition
    and validation.
    """
    r = utils.mag(xy)
    _, _, eta = self.analytical_ur_utan_eta(r, t)
    return eta
def add_more_swim_data(track):
    track['swim_speed'] = utils.mag(track.loc[:, ['swim_u', 'swim_v']].values)

    # and the swim-only track
    dt = np.diff(track['tnum'].values)
    dx = np.cumsum(dt * track['swim_urel'].values[:-1])
    dy = np.cumsum(dt * track['swim_vrel'].values[:-1])
    track['swim_x'] = np.r_[0, dx]
    track['swim_y'] = np.r_[0, dy]
def test_diel(column, df_start=df_start, num=1):
    data = df_start[column].values
    vec = np.c_[np.cos(tod_mid_rad) * data,
                np.sin(tod_mid_rad) * data]
    vec_mean = vec.mean(axis=0)
    vec_mag = utils.mag(vec_mean)

    shuffle_mags = []
    shuffle_vecs = []
    N = 10000  # should be ~1e2 bigger
    for _ in range(N):
        shuffle_rad = np.random.permutation(tod_mid_rad)
        shuffle_mean = np.mean(np.c_[np.cos(shuffle_rad) * data,
                                     np.sin(shuffle_rad) * data],
                               axis=0)
        shuffle_vecs.append(shuffle_mean)
        shuffle_mag = utils.mag(shuffle_mean)
        shuffle_mags.append(shuffle_mag)
    shuffle_vecs = np.array(shuffle_vecs)

    plt.figure(num).clf()
    fig, ax = plt.subplots(num=num)

    ax.plot(vec[:, 0], vec[:, 1], 'g+')
    ax.axhline(0, color='k', lw=0.5)
    ax.axvline(0, color='k', lw=0.5)
    ax.plot([vec_mean[0]], [vec_mean[1]], 'mo', zorder=3)
    stride = slice(None, None, N // 1000)
    ax.plot(shuffle_vecs[stride, 0], shuffle_vecs[stride, 1], 'ko', alpha=0.1)

    mag_sort = np.sort(shuffle_mags)
    p = 1 - np.searchsorted(mag_sort, vec_mag) / float(1 + len(mag_sort))
    if mag_sort[-1] < vec_mag:
        print("p < %.6f" % p)
    else:
        print("p ~ %.6f" % p)

    ax.axis('equal')
    ax.set_adjustable('box')
    ax.axis(xmin=-0.15, xmax=0.15, ymin=-0.15, ymax=0.15)
    fig.savefig(os.path.join(fig_dir, f'diel-{column}-scatter.png'), dpi=200)
def step_output(self, n, ei, **kwargs):
    if n % 20:
        return
    ui = self.get_center_vel(self.uj)

    plt.figure(2).clf()
    fig, axs = plt.subplots(3, 1, sharex=True, sharey=True, num=2)
    ax_h = axs[0]
    ax_ei = axs[1]
    ax_zi = axs[2]

    mag = utils.mag(ui)

    ccoll = g.plot_cells(values=sim.ei + sim.zi, ax=ax_h)
    plt.colorbar(ccoll, ax=ax_h, label='h=ei+zi')

    if 0:
        cc = g.cells_center()
        quiv = ax.quiver(cc[:, 0], cc[:, 1], ui[:, 0], ui[:, 1])
    ec = g.edges_center()
    quiv = ax_h.quiver(ec[:, 0], ec[:, 1],
                       self.uj * sim.en[:, 0], self.uj * sim.en[:, 1],
                       color='b', scale=0.5)
    umag = 0.1  # np.percentile(uj,90)
    ax_h.quiverkey(quiv, 0.1, 0.1, umag, "%.4g m/s" % umag)

    hj = self.hjstar
    g.plot_edges(color='k', lw=0.5, ax=ax_h,
                 labeler=lambda j, r: "%.2f" % hj[j])

    eicoll = g.plot_cells(values=self.ei, ax=ax_ei,
                          labeler=lambda i, r: "%.2f" % (self.ei[i]))
    plt.colorbar(eicoll, ax=ax_ei, label='ei')
    zicoll = g.plot_cells(values=-self.zi, ax=ax_zi,
                          labeler=lambda i, r: "%.2f" % (-self.zi[i]))
    plt.colorbar(zicoll, ax=ax_zi, label='-zi')

    axs[0].axis('equal')
    for ax in axs:
        plt.setp(ax.texts, clip_on=1)
    plt.draw()
    plt.pause(0.01)
def interp_invdist(data):
    # data: dataset with target, stream_dist, sample_z
    stream_trans = data.stream_dist.values
    valid = np.isfinite(stream_trans[:, 0])
    dist = utils.mag(stream_trans[valid, :] * scale)
    if len(dist) == 0:
        return np.nan
    weights = (nugget + dist)**power
    z_interp = (weights * data.sample_z.values[valid]).sum() / weights.sum()
    return z_interp
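# A minimal numpy-only sketch of the inverse-distance weighting used above,
# with made-up values for the distances, elevations, nugget and power (the
# power is negative so that nearer samples get larger weights).
import numpy as np

dist = np.array([2.0, 5.0, 11.0])    # distances in stream coordinates
sample_z = np.array([1.4, 2.0, 3.2])
nugget, power = 0.5, -2
weights = (nugget + dist)**power
z_interp = (weights * sample_z).sum() / weights.sum()  # weighted toward the nearest sample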
def recon(xy, k=1000, eps=0.5, interp='rbf'):
    target = [0, xy[0], xy[1]]
    nbr_dists, nbrs = kdt_txy.query(target, k=k)
    nbrs = np.array(nbrs)

    if 0:  # trim to a single, closest sample per track
        nbr_tracks = all_to_track_i[nbrs, 0]
        slim_nbrs = []
        for k, idxs in utils.enumerate_groups(nbr_tracks):
            best = np.argmin(nbr_dists[idxs])
            slim_nbrs.append(nbrs[idxs[best]])
        nbrs = np.array(slim_nbrs)

    nbr_z = xyzs[all_to_track_i[nbrs, 0], 2]

    if interp == 'rbf':
        try:
            rbf = Rbf(all_txy[nbrs, 0], all_txy[nbrs, 1], all_txy[nbrs, 2],
                      nbr_z, epsilon=eps, function='linear')
        except np.linalg.LinAlgError:
            print("Linear algebra error")
            return np.nan
        z_pred = rbf(*target)
    elif interp == 'mlr':
        clf = linear_model.LinearRegression()
        clf.fit(all_txy[nbrs, :], nbr_z)
        z_pred = clf.predict([target])[0]
    elif interp == 'griddata':
        z_pred = scipy.interpolate.griddata(all_txy[nbrs, :], nbr_z, target,
                                            rescale=True)
    elif interp == 'krige':
        points = all_txy[nbrs, :]
        values = nbr_z
        gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1., nugget=0.001)
        gp.fit(all_txy[nbrs, :], nbr_z)
        z_pred = gp.predict([target])[0]
    elif interp == 'idw':
        delta = all_txy[nbrs, :] - np.asarray(target)
        delta[:, 0] *= 10  # rescale streamdistance?
        dists = utils.mag(delta)
        weights = (dists + eps)**(-2)
        weights = weights / weights.sum()
        z_pred = (weights * nbr_z).sum()
    return z_pred
def one_point_quad_cost(x, edge_scales, quads, para_scale, perp_scale):
    # orthogonality cost:
    ortho_cost = 0.0

    base_scale = np.sqrt(para_scale**2 + perp_scale**2)

    quads[:, 0, :] = x  # update the first point of each quad

    for quad in quads:
        cc = utils.poly_circumcenter(quad)
        dists = utils.mag(quad - cc)
        err = np.std(dists) / base_scale
        ortho_cost += 10 * err  # ad hoc hoc hoc

    # length cost:
    scale_cost = 0.0

    dists = utils.mag(x - edge_scales[:, :2])
    errs = (dists - edge_scales[:, 2]) / edge_scales[:, 2]
    scale_cost = (2 * errs**2).sum()

    return ortho_cost + scale_cost
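# A sketch of how this cost could be minimized for a single free point with a
# generic optimizer such as scipy.optimize.fmin.  The quads (first vertex is
# the free point) and edge_scales rows (x, y, target length) below are
# hypothetical, purely to make the call self-contained.
import numpy as np
from scipy.optimize import fmin

quads = np.array([[[0.1, 0.1], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
                  [[0.1, 0.1], [0.0, -1.0], [1.0, -1.0], [1.0, 0.0]]])
edge_scales = np.array([[1.0, 0.0, 1.0],
                        [0.0, 1.0, 1.0]])
x_opt = fmin(one_point_quad_cost, np.array([0.1, 0.1]),
             args=(edge_scales, quads, 1.0, 1.0))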
def vec_ll2utm(utm_xy):
    ll0 = utm2ll(utm_xy)
    # go a bit more brute force:
    dlat = dlon = 0.0001
    ll_east = [ll0[0] + dlon, ll0[1]]
    ll_north = [ll0[0], ll0[1] + dlat]
    utm_east = ll2utm(ll_east) - utm_xy
    utm_north = ll2utm(ll_north) - utm_xy
    utm_to_true_distance_n = 1000 * utils.haversine(ll0, ll_north) / utils.mag(utm_north)
    utm_to_true_distance_e = 1000 * utils.haversine(ll0, ll_east) / utils.mag(utm_east)
    print("utm_to_true_distance e=%.5f n=%.5f" % (utm_to_true_distance_e, utm_to_true_distance_n))

    east_norm = utils.to_unit(utm_east)

    # a matrix which left-multiplies a true east/north vector, to
    # get an easting/northing vector.  have not yet applied distance
    # correction
    rot = np.array([[east_norm[0], -east_norm[1]],   # u_east
                    [east_norm[1],  east_norm[0]]])  # v_north

    # e.g. if 100m in utm is 101m in reality, then an east/north vector gets rotated
    # to align with utm, but then we scale it up, too.
    # temporarily disable to see magnitude of its effect.
    # rot*=utm_to_true_distance

    # gets tricky... top row rot[0,:] yields a utm easting component, which will
    # later be multiplied by a northing distance of the flux face.  northing distances
    # should be adjusted to true with utm_to_distance_n.
    # but that made the results slightly worse...
    # it should be multiplication, but trying division...
    rot[0, :] /= utm_to_true_distance_n
    rot[1, :] /= utm_to_true_distance_e

    # good news is that angle error in x is same as in y, about 1 degree for this point.
    # print("East is %.3f deg, vs 0"%(np.arctan2(utm_east[1],utm_east[0])*180/np.pi) )
    # print("North is %.3f deg, vs 90"%(np.arctan2(utm_north[1],utm_north[0])*180/np.pi) )
    return rot
def init_history(self):
    L_zhang = 610e3
    history_radii = [self.L * 300e3 / L_zhang,
                     # self.L*610e3/L_zhang  # this is what Zhang reports, but for me that's dry
                     self.L * 590e3 / L_zhang]
    # The y=1 bit is to help get fully into a cell
    self.history_cells = [self.grd.select_cells_nearest([r, 1])
                          for r in history_radii]
    self.history_r = utils.mag(self.grd.cells_center()[self.history_cells, :])
    Nh = len(self.history_cells)
    self.history = np.zeros(0, [('t', np.float64),
                                ('eta_model', np.float64, Nh),
                                ('eta_ana', np.float64, Nh),
                                ('ur_model', np.float64, Nh),
                                ('ur_ana', np.float64, Nh)])
def update_particle_velocity_for_new_step(self):
    # face, layer, time.
    # assumes 2D here.
    u = self.current_nc.cell_east_velocity.values[self.nc_time_i, :]
    v = self.current_nc.cell_north_velocity.values[self.nc_time_i, :]
    self.U = np.array([u, v]).T  # again assume 2D

    if self.unit_speed:
        mag = utils.mag(self.U)
        self.U[mag > 0] /= mag[mag > 0][:, None]

    if self.reverse:
        self.U *= -1  # note this is not a view, so this is safe.

    # A little dicey - this overwrites any memory of convergent edges,
    # so every input interval, it's going to forget
    self.P['u'] = self.U[self.P['c']]
def Xstep_output(self, n, ei, **kwargs):
    if self.fig is None:
        return
    if n % 20:
        return
    self.prep_figure()
    ccoll = sim.grd.plot_cells(values=sim.hi, cmap='gray', ax=self.ax)
    cc = sim.grd.cells_center()
    ui = sim.get_center_vel(sim.uj)
    mags = utils.mag(ui)
    quiv = self.ax.quiver(cc[:, 0], cc[:, 1], ui[:, 0], ui[:, 1],
                          mags, cmap=cm.rainbow, clim=[0, 1.0])
    # plt.colorbar(ccoll)
    self.ax.quiverkey(quiv, 0.1, 0.1, 0.5, "0.5 m/s")
    self.ax.axis('equal')
    self.fig.canvas.draw()
    plt.pause(0.001)
def interp_invdist_plane(data):
    # data: dataset with target, stream_dist, sample_z
    stream_trans = data.stream_dist.values
    sample_z = data.sample_z.values
    valid = np.isfinite(stream_trans[:, 0])
    dist = utils.mag(stream_trans[valid, :] * scale)
    if len(dist) == 0:
        return np.nan
    weights = (nugget + dist)**power
    # Fit a plane to the data
    clf = linear_model.LinearRegression()
    clf.fit(stream_trans[valid, :], sample_z[valid], weights)
    z_pred = clf.predict(np.array([[0, 0]]))[0]
    return z_pred
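# A self-contained sketch of the weighted plane fit above: LinearRegression.fit
# takes sample weights as its third argument, so closer samples (in stream
# coordinates) dominate the fitted plane, and predicting at (0,0) evaluates the
# plane at the target itself.  The coordinates, elevations, nugget and power
# here are made up for illustration.
import numpy as np
from sklearn import linear_model

stream_xy = np.array([[5.0, 1.0], [-3.0, 0.5], [1.0, -4.0], [8.0, 2.0]])
sample_z = np.array([2.1, 2.4, 1.9, 1.5])
w = (0.5 + np.hypot(stream_xy[:, 0], stream_xy[:, 1]))**-2  # inverse-distance weights

clf = linear_model.LinearRegression()
clf.fit(stream_xy, sample_z, w)
z_at_target = clf.predict(np.array([[0.0, 0.0]]))[0]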
def samples_for_target(x_target, N=500):
    x_along = steady_streamline_twoways(g, U, x_target)
    x_across = steady_streamline_twoways(g_rot, U_rot, x_target)

    # nearby source samples
    dists = utils.mag(x_target - source_ds.x)
    close_samples = np.argsort(dists)[:N]

    close_distances = []
    for s in close_samples:
        close_distances.append(
            stream_distance(x_target, s, x_along=x_along, x_across=x_across))
    close_distances = np.array(close_distances)

    ds = xr.Dataset()
    ds['target'] = ('xy',), x_target.copy()
    ds['target_z'] = (), dem(x_target)
    ds['stream_dist'] = ('sample', 'st'), close_distances
    ds['sample_z'] = ('sample',), source_ds.z.values[close_samples]
    ds['sample_xy'] = ('sample', 'xy'), source_ds.x.values[close_samples]
    return ds
def get_tag_df(tag):
    tag_df = df[df.TagID == tag]
    tag_xy = np.c_[tag_df.X_UTM.values, tag_df.Y_UTM.values]
    codes = np.zeros(len(tag_df), np.int32)
    codes[:] = NOFLAG

    # Filter land positions
    z = dem_clip(tag_xy)
    # note that this fails when the position is on the other side of a levee --
    # there is no check that the transition crossed land.
    codes[z > 2.5] = FLAG_LAND

    time_s = tag_df.Epoch_Sec

    # this will get updated as points are removed, and removed
    # points will keep the dvel that got them ejected.
    point_dvel = np.zeros(len(tag_df), np.float64)

    thresh = 2.0  # change in speed in m/s
    while np.sum(codes == 0) > 1:
        # narrow to the valid set
        sel = (codes == NOFLAG)  # valid mask
        sel_seg_uv = np.diff(tag_xy[sel], axis=0) / np.diff(time_s[sel])[:, None]
        sel_dvel = utils.mag(np.diff(sel_seg_uv, axis=0))
        sel_dvel = np.concatenate(([0], sel_dvel, [0]))  # pad to ends.
        sel_worst = np.argmax(sel_dvel)
        if sel_dvel[sel_worst] < thresh:
            break
        # map back to original index
        orig_idx = np.nonzero(sel)[0]
        # slices help deal with short datasets
        point_dvel[orig_idx[1:-1]] = sel_dvel[1:-1]
        worst = orig_idx[sel_worst]
        codes[worst] = FLAG_DVEL

    tag_df = tag_df.copy()
    tag_df['code'] = codes
    tag_df['point_dvel'] = point_dvel
    tag_df['dem_z'] = z
    return tag_df
def samples_for_target(SD, x_target, N=500):
    x_along = SD.trace_along(x_target)
    x_across = SD.trace_across(x_target)

    # nearby source samples
    dists = utils.mag(x_target - source_ds.x)
    close_samples = np.argsort(dists)[:N]

    close_distances = []
    for s in close_samples:
        close_distances.append(
            SD.stream_distance(x_target, s, x_along=x_along, x_across=x_across))
    close_distances = np.array(close_distances)

    ds = xr.Dataset()
    ds['target'] = ('xy',), x_target.copy()
    ds['target_z'] = (), np.nan
    ds['stream_dist'] = ('sample', 'st'), close_distances
    ds['sample_z'] = ('sample',), source_ds.z.values[close_samples]
    ds['sample_xy'] = ('sample', 'xy'), source_ds.x.values[close_samples]
    return ds
def tag_df_figure(tag_df):
    fig = plt.figure(1)
    fig.clf()
    ax = fig.add_subplot(1, 1, 1)
    ax.set_title(tag_df.TagID.values[0])

    tag_xy = np.c_[tag_df.X_UTM.values, tag_df.Y_UTM.values]
    codes = tag_df.code.values

    # node rendering:
    if 1:  # validity codes
        boundaries = [-0.5, 0.5, 1.5, 2.5]
        scat = ax.scatter(tag_xy[:, 0], tag_xy[:, 1], 30, codes, cmap='inferno',
                          vmin=boundaries[0], vmax=boundaries[-1])
        cbar = plt.colorbar(scat, ax=ax, label='Code', ticks=[0, 1, 2],
                            boundaries=boundaries)  # values=[0,1,2]
        cbar.set_ticklabels(["Good", "Land", "DVel"])
    if 0:
        ax.scatter(tag_xy[:, 0], tag_xy[:, 1], 30, point_dvel)

    # line rendering:
    if 1:  # raw detections path
        ax.plot(tag_xy[:, 0], tag_xy[:, 1], 'g-', lw=0.9, alpha=0.4)
    if 1:  # filtered path
        ax.plot(tag_xy[codes == 0, 0], tag_xy[codes == 0, 1], 'm-', lw=1.5, alpha=0.9)
    if 0:  # plot per-segment velocity magnitude
        segs = np.array([tag_xy[:-1, :], tag_xy[1:, :]]).transpose(1, 0, 2)
        scoll = collections.LineCollection(segs, array=utils.mag(seg_uv), cmap='jet')
        ax.add_collection(scoll)

    dem_clip.plot(ax=ax, cmap='Blues_r')
    ax.axis('equal')
    return fig
def move_particles(self, stop_t):
    """
    Advance each particle to the correct state at stop_t.
    Assumes that no input (updating velocities) or output is needed
    between self.t_unix and stop_t.
    Caller is responsible for updating self.t_unix
    """
    g = self.g

    for i, p in enumerate(self.P):
        # advance each particle to the correct state at stop_t
        part_t = self.t_unix

        if np.isnan(p['u'][0]):
            # probably first time this particle has been moved.
            self.P['u'][i] = self.U[self.P['c'][i]]

        while part_t < stop_t:
            dt_max_edge = np.inf
            j_cross = None
            j_cross_normal = None

            for j in g.cell_to_edges(p['c']):
                if j == p['j_last']:
                    continue  # don't cross back
                normal = self.edge_norm[j]
                if g.edges['cells'][j, 1] == p['c']:  # ~checked
                    normal = -normal
                # vector from xy to a point on the edge
                d_xy_n = g.nodes['x'][g.edges['nodes'][j, 0]] - p['x']
                # perpendicular distance
                dp_xy_n = d_xy_n[0] * normal[0] + d_xy_n[1] * normal[1]
                assert dp_xy_n >= 0  # otherwise sgn probably wrong above

                # closing=u*normal[0] + v*normal[1]
                closing = self.P['u'][i, 0] * normal[0] + self.P['u'][i, 1] * normal[1]

                if closing < 0:
                    continue
                else:
                    dt_j = dp_xy_n / closing
                    if dt_j > 0 and dt_j < dt_max_edge:
                        j_cross = j
                        dt_max_edge = dt_j
                        j_cross_normal = normal

            t_max_edge = part_t + dt_max_edge
            if t_max_edge > stop_t:
                # don't make it to the edge
                dt = stop_t - part_t
                part_t = stop_t
                j_cross = None
            else:
                dt = dt_max_edge
                part_t = t_max_edge

            # Take the step
            delta = self.P['u'][i] * dt

            # see if we're stuck
            if utils.mag(delta) / (utils.mag(delta) + utils.mag(self.P['x'][i])) < 1e-14:
                print("Steps are too small")
                part_t = stop_t
                continue

            self.P['x'][i] += delta

            if j_cross is not None:
                # cross edge j, update time.  careful that j isn't boundary
                # or start sliding on boundary.
                # print "Cross edge"
                cells = g.edges['cells'][j_cross]
                if cells[0] == p['c']:
                    new_c = cells[1]
                elif cells[1] == p['c']:
                    new_c = cells[0]
                else:
                    assert False

                # More scrutiny on the edge crossing -
                # would it take us out of the domain? then bounce and frown.
                # would it take us to a convergent edge? then bounce and frown.
                bounce = False
                if new_c < 0:
                    bounce = True
                else:
                    recross = (self.U[new_c, 0] * j_cross_normal[0] +
                               self.U[new_c, 1] * j_cross_normal[1])
                    if recross <= 0:
                        bounce = True

                if bounce:
                    closing = (self.P['u'][i, 0] * j_cross_normal[0] +
                               self.P['u'][i, 1] * j_cross_normal[1])
                    # slightly over-compensate, pushing away from problematic edge
                    print("BOUNCE")
                    self.P['u'][i] -= 1.1 * j_cross_normal * closing
                    self.P['j_last'][i] = j_cross
                else:
                    self.P['c'][i] = new_c
                    self.P['j_last'][i] = j_cross
                    self.P['u'][i] = self.U[new_c]
                    # HERE: need to check whether the new cell's
                    # velocity is going to push us back, in which case
                    # we should instead scoot along the tangent and
                    # hide our faces.
                    # actually, better solution is to handle *before*
                    # the particle hits the edge.  If we handle it after,
                    # two particles converging on this edge will cross
                    # paths, and that really doesn't pass the sniff test.
                    # this is not good, but will let the sim complete

    if self.record_dense:
        self.append_state(self.dense)
def depth_fn(self, xy):
    r = utils.mag(xy)
    return -self.d0 * (1 - r**2 / self.L**2)
def steady_streamline_oneway(gtri, Uc, x0, max_t=3600, allow_divergence=False,
                             u_min=1e-3):
    """
    allow_divergence: allow edges to have divergent adjacent velocities.
    """
    # trace some streamlines
    x0 = np.asarray(x0)
    c = gtri.select_cells_nearest(x0, inside=True)
    t = 0.0  # steady field, start the counter at 0.0
    edge_norm = gtri.edges_normals()
    edge_ctr = gtri.edges_center()
    x = x0.copy()
    pnts = [x.copy()]
    cells = [c]  # for debugging track the past cells
    e2c = gtri.edges['cells']

    while (t < max_t) and (c >= 0):
        dt_max_edge = np.inf  # longest time step we're allowed based on hitting an edge
        j_cross = None
        c_cross = None  # the cell that would be entered
        j_cross_normal = None

        for j in gtri.cell_to_edges(c):
            if gtri.edges['cells'][j, 1] == c:  # ~checked
                # normals point from cell 0 to cell 1
                csgn = -1
            else:
                csgn = 1

            out_normal = csgn * edge_norm[j]  # normal of edge j pointing away from cell c

            d_xy_n = edge_ctr[j] - x  # vector from xy to a point on the edge
            # perpendicular distance
            dp_xy_n = d_xy_n[0] * out_normal[0] + d_xy_n[1] * out_normal[1]
            if not allow_divergence:
                assert dp_xy_n >= -0.01  # otherwise csgn probably wrong above
            if dp_xy_n < 0.0:  # roundoff error
                dp_xy_n = 0.0

            closing = Uc[c, 0] * out_normal[0] + Uc[c, 1] * out_normal[1]

            # what cell would we be entering?
            if e2c[j, 0] == c:
                nbr_c = e2c[j, 1]
            elif e2c[j, 1] == c:
                nbr_c = e2c[j, 0]
            else:
                assert False

            if closing < 0:
                continue  # moving away from that edge
            if len(cells) > 1 and nbr_c == cells[-2]:
                # print('Would be reentering cell %d. Skip that option'%nbr_c)
                continue
            if (dp_xy_n == 0.0) and (closing != 0.0):
                print("On edge j=%d, dp_xy_n is zero, and closing is %f" % (j, closing))
            dt_j = dp_xy_n / closing
            if dt_j > 0 and dt_j < dt_max_edge:
                j_cross = j
                c_cross = nbr_c
                dt_max_edge = dt_j

        t_max_edge = t + dt_max_edge
        if t_max_edge > max_t:
            # don't make it to the edge
            dt = max_t - t
            t = max_t
            j_cross = None
        else:
            # this step will take us to the edge j_cross
            dt = dt_max_edge
            t = t_max_edge

        # Take the step
        delta = Uc[c] * dt
        x += delta
        pnts.append(x.copy())

        if j_cross is not None:
            # crossing an edge
            c = c_cross
            if c < 0:
                break

            # with roundoff, good to make sure that we are properly on the
            # line segment of j_cross
            nodes = gtri.nodes['x'][gtri.edges['nodes'][j_cross]]
            tangent = nodes[1] - nodes[0]
            edgelen = utils.mag(tangent)
            tangent /= edgelen
            alpha = np.dot(x - nodes[0], tangent) / edgelen
            eps = 1e-4
            if alpha < eps:
                print('alpha correction %f => %f' % (alpha, eps))
                alpha = 1e-4
            elif alpha > 1 - eps:
                print('alpha correction %f => %f' % (alpha, 1 - eps))
                alpha = 1 - eps
            x = (1 - alpha) * nodes[0] + alpha * nodes[1]
            pnts[-1] = x.copy()

            cells.append(c)
            umag = utils.mag(Uc[c])
            if umag <= u_min:
                # should only happen with rotate velocities
                # means we hit shore.
                break

    pnts = np.array(pnts)
    cells = np.array(cells)
    return pnts, cells
def steady_streamline_oneway(g, Uc, x0, max_t=3600, max_steps=1000, max_dist=None,
                             u_min=1e-3, bidir=False):
    """
    Trace a streamline downstream.

    g: unstructured grid
    Uc: cell centered velocity vectors
    bidir: interpret velocities as principal directions, i.e. unique only down
      to a sign.  when enabled, on each crossing into a new cell, the sign of
      the velocity is resolved to be consistent with the last cell.

    returns Dataset with positions x, cells, times
    """
    # trace some streamlines
    x0 = np.asarray(x0)
    c = g.select_cells_nearest(x0, inside=True)
    if c is None:
        # can happen with the dual grid
        c = -1
    t = 0.0  # steady field, start the counter at 0.0
    x = x0.copy()
    times = [t]
    pnts = [x.copy()]
    cells = [c]
    dist = 0.0
    stop_condition = "none"

    # This part is taking a lot of time -- allow precomputed values in the grid
    try:
        edge_norm = g.edges['normal']
    except ValueError:
        edge_norm = g.edges_normals()
    try:
        edge_ctr = g.edges['center']
    except ValueError:
        edge_ctr = g.edges_center()

    e2c = g.edges['cells']

    def is_convergent(j, ca, Ua, cb, Ub):
        # True if a point on edge j will remain on edge j
        # due to cell velocities converging towards it
        nc1, nc2 = e2c[j, :]
        if nc1 < 0:
            conv_c1 = True
        elif nc1 == ca:
            conv_c1 = np.dot(Ua, edge_norm[j]) >= 0.0
        elif nc1 == cb:
            conv_c1 = np.dot(Ub, edge_norm[j]) >= 0.0
        else:
            raise Exception("BUG!")
        if nc2 < 0:
            conv_c2 = True
        elif nc2 == ca:
            conv_c2 = np.dot(Ua, edge_norm[j]) <= 0.0
        elif nc2 == cb:
            conv_c2 = np.dot(Ub, edge_norm[j]) <= 0.0
        else:
            raise Exception("BUG!")
        return (conv_c1 and conv_c2)

    c_U = Uc[c, :]  # track the velocity for the current cell c

    while (t < max_t) and (c >= 0):
        dt_max_edge = np.inf  # longest time step we're allowed based on hitting an edge
        j_cross = None
        c_cross = None  # the cell that would be entered

        for j in g.cell_to_edges(c):
            if g.edges['cells'][j, 1] == c:  # ~checked
                # normals point from cell 0 to cell 1
                csgn = -1
            else:
                csgn = 1

            out_normal = csgn * edge_norm[j]  # normal of edge j pointing away from cell c

            d_xy_n = edge_ctr[j] - x  # vector from xy to a point on the edge
            # perpendicular distance
            dp_xy_n = d_xy_n[0] * out_normal[0] + d_xy_n[1] * out_normal[1]
            if dp_xy_n < 0.0:  # roundoff error
                dp_xy_n = 0.0

            closing = (c_U[0] * out_normal[0] + c_U[1] * out_normal[1])

            if closing <= 0.0:
                continue  # moving away from or parallel to that edge

            # what cell would we be entering?
            if e2c[j, 0] == c:
                nbr_c = e2c[j, 1]
            elif e2c[j, 1] == c:
                nbr_c = e2c[j, 0]
            else:
                assert False

            # if (dp_xy_n==0.0):
            #     print("On edge j=%d, dp_xy_n is zero, and closing is %f"%(j,closing))
            dt_j = dp_xy_n / closing
            if dt_j < dt_max_edge:  # dt_j>0 redundant with dp_xy_n==0.0
                j_cross = j
                c_cross = nbr_c
                dt_max_edge = dt_j

        if (j_cross is not None) and (c_cross >= 0):
            c_cross_U = Uc[c_cross, :]
            if bidir and (np.dot(c_U, c_cross_U) < 0):
                # print("Flip cells %d -- %d"%(c,c_cross))
                c_cross_U = -c_cross_U  # don't modify Uc!
        else:
            c_cross_U = None  # catch errors by unsetting this

        # Special case for sliding along an edge.
        # note that we only want to do this when the edge is convergent.
        if dt_max_edge == 0.0 and j_cross is not None and is_convergent(
                j_cross, c, c_U, c_cross, c_cross_U):
            # print("Moving along edge")
            edge_tan = np.array([-edge_norm[j_cross, 1], edge_norm[j_cross, 0]])
            Utan = np.dot(c_U, edge_tan)
            # so edge_norm points from left to right,
            # and edge_tan points from n1 to n2
            #   n2
            #   |  ---> norm
            #   |
            #   n1
            if Utan > 0:
                # moving toward n2
                to_node = g.edges['nodes'][j_cross, 1]
            else:
                # moving toward n1
                to_node = g.edges['nodes'][j_cross, 0]
            dist = utils.mag(x - g.nodes['x'][to_node])
            dt_max_edge = dist / np.abs(Utan)
            t_max_edge = t + dt_max_edge
            if t_max_edge > max_t:
                # didn't make it to the node
                dt = max_t - t
                t = max_t
                delta = Utan * edge_tan * dt
                x += delta
            else:
                # move all the way and exactly to the node
                x = g.nodes['x'][to_node]
                t = t + dt_max_edge
                # And get off of this edge
                for j_node in g.node_to_edges(to_node):
                    if j_node == j_cross:
                        continue
                    if c == e2c[j_node, 0]:
                        j_cross = j_node
                        c_cross = e2c[j_node, 1]
                        break
                    elif c == e2c[j_node, 1]:
                        j_cross = j_node
                        c_cross = e2c[j_node, 0]
                        break
                else:
                    raise Exception("Couldn't find a good edge after going through node")
        else:
            t_max_edge = t + dt_max_edge
            if t_max_edge > max_t:
                # don't make it to the edge
                dt = max_t - t
                t = max_t
                j_cross = None
            else:
                # this step will take us to the edge j_cross
                dt = dt_max_edge
                t = t_max_edge

            # Take the step
            x += c_U * dt

        dist += utils.dist(x, pnts[-1])
        pnts.append(x.copy())
        cells.append(c)
        times.append(t)

        if j_cross is not None:
            # crossing an edge
            if c_cross < 0:
                # leaving the domain
                stop_condition = "leave_domain"
                break
            c = c_cross
            c_U = c_cross_U
            c_cross = None  # catch errors
            c_cross_U = None

            # with roundoff, good to make sure that we are properly on the
            # line segment of j_cross
            nodes = g.nodes['x'][g.edges['nodes'][j_cross]]
            tangent = nodes[1] - nodes[0]
            edgelen = utils.mag(tangent)
            tangent /= edgelen
            alpha = np.dot(x - nodes[0], tangent) / edgelen
            eps = 1e-4
            if alpha < eps:
                # print('alpha correction %f => %f'%(alpha,eps))
                alpha = 1e-4
            elif alpha > 1 - eps:
                # print('alpha correction %f => %f'%(alpha,1-eps))
                alpha = 1 - eps
            x = (1 - alpha) * nodes[0] + alpha * nodes[1]
            pnts[-1] = x.copy()

            umag = utils.mag(Uc[c])
            if umag <= u_min:
                # should only happen with rotate velocities
                # means we hit shore.
                break

        if len(pnts) >= max_steps:
            stop_condition = "max_steps"
            break
        if max_dist and (dist >= max_dist):
            stop_condition = "max_dist"
            break

    if t >= max_t:
        stop_condition = "max_t"

    ds = xr.Dataset()
    ds['time'] = ('time',), np.array(times)
    ds['x'] = ('time', 'xy'), np.array(pnts)
    ds['cell'] = ('time',), np.array(cells)
    ds['stop_condition'] = (), stop_condition
    return ds
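# A hedged usage sketch for the tracer above: `grid` and the cell-centered
# velocity array `Ucell` (shape [Ncells,2]) are assumed to already exist, and
# the seed point is arbitrary.
import numpy as np

seed = np.array([0.0, 0.0])
ds = steady_streamline_oneway(grid, Ucell, seed, max_t=1800, max_steps=500)
print(ds.stop_condition.values, ds.x.values.shape)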
        z_pred = clf.predict(txy_pred)
        z_pred = z_pred.reshape(X.shape)
    elif interp == 'wmlr':
        clf = linear_model.LinearRegression()
        dists = utils.dist(all_txy[nbrs, :], target)
        weights = (dists + 1.0)**-2
        clf.fit(all_txy[nbrs, :], nbr_z, weights)
        z_pred = clf.predict(txy_pred)
        z_pred = z_pred.reshape(X.shape)
    elif interp == 'idw':
        # bad.
        N = len(txy_pred)
        z_pred = np.zeros(N, np.float64)
        for i in range(N):
            delta = all_txy[nbrs, :] - txy_pred[i]
            delta[:, 0] *= 10  # rescale streamdistance
            dists = utils.mag(delta)
            weights = (dists + 0.1)**(-2)
            weights = weights / weights.sum()
            z_pred[i] = (weights * nbr_z).sum()
        z_pred = z_pred.reshape(X.shape)
    elif interp == 'krige':
        import sklearn.gaussian_process as GP
        points = all_txy[nbrs, :]
        values = nbr_z
        origin = points.mean(axis=0)
        # gp = GP.GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1., nugget=0.001)
        gp = GP.GaussianProcessRegressor(kernel=GP.kernels.ConstantKernel(1.0),
                                         n_restarts_optimizer=9)
        gp.fit(points, values)
        z_pred = gp.predict(txy_pred).reshape(X.shape)
def subdivide():
    # what does cdt need to provide for this to work?
    vcenters = cdt.cells_center(refresh=True)
    n_edges = cdt.Nedges()

    to_subdivide = []

    min_edge_length = 10.0

    for j_g in g.valid_edge_iter():
        a, b = g.edges['nodes'][j_g]
        j = cdt.nodes_to_edge([a, b])

        cells = cdt.edges['cells'][j]
        assert cells.max() >= 0

        for ci, c in enumerate(cells):
            if c < 0:
                continue
            # Need the signed distance here:
            pntV = vcenters[c]
            pntA = cdt.nodes['x'][a]
            pntB = cdt.nodes['x'][b]
            AB = pntB - pntA
            AV = pntV - pntA
            # just switch sign on this
            left = utils.to_unit(np.array([-AB[1], AB[0]]))
            if ci == 1:
                left *= -1
            line_clearance = np.dot(left, AV)
            v_radius = utils.mag(AV)

            if utils.mag(AB) < min_edge_length:
                continue

            # line_clearance=utils.point_line_distance(vcenters[c],
            #                                          cdt.nodes['x'][ [a,b] ] )
            # v_radius=utils.dist( vcenters[c], cdt.nodes['x'][a] )

            if (v_radius > 1.2 * line_clearance) and (v_radius > min_edge_length):
                # second check - make sure that neither AC nor BC are also on the
                # boundary
                c_j = cdt.cell_to_edges(c)
                count = cdt.edges['constrained'][c_j].sum()
                if count == 1:
                    to_subdivide.append(j_g)
                    break
                elif count == 0:
                    print("While looking at edge %d=(%d,%d)" % (j_g, a, b))
                    raise Exception("We should have found at least 1 boundary edge")
                elif count == 3:
                    print("WARNING: Unexpected count of boundary edges in one element: ",
                          count)

    for j in to_subdivide:
        sys.stdout.write(str(j) + ",")
        sys.stdout.flush()
        g.split_edge(j)

    return len(to_subdivide)
def dist_ratio(D):
    # calculate per-edge gradients
    dDdn = (D[c2] - D[c1]) / dg
    gradD = U_perot(g, g.edges_length() * dDdn, g.cells_area())
    gradmag = utils.mag(gradD)
    return gradmag
ds = g.write_to_xarray()
ds['hi'] = ('face',), hi
ds['uj'] = ('edge',), uj
ds['ei'] = ('face',), ei
ui = sim.get_center_vel(sim.uj)
ds['ui'] = ('face', 'two'), ui
# ds.to_netcdf('frictionless.nc')
ds.to_netcdf('manning-0.025.nc')

##

plt.figure(2).clf()
mag = utils.mag(ui)
g.plot_cells(values=mag, cmap='jet', clim=[0, 1])

##

import xarray as xr
obs = xr.open_dataset("../../field/adcp/040518_BT/040518_5BTref-avg.nc")

xy = np.c_[obs.orig_x_sample.values, obs.orig_y_sample.values]
xy = xy[np.isfinite(xy[:, 0]), :]

# tran=model.extract_transect(xy=xy,time=-1,dx=2)
cells = [g.select_cells_nearest(pnt, inside=True) for pnt in xy]
cells = utils.remove_repeated(np.array([c for c in cells if c is not None]))
def postprocess(g, suffix=""):
    if suffix == "":
        pass
    elif suffix == 'med0':
        L = 35
        # First cut at anisotropic median filter.
        # depends on the grid matching this set of BCs:
        # see smooth_original_aniso.py for dev details
        print("______________________MED0___________________")
        bcs = wkb2shp.shp2geom("../grid/snubby_junction/forcing-snubby-01.shp")

        OR_left = np.array(bcs['geom'][1].coords)[0]    # left end at OR
        OR_right = np.array(bcs['geom'][1].coords)[-1]  # right end at OR
        SJ_up_left = np.array(bcs['geom'][0].coords)[0]
        SJ_up_right = np.array(bcs['geom'][0].coords)[-1]
        SJ_dn_right = np.array(bcs['geom'][2].coords)[-1]
        SJ_dn_left = np.array(bcs['geom'][2].coords)[0]

        # nodes on boundaries:
        river_left = g.select_nodes_boundary_segment(coords=[OR_left, SJ_up_left])
        river_right = g.select_nodes_boundary_segment(coords=[SJ_up_right, SJ_dn_right])
        river_split = g.select_nodes_boundary_segment(coords=[SJ_dn_left, OR_right])

        # cells on boundaries:
        river_left_cells = np.unique([c for n in river_left for c in g.node_to_cells(n)])
        river_right_cells = np.unique([c for n in river_right for c in g.node_to_cells(n)])
        river_split_cells = np.unique([c for n in river_split for c in g.node_to_cells(n)])

        # Solve a Laplace equation to get stream function
        from stompy.model import unstructured_diffuser
        diff = unstructured_diffuser.Diffuser(g)
        for c in river_left_cells:
            diff.set_dirichlet(0, cell=c)
        for c in river_right_cells:
            diff.set_dirichlet(100, cell=c)
        for c in river_split_cells:
            diff.set_dirichlet(50, cell=c)
        diff.construct_linear_system()
        diff.solve_linear_system(animate=False)

        # Stream function on the cell centers
        psi = diff.C_solved

        # do the smoothing on the grid itself...
        cc = g.cells_center()
        d_orig = g.cells['z_bed']
        d_med = d_orig.copy()

        for c in utils.progress(g.valid_cell_iter()):
            # Get a large-ish neighborhood:
            # nbrs=np.array(g.select_cells_nearest( cc[c], count=200))
            nbrs = np.nonzero(utils.mag(cc - cc[c]) < L)[0]
            alpha = 10  # controls the degree of anisotropy
            coords = np.c_[cc[nbrs], alpha * psi[nbrs]]
            coord0 = np.array([cc[c, 0], cc[c, 1], alpha * psi[c]])
            dists = utils.mag(coords - coord0)
            # Will take the median of a subset of those:
            # again, scale by size of nbrs to be grid-invariant
            N = int(0.15 * len(nbrs))
            subsel = np.argsort(dists)[:N]
            close_nbrs = nbrs[subsel]
            d_med[c] = np.median(d_orig[close_nbrs])

        g.cells['z_bed'] = d_med
    else:
        assert False, "What is post processing suffix %s" % suffix
# find the hycom cell which best matches each edge of the domain.
N_bc_edge = len(nodes_bc) - 1
edge_to_hycom = np.zeros((N_bc_edge, 2), np.int32)
edge_hycom_bathy = np.zeros(N_bc_edge, np.float64)
edge_inward_normal = np.zeros((N_bc_edge, 2), np.float64)
edge_lengths = np.zeros(N_bc_edge, np.float64)
edge_centers = 0.5 * (g.nodes['x'][nodes_bc[:-1]] + g.nodes['x'][nodes_bc[1:]])
vec_transforms = [None] * N_bc_edge

for i in range(N_bc_edge):
    na, nb = g.nodes['x'][nodes_bc[i:i + 2]]
    delta = nb - na
    edge_inward_normal[i] = [-delta[1], delta[0]]
    edge_lengths[i] = utils.mag(delta)
    vec_transforms[i] = vec_ll2utm(0.5 * (na + nb))

    hyc_dists = utils.dist(0.5 * (na + nb), xy)
    row, col = np.nonzero(hyc_dists == hyc_dists.min())
    edge_to_hycom[i] = [row[0], col[0]]
    edge_hycom_bathy[i] = bathy(xy[row[0], col[0]])

edge_inward_normal /= utils.mag(edge_inward_normal)[:, None]

# those check out.
# ax.scatter( xy[edge_to_hycom[:,0],edge_to_hycom[:,1],0],
#             xy[edge_to_hycom[:,0],edge_to_hycom[:,1],1],
#             40,edge_hycom_bathy,cmap='jet')
#
# ax.quiver( edge_centers[:,0],edge_centers[:,1],
joined = pd.merge(segments, model_data, left_index=True, right_index=True)

##

if 1:  # quiver with all stations
    fig = plt.figure(2)
    fig.clf()
    ax = fig.add_axes([0, 0, 1, 1])

    mod.grid.plot_edges(lw=0.5, ax=ax, color='0.75', zorder=-3)
    plt.setp(ax.get_xticklabels(), visible=0)
    plt.setp(ax.get_yticklabels(), visible=0)

    color_by = '|u|'
    # color_by='Time (d)'

    if color_by == '|u|':
        scal = utils.mag(np.c_[joined.model_u.values, joined.model_v.values])
    else:
        scal = (joined.time.values - joined.time.values.min()) / np.timedelta64(1, 'D')

    quiv = ax.quiver(joined.xm.values, joined.ym.values,
                     joined.model_u.values, joined.model_v.values,
                     scal, cmap='jet', scale=30.0)
    plt.colorbar(quiv, label=color_by)
    ax.quiverkey(quiv, 0.1, 0.1, 0.5, "0.5 m/s - depth avg")

    ax.axis((647091.6894404562, 647486.7958566407,
             4185689.605777047, 4185968.1328291544))
    fig.savefig('model-extracted-velocities.png', dpi=200)
def local_smooth(self, node_idxs, ij=None, n_iter=3, stencil_radius=1,
                 free_nodes=None, min_halo=2):
    """
    Fit regular grid patches iteratively within the subset of nodes given
    by node_idxs.
    Currently requires that node_idxs has a sufficiently large footprint
    to have some extra nodes on the periphery.

    node_idxs: list of node indices

    n_iter: count of how many iterations of smoothing are applied.

    stencil_radius: controls size of the patch that is fit around each node.

    min_halo: only nodes at least this many steps from a non-selected node
    are moved.

    free_nodes: node indexes (i.e. indices of g.nodes) that are allowed to
    move.  Defaults to all of node_idxs subject to the halo.
    """
    g = self.g

    if ij is None:
        node_idxs, ij = g.select_quad_subset(ctr=None, max_cells=None,
                                             max_radius=None, node_set=node_idxs)

    halos = self.calc_halo(node_idxs)

    pad = 1 + stencil_radius
    ij = ij - ij.min(axis=0) + pad
    XY = np.nan * np.zeros((pad + 1 + ij[:, 0].max(),
                            pad + 1 + ij[:, 1].max(), 2), np.float64)
    XY[ij[:, 0], ij[:, 1]] = g.nodes['x'][node_idxs]

    stencil_rows = []
    for i in range(-stencil_radius, stencil_radius + 1):
        for j in range(-stencil_radius, stencil_radius + 1):
            stencil_rows.append([i, j])
    stencil = np.array(stencil_rows)

    # And fit a surface to the X and Y components
    # Want to fit an equation
    #   x = a*i + b*j + c
    M = np.c_[stencil, np.ones(len(stencil))]

    new_XY = XY.copy()

    if free_nodes is not None:
        # use dict for faster tests
        free_nodes = {n: True for n in free_nodes}

    moved_nodes = {}

    for count in range(n_iter):
        new_XY[...] = XY
        for ni, n in enumerate(node_idxs):
            if halos[ni] < min_halo:
                continue
            if (free_nodes is not None) and (n not in free_nodes):
                continue

            # Cruft, pretty sure.
            # # Find that node in
            # ni=np.nonzero(node_idxs==n)[0]
            # assert len(ni)>0,"Somehow n wasn't in the quad subset"
            # ni=ni[0]

            # Query XY to estimate where n "should" be.
            i, j = ij[ni]
            XY_sten = (XY[stencil[:, 0] + ij[ni, 0], stencil[:, 1] + ij[ni, 1]]
                       - XY[i, j])
            valid = np.isfinite(XY_sten[:, 0])

            xcoefs, resid, rank, sing = np.linalg.lstsq(M[valid], XY_sten[valid, 0],
                                                        rcond=-1)
            ycoefs, resid, rank, sing = np.linalg.lstsq(M[valid], XY_sten[valid, 1],
                                                        rcond=-1)

            delta = np.array([xcoefs[2], ycoefs[2]])

            new_x = XY[i, j] + delta

            if np.isfinite(new_x[0]):
                new_XY[i, j] = new_x
                moved_nodes[n] = True
            else:
                pass  # print("Hit nans.")
        # Update all at once to avoid adding variance due to the order of nodes.
        XY[...] = new_XY

    # Update grid
    count = 0
    for ni, n in enumerate(node_idxs):
        if n not in moved_nodes:
            continue
        i, j = ij[ni]
        dist = mag(XY[i, j] - g.nodes['x'][n])
        if dist > 1e-6:
            g.modify_node(n, x=XY[i, j])
            count += 1

    for n in list(moved_nodes.keys()):
        for nbr in g.node_to_nodes(n):
            if nbr not in moved_nodes:
                moved_nodes[nbr] = True

    for n in moved_nodes.keys():
        if (free_nodes is not None) and (n not in free_nodes):
            continue
        self.nudge_node_orthogonal(n)