def expand_shape(varin):
    # expand a variable to 3 dimensions; relies on len_j and len_i from the
    # enclosing scope (this helper lives inside resample_colwise(), see below)
    if len(varin.shape) == 1:    # 1dim --> 3dim
        return utils_conv.exp_k_to_kji(varin, len_j, len_i)
    elif len(varin.shape) == 2:  # 2dim --> 3dim
        sys.exit('case of two-dimensional ogrd is not implemented yet!')
    elif len(varin.shape) == 3:  # already 3dim
        return varin             # no expansion needed
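# A minimal sketch of the expansion step, assuming utils_conv.exp_k_to_kji
# copies a 1-d column of length k into every (j, i) position; the broadcast
# below is a plain-NumPy stand-in, not the project's own helper.
import numpy as np
col = np.arange(5, dtype=float)    # e.g. layer thicknesses, shape (k,)
len_j, len_i = 3, 4
col3d = np.broadcast_to(col[:, None, None], (col.size, len_j, len_i)).copy()
assert col3d.shape == (5, 3, 4) and np.all(col3d[:, 2, 1] == col)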
def plot_BSF(var, TorUgrid, nlevels=100, mappingtoolbox='basemap', proj='ortho', min=[], max=[]):
    ''' uses utils_plt.emptyBasemapFig()'''
    # colormap (note: min/max shadow the builtins; kept for API compatibility)
    if min == []:
        min = np.floor(np.nanmin(var))  # minimum value of varin
    if max == []:
        max = np.ceil(np.nanmax(var))   # maximum value of varin
    #cmap, norm = utils_plt.get_cmap(min, max, nlevels, scheme=utils_plt.get_viridis())  # viridis
    cmap = utils_plt.shiftCMap(ml.cm.seismic, midpoint=1 - max/(max - min), name='shifted')  # shifted blue-white-red
    # add cyclic border (to prevent a gap in the pcolor plot)
    var = utils_conv.add_cyclic(var)
    # choose U or T grid
    if TorUgrid == 'U':
        xvar = var.ULONG.values
        yvar = var.ULAT.values
    elif TorUgrid == 'T':
        xvar = var.TLONG.values
        yvar = var.TLAT.values
    # draw plot in new figure
    if mappingtoolbox == 'basemap':
        fig, map = utils_plt.emptyBasemapFig(proj)
        c1 = map.pcolor(xvar, yvar, var.values, latlon=True, cmap=cmap, rasterized=True)
        map.colorbar()
    elif mappingtoolbox == 'cartopy':
        fig, map = True, True  # TODO: cartopy branch not implemented yet
    return fig, map
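# Hypothetical usage sketch (names assumed, not from this file): BSF_mgrd is an
# xarray DataArray on the U-grid carrying ULONG/ULAT coordinates, and fig is
# assumed to be a matplotlib figure returned by the basemap branch.
# fig, m = plot_BSF(BSF_mgrd, 'U', proj='ortho')
# fig.savefig('BSF_mgrd.pdf')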
def integrate_along_dens(dat, delta):
    # expand delta, the layer thickness, from 1d to 3d by copying the columns
    if len(delta.shape) == 1:
        delta = utils_conv.exp_k_to_kji(delta, dat.shape[-2], dat.shape[-1])
    # total thickness of each column for normalisation (only count boxes with a non-nan dat value)
    delta_sum = np.nansum(delta * ~np.isnan(dat), axis=0)
    delta_sum[delta_sum == 0] = np.nan
    # weighted sum and normalisation with delta_sum
    dat_int = np.nansum(dat * delta, axis=0) / delta_sum
    return dat_int
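# A quick self-contained check of the thickness-weighted average, assuming
# exp_k_to_kji just broadcasts delta along (j, i): a column with values 1 and 3
# and thicknesses 10 and 30 should integrate to (1*10 + 3*30)/40 = 2.5, and a
# NaN box must drop out of both the sum and the normalisation.
import numpy as np
dat = np.array([[[1.0]], [[3.0]], [[np.nan]]])       # shape (k=3, j=1, i=1)
delta = np.array([[[10.0]], [[30.0]], [[99.0]]])     # already 3d, so no expansion
weights = np.nansum(delta * ~np.isnan(dat), axis=0)  # = 40, NaN layer excluded
assert np.isclose(np.nansum(dat * delta, axis=0) / weights, 2.5)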
def resample_colwise(odat, ogrd, ngrd, method, fill_value=np.nan, mask='none', mono_method='filter'):
    '''
    Uses:
     > utils_conv.resample_1dim_lininterp()
    Input:
     > odat:        array of dim 1 or 3 | data on model grid
     > ogrd:        array of dim 1 or 3 | old grid
     > ngrd:        new grid
     > method:      string | 'lin'     (linear interpolation),
                             'dMW_db'  (for dMW calculation with density as reference for weighting),
                             'dMW_zdb' (for dMW calculation with isopycnal depth as reference for weighting),
                             'sum'     (sum over all datapoints within bin on new grid)
     > fill_value:  float or np.nan | value used to fill in for requested points outside the range of ogrd
     > mask:        regional mask of shape [j, i], default: all True (no mask)
     > mono_method: string | 'filter' (sorts ogrd (and odat) such that ogrd is monotonically
                                       increasing (not necessarily in the strict sense!)),
                             'force'  (manipulates ogrd such that it is strictly monotonically
                                       increasing (brute-force method, not recommended))
    Output:
     > ndat:        resampled data on new grid
    Comments:
     > TODO: add a warning if negative gradients occur.
     > #! for discrepancies at the low- and high-density borders see the comment in resample_1dim_weightedmean().
    '''
    print(' > columnwise resampling')

    # shape of data-array
    if len(odat.shape) == 3:
        len_j = odat.shape[-2]  # assumed to be the second-last entry
        len_i = odat.shape[-1]  # assumed to be the last entry
    elif len(odat.shape) == 2:
        len_j = 1               # set to one, such that the j-loop is executed only once
        len_i = odat.shape[-1]  # assumed to be the last entry
    elif len(odat.shape) == 1:
        len_j = 1               # set to one, such that the j-loop is executed only once
        len_i = 1               # set to one, such that the i-loop is executed only once

    # expand ngrd, ogrd and odat to 3 dimensions
    # note: singleton dimensions are intended, depending on the original shape of odat
    def expand_shape(varin):
        if len(varin.shape) == 1:    # 1dim --> 3dim
            return utils_conv.exp_k_to_kji(varin, len_j, len_i)
        elif len(varin.shape) == 2:  # 2dim --> 3dim
            sys.exit('case of two-dimensional ogrd is not implemented yet!')
        elif len(varin.shape) == 3:  # already 3dim
            return varin             # no expansion needed

    ngrd = expand_shape(ngrd)
    ogrd = expand_shape(ogrd)
    odat = expand_shape(odat)

    # default regional mask (i.e. do not mask anything)
    if mask == 'none':
        mask = np.ones(shape=[len_j, len_i], dtype=bool)

    # pre-allocation of ndat
    ndat = fill_value * np.ones_like(ngrd)

    # helper to make grd (strictly) monotonically increasing
    # note: the original checked for 'sort' although the default and docstring
    # say 'filter'; both names are accepted here, and the 'force' branch now
    # operates on its arguments rather than on loop variables from outside.
    def make_mono(grd, dat, mono_method):
        if mono_method in ('filter', 'sort'):  # sort grd and dat relative to grd
            idx_sort = np.argsort(grd)
            grd, dat = grd[idx_sort], dat[idx_sort]
        elif mono_method == 'force':  # bump offending values until strictly increasing
            for k in np.arange(1, grd.shape[0]):
                if grd[k] <= grd[k - 1]:
                    grd[k] = grd[k - 1] + 1e-10
        return grd, dat

    # loop over columns
    for j in np.arange(len_j):
        utils_misc.ProgBar('step', step=j, nsteps=len_j)
        for i in np.arange(len_i):
            # skip masked [j,i]-tuples
            if not mask[j, i]:
                continue
            # reduce ngrd, ogrd and odat to the current column
            ngrd_ji = ngrd[:, j, i]
            ogrd_ji = ogrd[:, j, i]
            odat_ji = odat[:, j, i]
            # detect disjunct ogrd and ngrd and continue with the next column
            if (np.nanmax(ogrd_ji) < np.nanmin(ngrd_ji)) or (np.nanmax(ngrd_ji) < np.nanmin(ogrd_ji)):
                print('disjunct ogrd and ngrd at (j,i)=({}, {}). (please check conservation of integrated flux!)'.format(j, i))
                continue
            # make monotonically increasing (only needed for plain interpolation)
            if method == 'lin' and any(np.diff(ogrd_ji) <= 0):
                ogrd_ji, odat_ji = make_mono(ogrd_ji, odat_ji, mono_method)
            # index of the first ngrd point inside the range of ogrd
            try:
                idxn_start = np.where(ngrd_ji > np.nanmin(ogrd_ji))[0][0]
            except IndexError:
                idxn_start = 0  #!
            # resampling
            if method == 'lin':       # simple linear interpolation
                ndat[:, j, i], gaps_border, gaps_center = utils_conv.resample_1dim_lininterp(odat_ji, ogrd_ji, ngrd_ji, idxn_start, fill_value)
            elif method == 'dMW_db':  # dMW calculation with density as reference for weighting
                ndat[:, j, i], gaps_border, gaps_center = utils_conv.resample_1dim_lininterp(odat_ji, ogrd_ji, ngrd_ji, idxn_start, fill_value=0)
            elif method == 'dMW_zdb': # dMW calculation with isopycnal depth as reference for weighting
                ndat[:, j, i], gaps_border, gaps_center = utils_conv.resample_1dim_lininterp(odat_ji, ogrd_ji, ngrd_ji, idxn_start, fill_value=0)
                ndat[gaps_border, j, i] = fill_value
            elif method == 'dMV':     # dMV calculation
                ndat[:, j, i], gaps_border, gaps_center = utils_conv.resample_1dim_lininterp(odat_ji, ogrd_ji, ngrd_ji, idxn_start, fill_value=0)
            elif method == 'sum':     #! does not work right now!
                ndat[:, j, i], gaps_border, gaps_center = resample_1dim_sum(odat_ji, ogrd_ji, ngrd, idxn_start, fill_value)
                ndat[:, j, i] = fill_gaps(ndat[:, j, i], gaps_border, gaps_center, fill_value)
            else:
                raise ValueError('unexpected method passed to resample_colwise.')
    utils_misc.ProgBar('done')

    return np.squeeze(ndat)  # remove singleton dimensions (i.e. (1d --> 3d) --> back to 1d)
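# A hypothetical call, mirroring the isopycnal-depth computations below:
# resample the 3d depth field z_t_3d onto density bins db, column by column
# (z_t_3d, dens, db and ATLboolmask as defined in the binning fragments below).
# zdb = resample_colwise(z_t_3d, dens, db, method='lin', fill_value=np.nan,
#                        mask=ATLboolmask, mono_method='sort')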
# ---------------------------------------------------------------------------------------
# - Density grid/bins
SA = ncdat.SALT[0,:,:,:].values             # absolute salinity
PT = ncdat.TEMP[0,:,:,:].values             # potential temperature
CT = gsw.CT_from_pt(SA, PT)                 # conservative temperature
sig2 = gsw.sigma2(SA, CT)                   # potential density anomaly referenced to 2000 dbar
RHO = ncdat.RHO[0,:,:,:].values*1000-1000   # in-situ density anomaly [SI]
if densChoice == 'sig2':
    dens = sig2
elif densChoice == 'rho':
    dens = RHO

# density bins (db), center values (dbc) and thicknesses (ddb, ddbc)
db = np.concatenate((np.linspace(dbsetup[0,0], dbsetup[0,1], dbsetup[0,2]),
                     np.linspace(dbsetup[1,0], dbsetup[1,1], dbsetup[1,2]),
                     np.linspace(dbsetup[2,0], dbsetup[2,1], dbsetup[2,2]),
                     np.linspace(dbsetup[3,0], dbsetup[3,1], dbsetup[3,2])))
dbc = np.array([np.mean(db[i-1:i+1]) for i in np.arange(1, len(db))])  #! center values | reasonable for non-eq-spaced db?
ddb = utils_ana.canonical_cumsum(np.diff(db)/2, 2, axis=0, crop=True)  # layer thickness of density bins
ddbc = np.diff(db)  # layer thickness from midpoint to midpoint (#! note: it is 1 element longer than ddb)

# depth of isopycnals (i.e. of density bins on both staggered grids)
z_t_3d = utils_conv.exp_k_to_kji(ncdat.z_t, dens.shape[-2], dens.shape[-1])
zdb = LGS(lambda: utils_conv.resample_colwise(z_t_3d, dens, db, 'lin', fill_value=np.nan, mask=ATLboolmask, mono_method='sort'), path_zdb, 'zdb', noload=boolLGSnoload)
zdbc = LGS(lambda: utils_conv.resample_colwise(z_t_3d, dens, dbc, 'lin', fill_value=np.nan, mask=ATLboolmask, mono_method='sort'), path_zdbc, 'zdbc', noload=boolLGSnoload)
#dzdb = utils_ana.canonical_cumsum(np.diff(zdb, axis=0)/2, 2, axis=0, crop=True)  # layer thickness of isopycnal depths
dzdb = np.diff(zdb, axis=0)
del PT, CT, z_t_3d, dbsetup

# ---------------------------------------------------------------------------------------
# - Volume representation for axis-scaling  #! check if and where db is to be replaced by dbc
dz3d = utils_conv.exp_k_to_kji(ncdat.dz, dens.shape[-2], dens.shape[-1])  # in cgs
TAREA3d = utils_conv.exp_ji_to_kji(ncdat.TAREA, dens.shape[0])            # in cgs
vol3d = dz3d * TAREA3d                                                    # in cgs
inds = np.digitize(dens, db)
vol_dbs_glob = np.zeros(shape=[len(db)])
vol_dbs_reg = np.zeros(shape=[len(db)])
vol_dbs_col = np.zeros(shape=[len(db), dens.shape[-2], dens.shape[-1]])
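# A toy check of the bin geometry, assuming canonical_cumsum(x, 2, crop=True)
# acts as a length-2 running sum (a plain-NumPy stand-in is used below): for
# db = [0, 1, 3] the bin centers are [0.5, 2.0], ddb (the spacing between
# consecutive centers) is [1.5], and ddbc = diff(db) is one element longer.
import numpy as np
db_toy = np.array([0.0, 1.0, 3.0])
dbc_toy = np.array([np.mean(db_toy[i-1:i+1]) for i in np.arange(1, len(db_toy))])
ddb_toy = np.diff(db_toy)[:-1]/2 + np.diff(db_toy)[1:]/2  # length-2 running sum of diff(db)/2
assert np.allclose(dbc_toy, [0.5, 2.0])
assert np.allclose(ddb_toy, np.diff(dbc_toy))             # == [1.5]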
sig2T = gsw.sigma2(SA, CT)                 # potential density anomaly referenced to 2000 dbar
RHO = ncdat.RHO[0,:,:,:].values*1000-1000  # in-situ density anomaly [SI]

# - conversion T-->U
sig2U = np.zeros_like(sig2T)
foo1 = utils_ana.canonical_cumsum(sig2T, 2, axis=-1)
sig2U[:,:-1,:-1] = .25*utils_ana.canonical_cumsum(foo1, 2, axis=-2)
sig2U[:,-1,:-1]  = .5*utils_ana.canonical_cumsum(sig2T, 2, axis=-1)[:,-1,:]
sig2U[:,:-1,-1]  = .5*utils_ana.canonical_cumsum(sig2T, 2, axis=-2)[:,:,-1]
sig2U[:,-1,-1]   = sig2T[:,-1,-1]

# density bins: border values (=DBb), center values (=DBc) and thickness (=dDB)
DBb = np.concatenate((np.linspace(DBsetup[0,0], DBsetup[0,1], DBsetup[0,2]),
                      np.linspace(DBsetup[1,0], DBsetup[1,1], DBsetup[1,2]),
                      np.linspace(DBsetup[2,0], DBsetup[2,1], DBsetup[2,2]),
                      np.linspace(DBsetup[3,0], DBsetup[3,1], DBsetup[3,2]),
                      np.linspace(DBsetup[4,0], DBsetup[4,1], DBsetup[4,2])))
DBc = np.convolve(DBb, [.5,.5])[1:-1]  # find midpoints

# depth of isopycnals (zDBbc) calculated as z(DBc) (=zDBc) and as c(zDBb) (=zDBbc)
z_t_3d = utils_conv.exp_k_to_kji(ncdat.z_t, sig2U.shape[-2], sig2U.shape[-1])
# zDBc = LGS(lambda: utils_conv.resample_colwise(z_t_3d, sig2U, DBc, 'lin', fill_value=np.nan, mask=ATLboolmask, mono_method='sort'), path_zDBc, 'zDBc', noload=False)
# zDBb = LGS(lambda: utils_conv.resample_colwise(z_t_3d, sig2U, DBb, 'lin', fill_value=np.nan, mask=ATLboolmask, mono_method='sort'), path_zDBb, 'zDBb', noload=False)
# zDBbc = np.ones_like(zDBc) * np.nan
# for j in np.arange(zDBb.shape[-2]):
#     for i in np.arange(zDBb.shape[-1]):
#         zDBbc[:,j,i] = np.convolve(zDBb[:,j,i], [.5,.5])[1:-1]  # centre values

# thickness of DB (=dDB) and zDBb (=dzDB)
dDB = np.diff(DBb)
#dzDB = np.diff(zDBb, axis=0)
del PT, CT, z_t_3d, DBsetup

# ---------------------------------------------------------------------------------------
# Volume representation for axis-scaling (fully on T-grid)
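# The np.convolve midpoint trick, spelled out: convolving with [.5, .5] in
# 'full' mode and trimming one element at each end yields pairwise means,
# i.e. the same centers as 0.5*(DBb[1:] + DBb[:-1]).
import numpy as np
edges = np.array([0.0, 1.0, 3.0, 6.0])
centers = np.convolve(edges, [.5, .5])[1:-1]
assert np.allclose(centers, 0.5*(edges[1:] + edges[:-1]))  # [0.5, 2.0, 4.5]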
# - in-situ density ---------------------------------------------------------------------
rho = utils_mask.mask_ATLANTIC(ncdat.RHO.mean(dim='time'), ncdat.REGION_MASK)
rho = rho.mean(dim='nlon')

# =======================================================================================
#  Streamfunctions
# =======================================================================================
'''
    BSF: Barotropic Streamfunction
    MOC: Meridional Overturning Circulation Streamfunction
    MW:  vertical volume transport
    MV:  meridional volume transport
'''
# ---------------------------------------------------------------------------------------
# - Volume transports (in Sv)
MV_mgrd = utils_transp.calc_MV(ncdat)                                      # on model grid
MV_projauxgrd = utils_conv.project_on_auxgrd(MV_mgrd, ncdat.ANGLE.values)  # on auxiliary grid
MW = utils_transp.calc_MW(ncdat)                                           # valid on both grids
try:
    MW_dens = utils_misc.loadvar(path_dens+'MW_sig2_'+varname_binning)     # load from file
except:  # not cached yet --> compute and save
    MW_dens = utils_conv.resample_dens_colwise(MW.values, sig2, PD_bins)   # resampled on density axis #! however, it is still the vertical transport!
    utils_misc.savevar(MW_dens, path_dens+'MW_sig2_'+varname_binning)      # save to file

# ---------------------------------------------------------------------------------------
# - Streamfunctions (in Sv)...
# ... on model grid
BSF_mgrd, MVzint = utils_BSF.calc_BSF_mgrd(MV_mgrd, dump_MVzint=True)
MOC_mgrd_W, MWxint_mgrd = utils_MOC.calc_MOC_mgrd('W', MW, do_norm=True, dump_Mxint=True)
#MOC_mgrd_V, MVxint_mgrd = utils_MOC.calc_MOC_mgrd('V', MV_projauxgrd, do_norm=True, dump_Mxint=True)
dMOC_mgrd_W, dMOC_mgrd_W_norm, dMWxint_mgrd = utils_MOC.calc_MOC_mgrd_nparray('W', MW_dens, dump_Mxint=True)
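# The try/except above is a simple load-or-compute cache. A minimal sketch of
# the same pattern with plain NumPy and a hypothetical cache path (the LGS()
# helper used elsewhere in this script wraps the same idea):
import numpy as np, os
cache = '/tmp/example_cache.npy'  # hypothetical path
if os.path.exists(cache):
    result = np.load(cache)
else:
    result = np.arange(3.0)       # stand-in for the expensive resampling
    np.save(cache, result)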
# - conversion of sigma2 onto the U-grid (using the canonical-cumsum method)
sig2U = np.zeros_like(sig2T)
foo = utils_ana.canonical_cumsum(sig2T, 2, axis=-1)
sig2U[:,:-1,:-1] = .25*utils_ana.canonical_cumsum(foo, 2, axis=-2)
sig2U[:,-1,:-1]  = .5*utils_ana.canonical_cumsum(sig2T, 2, axis=-1)[:,-1,:]
sig2U[:,:-1,-1]  = .5*utils_ana.canonical_cumsum(sig2T, 2, axis=-2)[:,:,-1]
sig2U[:,-1,-1]   = sig2T[:,-1,-1]
del foo

# density bins: border values (=DBb), center values (=DBc) and thickness (=dDB)
DBb = np.concatenate((np.linspace(DBsetup[0,0], DBsetup[0,1], DBsetup[0,2]),
                      np.linspace(DBsetup[1,0], DBsetup[1,1], DBsetup[1,2]),
                      np.linspace(DBsetup[2,0], DBsetup[2,1], DBsetup[2,2]),
                      np.linspace(DBsetup[3,0], DBsetup[3,1], DBsetup[3,2]),
                      np.linspace(DBsetup[4,0], DBsetup[4,1], DBsetup[4,2])))
DBc = np.convolve(DBb, [.5,.5])[1:-1]  # find midpoints
dDB = np.diff(DBb)

# depth of isopycnals calculated as z(DBc) (=zDBc)
z_t_3d = utils_conv.exp_k_to_kji(ncdat.z_t, sig2U.shape[-2], sig2U.shape[-1])
zDBc = utils_conv.resample_colwise(z_t_3d, sig2U, DBc, 'lin', fill_value=np.nan, mask=ATLboolmask, mono_method='sort')

# thickness of zDBc (=dzDBc) (with the first thickness taken from the surface to the first bin)
dzDBc = np.vstack([utils_conv.exp_ji_to_kji(zDBc[0], 1), np.diff(zDBc, axis=0)])

# -----------------------------------------------------------------------------
#  PART II // TRANSPORTS
# -----------------------------------------------------------------------------
# calculate transports
MV  = utils_transp.calc_MV(ncdat).values      # = V * DX * DZ
MVf = utils_transp.calc_MVflat(ncdat).values  # = V * DX

# -------------------------------------------------------------------------
# - MOC streamfunction (in depth space) (in Sv)
MVxint_mgrd = np.nansum(MV, axis=2)
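# What the canonical-cumsum T-->U conversion computes in the interior, assuming
# canonical_cumsum(x, 2, axis) is a length-2 running sum along that axis: each
# interior U-point is the mean of the four surrounding T-points. A plain-NumPy
# equivalent for one level:
import numpy as np
T = np.arange(12.0).reshape(3, 4)  # toy (j, i) T-grid field
U_interior = .25*(T[:-1,:-1] + T[:-1,1:] + T[1:,:-1] + T[1:,1:])
row_sum = T[:, :-1] + T[:, 1:]     # length-2 running sum along i
assert np.allclose(.25*(row_sum[:-1] + row_sum[1:]), U_interior)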
CT = gsw.CT_from_pt(SA, PT)                # conservative temperature
sig2 = gsw.sigma2(SA, CT)                  # potential density anomaly referenced to 2000 dbar
RHO = ncdat.RHO[0,:,:,:].values*1000-1000  # in-situ density anomaly [SI]

# density bins (earlier choices kept for reference)
#db = np.linspace(28,42,100)
#db = np.linspace(1.004,1.036,65)
#db = np.concatenate((np.arange(28,35), np.linspace(35, 38, 50), np.arange(39, 43)))
#db = np.concatenate((np.linspace(28, 33, 11), np.linspace(33.1, 37.5, 45), np.linspace(38, 43, 11)))  # spec_{}to{}in{}steps
#db = np.concatenate((np.linspace(20, 33, 14), np.linspace(33.1, 36, 30), np.linspace(36.05, 37.5, 30), np.linspace(38, 43, 11)))
db = np.concatenate((np.linspace(20, 33, 14), np.linspace(33.1, 36, 11), np.linspace(36.05, 37.5, 11), np.linspace(38, 43, 11)))
#db = np.concatenate((np.linspace(28, 33, 14), np.linspace(33.1, 36, 11), np.linspace(36.05, 37.5, 11), np.linspace(38, 43, 11)))
#db = np.concatenate((np.linspace(28, 33, 11), np.linspace(33.1, 37.5, 21), np.linspace(38, 43, 11)))  # spec_{}to{}in{}steps

# variables related to density bins
dbc = np.array([np.mean(db[i-1:i+1]) for i in np.arange(1, len(db))])  #! reasonable for non-eq-spaced db?
ddb = utils_ana.canonical_cumsum(np.diff(db)/2, 2, crop=True)  # layer thickness of density bins
ddbc = np.diff(db)  # layer thickness from midpoint to midpoint (#! note: it is 1 element longer than ddb)

# depth of isopycnals (i.e. of density bins on both staggered grids)
z_t_3d = utils_conv.exp_k_to_kji(ncdat.z_t, sig2.shape[-2], sig2.shape[-1])
zdb = utils_conv.resample_colwise(z_t_3d, sig2, db, 'lin', fill_value=np.nan, mask=ATLboolmask, mono_method='sort')
zdbc = utils_conv.resample_colwise(z_t_3d, sig2, dbc, 'lin', fill_value=np.nan, mask=ATLboolmask, mono_method='sort')

# total volume represented by db  #! check if and where db is to be replaced by dbc
dz3d = utils_conv.exp_k_to_kji(ncdat.dz, sig2.shape[-2], sig2.shape[-1])  # in cgs
TAREA3d = utils_conv.exp_ji_to_kji(ncdat.TAREA, sig2.shape[0])            # in cgs
vol3d = dz3d*TAREA3d                                                      # in cgs
vol_dbs_glob = np.zeros(shape=[len(db)])
vol_dbs_reg = np.zeros(shape=[len(db)])
vol_dbs_col = np.zeros(shape=[len(db), sig2.shape[-2], sig2.shape[-1]])
inds = np.digitize(sig2, db)
for b in np.arange(len(db)):
    vol_dbs_glob[b] = np.sum(vol3d[inds==b])  # global, in cgs
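# The per-bin volume census above can be vectorised; a sketch assuming inds
# indexes bins exactly as np.digitize returns them (0 = below the first edge):
import numpy as np
dens_toy = np.array([0.5, 1.5, 1.5, 2.5])
vol_toy = np.array([1.0, 2.0, 3.0, 4.0])
edges = np.array([1.0, 2.0, 3.0])
inds_toy = np.digitize(dens_toy, edges)  # [0, 1, 1, 2]
census = np.bincount(inds_toy, weights=vol_toy, minlength=len(edges)+1)
assert np.allclose(census, [1.0, 5.0, 4.0, 0.0])  # per-bin volume sums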
CT = gsw.CT_from_pt(SA, PT)  # conservative temperature
sig2 = gsw.sigma2(SA, CT)    # potential density anomaly referenced to 2000 dbar

# - conversion T-->U
densU = np.zeros_like(sig2)
foo1 = utils_ana.canonical_cumsum(sig2, 2, axis=-1)
densU[:,:-1,:-1] = .25*utils_ana.canonical_cumsum(foo1, 2, axis=-2)
densU[:,-1,:-1]  = .5*utils_ana.canonical_cumsum(sig2, 2, axis=-1)[:,-1,:]
densU[:,:-1,-1]  = .5*utils_ana.canonical_cumsum(sig2, 2, axis=-2)[:,:,-1]
densU[:,-1,-1]   = sig2[:,-1,-1]

# density bins: border values (=dbb), center values (=dbc) and thickness (=ddb)
dbb = np.concatenate((np.linspace(dbsetup[0,0], dbsetup[0,1], dbsetup[0,2]),
                      np.linspace(dbsetup[1,0], dbsetup[1,1], dbsetup[1,2]),
                      np.linspace(dbsetup[2,0], dbsetup[2,1], dbsetup[2,2]),
                      np.linspace(dbsetup[3,0], dbsetup[3,1], dbsetup[3,2]),
                      np.linspace(dbsetup[4,0], dbsetup[4,1], dbsetup[4,2])))
dbc = np.convolve(dbb, np.array([.5,.5]))[1:-1]

# depth of isopycnals (zdbbc) calculated as z(dbc) (=zdbc) and as c(zdbb) (=zdbbc)
z_t_3d = utils_conv.exp_k_to_kji(ncdat.z_t, densU.shape[-2], densU.shape[-1])
zdbc = utils_conv.resample_colwise(z_t_3d, densU, dbc, 'lin', fill_value=np.nan, mask=ATLboolmask, mono_method='sort')
zdbb = utils_conv.resample_colwise(z_t_3d, densU, dbb, 'lin', fill_value=np.nan, mask=ATLboolmask, mono_method='sort')
zdbbc = np.ones_like(zdbc) * np.nan
for j in np.arange(zdbb.shape[-2]):
    for i in np.arange(zdbb.shape[-1]):
        zdbbc[:,j,i] = np.convolve(zdbb[:,j,i], np.array([.5,.5]))[1:-1]  # centre values

# save variables
utils_misc.savevar(sig2, path_sig2)
utils_misc.savevar(densU, path_densU)
utils_misc.savevar(zdbc, path_zdbc)
utils_misc.savevar(zdbb, path_zdbb)
utils_misc.savevar(zdbbc, path_zdbbc)
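# The j/i double loop above is just a pairwise mean along the bin axis and can
# be replaced by one vectorised expression; a self-contained check (both forms
# propagate NaNs the same way):
import numpy as np
zb = np.arange(24.0).reshape(4, 2, 3)  # toy zdbb with 4 bin borders
loop = np.full((3, 2, 3), np.nan)
for j in range(2):
    for i in range(3):
        loop[:, j, i] = np.convolve(zb[:, j, i], [.5, .5])[1:-1]
assert np.allclose(loop, 0.5*(zb[:-1] + zb[1:]))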