        tef_qsv1[ii - 1, tta] += dqsv11[counter]
        counter += 1
    ds.close()
    tta += 1

#%% TEF processing

# first form tidal averages
tef_q0_lp = np.nan * np.ones_like(tef_q0)
tef_q1_lp = np.nan * np.ones_like(tef_q1)
tef_qs0_lp = np.nan * np.ones_like(tef_qs0)
tef_qs1_lp = np.nan * np.ones_like(tef_qs1)
tef_qsv0_lp = np.nan * np.ones_like(tef_qsv0)
tef_qsv1_lp = np.nan * np.ones_like(tef_qsv1)
for ii in range(ns):
    tef_q0_lp[ii, :] = zfun.filt_godin(tef_q0[ii, :])
    tef_q1_lp[ii, :] = zfun.filt_godin(tef_q1[ii, :])
    tef_qs0_lp[ii, :] = zfun.filt_godin(tef_qs0[ii, :])
    tef_qs1_lp[ii, :] = zfun.filt_godin(tef_qs1[ii, :])
    tef_qsv0_lp[ii, :] = zfun.filt_godin(tef_qsv0[ii, :])
    tef_qsv1_lp[ii, :] = zfun.filt_godin(tef_qsv1[ii, :])

if False:
    # this way is BAD: it is sensitive to the number of bins
    # during less-stratified conditions!!
    qin = tef_q0_lp.copy()
    qout = tef_q0_lp.copy()
    qsin = tef_qs0_lp.copy()
    qsout = tef_qs0_lp.copy()
    qsvin = tef_qsv0_lp.copy()
    qsvout = tef_qsv0_lp.copy()
    # then mask for in and out parts (ocean end)
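# Aside: zfun.filt_godin implements the standard Godin 24-24-25 hour tidal
# filter: three successive running means of 24, 24, and 25 hours applied to
# hourly data. A minimal self-contained sketch of the idea (the real zfun
# version may handle endpoints and NaNs differently):
import numpy as np

def godin_sketch(x):
    # composite 71-point kernel: boxcar(24) * boxcar(24) * boxcar(25)
    k = np.convolve(np.convolve(np.ones(24), np.ones(24)), np.ones(25))
    k = k / k.sum()
    y = np.full(len(x), np.nan)
    half = len(k) // 2  # 35 points lost at each end
    y[half:-half] = np.convolve(x, k, mode='valid')
    return y

# quick check: a pure M2 tide (12.42 h period) is almost entirely removed
t_chk = np.arange(24 * 30)  # 30 days of hourly samples
print(np.nanmax(np.abs(godin_sketch(np.cos(2 * np.pi * t_chk / 12.42)))))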
        style='italic', size=fs+1)
    if rr == 1:
        ax.set_title(infile, size=fs+2)
    if rr < NR:
        ax.set_xticklabels([])
    if rr == NR:
        ax.set_xlabel('Date', size=fs)
    ax.set_ylabel('Z (m)', size=fs)
    ax.tick_params(labelsize=fs-2)  # tick labels
    rr += 1

if ('Ntotal' in vn_list) and False:
    # plot total watercolumn N
    dz = np.diff(zw, axis=1)  # layer thicknesses from interface depths
    Ntotal_A = np.sum(Ntotal*dz, axis=1)  # vertical integral
    Ntotal_A_lp = zfun.filt_godin(Ntotal_A)
    Ntotal_A_lp = Ntotal_A_lp[12::24]  # subsample to daily
    fig2 = plt.figure(figsize=(13,8))
    ax = fig2.add_subplot(111)
    ax.plot(Days, Ntotal_A_lp, '-k')
    ax.set_xlim((Days[0], Days[-1]))
    ax.set_xlabel('Date', size=fs)
    # note: after depth integration the units are per meter squared
    ax.text(.05, .05, 'Integrated Total N: millimole nitrogen meter-2',
        transform=ax.transAxes, style='italic', size=fs)
    ax.set_title(infile, size=fs+2)
    ax.tick_params(labelsize=fs-2)  # tick labels

plt.show()
# data['tef_q'].shape => (8761, 1000), so packed [hour, salinity bin]
# sbins are packed low to high
# ot is time in seconds from 1/1/1970
sbins = tef_ex['sbins']
ot = tef_ex['ot']
tef_q = tef_ex['tef_q']
tef_qs = tef_ex['tef_qs']
qnet = tef_ex['qnet']
fnet = tef_ex['fnet']
ssh = tef_ex['ssh']

# low-pass
# tidal averaging
tef_q_lp = zfun.filt_godin_mat(tef_q)
tef_qs_lp = zfun.filt_godin_mat(tef_qs)
qnet_lp = zfun.filt_godin(qnet)
fnet_lp = zfun.filt_godin(fnet)
ssh_lp = zfun.filt_godin(ssh)
pad = 36

# subsample and cut off nans
tef_q_lp = tef_q_lp[pad + dd_offset:-(pad+1):24, :]
tef_qs_lp = tef_qs_lp[pad + dd_offset:-(pad+1):24, :]
ot = ot[pad + dd_offset:-(pad+1):24]
qnet_lp = qnet_lp[pad + dd_offset:-(pad+1):24]
fnet_lp = fnet_lp[pad + dd_offset:-(pad+1):24]
ssh_lp = ssh_lp[pad + dd_offset:-(pad+1):24]

if counter == 0:
    q0 = tef_q_lp.copy()
elif counter == 1:
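# Note on the indexing above: the 71-hour Godin kernel leaves 35 NaNs at each
# end of an hourly record, so pad = 36 trims them (with one sample to spare)
# and the stride of 24 reduces the hourly low-passed series to daily values;
# dd_offset presumably shifts the start so records from different runs align.
# A toy check of the indexing, with a hypothetical record length:
import numpy as np
nhours = 8761  # one year of hourly saves, as in tef_q
idx = np.arange(nhours)[36:-(36 + 1):24]
print(idx[0], idx[-1], len(idx))  # 36 8700 362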
# ot is time in seconds from 1/1/1970
sbins = tef_ex['sbins']
ot = tef_ex['ot']
tef_q = tef_ex['tef_q']
tef_qs = tef_ex['tef_qs']
qnet = tef_ex['qnet']
qabs = np.abs(qnet)
fnet = tef_ex['fnet']
ssh = tef_ex['ssh']

# low-pass
if True:
    # tidal averaging
    tef_q_lp = zfun.filt_godin_mat(tef_q)
    tef_qs_lp = zfun.filt_godin_mat(tef_qs)
    qnet_lp = zfun.filt_godin(qnet)
    qabs_lp = zfun.filt_godin(qabs)
    fnet_lp = zfun.filt_godin(fnet)
    ssh_lp = zfun.filt_godin(ssh)
    pad = 36
else:
    # nday Hanning window
    nday = 120
    nfilt = nday*24
    tef_q_lp = zfun.filt_hanning_mat(tef_q, n=nfilt)
    tef_qs_lp = zfun.filt_hanning_mat(tef_qs, n=nfilt)
    qnet_lp = zfun.filt_hanning(qnet, n=nfilt)
    qabs_lp = zfun.filt_hanning(qabs, n=nfilt)
    fnet_lp = zfun.filt_hanning(fnet, n=nfilt)
    ssh_lp = zfun.filt_hanning(ssh, n=nfilt)
    pad = int(np.ceil(nfilt/2))
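# Why filter |qnet| as well: the tidal average of qnet tends toward the small
# subtidal residual, while the tidal average of |qnet| retains the magnitude
# of the tidal transport and so tracks the spring-neap cycle. A toy
# illustration with a synthetic modulated tide:
import numpy as np
t_sn = np.arange(24 * 60)  # 60 days, hourly
spring_neap = 1 + 0.5 * np.cos(2 * np.pi * t_sn / (14.77 * 24))
q_toy = spring_neap * np.cos(2 * np.pi * t_sn / 12.42)  # M2-like transport
print(np.mean(q_toy), np.mean(np.abs(q_toy)))  # ~0 versus order one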
# sbins are packed low to high
# ot is time in seconds from 1/1/1970
sbins = tef_ex['sbins']
ot = tef_ex['ot']
tef_q = tef_ex['tef_q']
tef_qs = tef_ex['tef_qs']
qnet = tef_ex['qnet']
fnet = tef_ex['fnet']
ssh = tef_ex['ssh']

# low-pass
if True:
    # tidal averaging
    tef_q_lp = zfun.filt_godin_mat(tef_q)
    tef_qs_lp = zfun.filt_godin_mat(tef_qs)
    qnet_lp = zfun.filt_godin(qnet)
    fnet_lp = zfun.filt_godin(fnet)
    ssh_lp = zfun.filt_godin(ssh)
    pad = 36
else:
    # nday Hanning window
    nday = 5
    nfilt = nday*24
    tef_q_lp = zfun.filt_hanning_mat(tef_q, n=nfilt)
    tef_qs_lp = zfun.filt_hanning_mat(tef_qs, n=nfilt)
    qnet_lp = zfun.filt_hanning(qnet, n=nfilt)
    fnet_lp = zfun.filt_hanning(fnet, n=nfilt)
    ssh_lp = zfun.filt_hanning(ssh, n=nfilt)
    pad = int(np.ceil(nfilt/2))

# subsample and cut off nans
# get the Campbell River tide extraction
pth = os.path.abspath(Ldir['parent'] + 'ptools/tide_obs_mod/')
if pth not in sys.path:
    sys.path.append(pth)
import obsfun
indir = Ldir['parent'] + 'ptools_output/tide/'
noaa_sn_dict, dfo_sn_dict, sn_dict = obsfun.get_sn_dicts()

# load data
year = 2017
name = 'Campbell River'
sn = sn_dict[name]
mod_dir = indir + 'mod_data/cas4_v2_lo6biom/'
fn = mod_dir + 'tide_' + str(sn) + '_' + str(year) + '.p'
tide = pd.read_pickle(fn)
eta = np.array(tide['eta'].tolist())
eta_lp = zfun.filt_godin(eta)
tide['eta_lp'] = eta_lp
tlp = tide['eta_lp']
tlpd = tlp.resample('1D').mean()
tlpd = tlpd.tz_localize(None)

# get the hycom field from nsog
indir = Ldir['LOo'] + 'misc/'
fn = indir + 'zeta_df.p'
zdf = pd.read_pickle(fn)

# make a DataFrame that has all fields on the same time axis
df = pd.DataFrame(index=pd.date_range(start='1/1/2017', end='1/1/2018'))
df['ROMS Campbell River LP SSH'] = tlpd
df['HYCOM N SoG SSH'] = zdf['z_sog']
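# Because both series were assigned into a DataFrame built on a single daily
# date_range, they align on the index automatically and can be compared
# directly, e.g.:
print(df.corr())  # correlation between the ROMS and HYCOM SSH records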
#%% TEF processing

# sum spatially
tef_q_shelfbox = -np.sum(tef_q_plume, axis=2) + np.sum(
    tef_q_shelf, axis=2) + np.sum(tef_q_south, axis=2)
tef_qs_shelfbox = -np.sum(tef_qs_plume, axis=2) + np.sum(
    tef_qs_shelf, axis=2) + np.sum(tef_qs_south, axis=2)
tef_q_mouth = np.sum(tef_q_mouth0, axis=2)
tef_qs_mouth = np.sum(tef_qs_mouth0, axis=2)

# first form tidal averages
tef_q_shelfbox_lp = np.nan * np.ones_like(tef_q_shelfbox)
tef_q_mouth_lp = np.nan * np.ones_like(tef_q_mouth)
tef_qs_shelfbox_lp = np.nan * np.ones_like(tef_qs_shelfbox)
tef_qs_mouth_lp = np.nan * np.ones_like(tef_qs_mouth)
for ii in range(ns):
    tef_q_shelfbox_lp[ii, :] = zfun.filt_godin(tef_q_shelfbox[ii, :])
    tef_q_mouth_lp[ii, :] = zfun.filt_godin(tef_q_mouth[ii, :])
    tef_qs_shelfbox_lp[ii, :] = zfun.filt_godin(tef_qs_shelfbox[ii, :])
    tef_qs_mouth_lp[ii, :] = zfun.filt_godin(tef_qs_mouth[ii, :])

# start by making the low-passed flux arrays sorted
# from high to low salinity
# np.flipud(m) is equivalent to m[::-1, :], according to the documentation;
# I prefer flipud because it is explicit about which axis is being flipped
rq_shelfbox = np.flipud(tef_q_shelfbox_lp)
rqs_shelfbox = np.flipud(tef_qs_shelfbox_lp)
rq_mouth = np.flipud(tef_q_mouth_lp)
rqs_mouth = np.flipud(tef_qs_mouth_lp)
qcs_mouth = np.cumsum(rq_mouth, axis=0)
qcs_shelfbox = np.cumsum(rq_shelfbox, axis=0)
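# With the fluxes sorted from high to low salinity, the cumulative transport
# Q(s) typically rises to a maximum at the dividing salinity between inflow
# and outflow. A minimal sketch of pulling two-layer transports out of one
# time slice (names hypothetical; the full version in tef_details() below
# also checks the minimum of Q(s)):
import numpy as np

def qin_qout_sketch(rq_t, sbinsr):
    # rq_t: fluxes for one time, sorted high-to-low salinity (1-D)
    Q = np.cumsum(rq_t)
    idiv = np.argmax(Q)  # index of the dividing salinity
    Qin = rq_t[:idiv + 1].sum()   # transport of the saltier class
    Qout = rq_t[idiv + 1:].sum()  # transport of the fresher class
    return Qin, Qout, sbinsr[idiv]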
# Time filters

# change to hourly sampling, filling time gaps
# (resample(..., how='mean') was removed from pandas; use .mean() instead)
DFF = DFF.resample('H').mean()
DFF = DFF.reindex(pd.date_range(DFF.index[1], DFF.index[-1], freq='H'))
day_limit = 2  # fill gaps up to this number of days
DFF_inter = DFF.interpolate(method='linear', limit=24 * day_limit)

# create arrays for filtering
# (as_matrix() was removed from pandas; use to_numpy() instead)
DFF_array = DFF.to_numpy()
DFF_header = DFF.columns.values

# godin filter
filt_array = np.array(DFF_array)
for j in range(DFF_array.shape[1]):
    filt_array[:, j] = zfun.filt_godin(DFF_array[:, j])

# hanning filter
for tf in tf_list:
    if tf == 'm':
        # monthly: 720 h = 30 days
        filt_m = np.array(DFF_array)
        for j in range(filt_array.shape[1]):
            filt_m[:, j] = zfun.filt_hanning(filt_array[:, j], n=720)
    elif tf == 'w':
        # weekly: 168 h = 7 days
        filt_w = np.array(DFF_array)
        for j in range(filt_array.shape[1]):
            filt_w[:, j] = zfun.filt_hanning(filt_array[:, j], n=168)
    elif tf == 'd':
        pass

# reform dataframes
cc = 0  # a counter
for vn in list_to_plot:
    ir, ic = zfun.get_irc(cc, NC)
    ax = axes[ir, ic]
    if low_pass == False:
        # raw
        if V[vn].ndim == 2:
            for n in nlist:
                ax.plot(days, V[vn][:, n], linestyle='-', color=cdict[n])
        elif V[vn].ndim == 1:
            ax.plot(days, V[vn])
    elif low_pass == True:
        # filtered (e.g. tidally averaged)
        if V[vn].ndim == 2:
            for n in nlist:
                ax.plot(days, zfun.filt_godin(V[vn][:, n]),
                    linestyle='-', color=cdict[n])
        elif V[vn].ndim == 1:
            ax.plot(days, zfun.filt_godin(V[vn]))
    try:
        if not auto_lims:
            ax.set_ylim(lim_dict[vn][0], lim_dict[vn][1])
    except KeyError:
        pass
    ax.xaxis.set_tick_params(labelrotation=45)
    ax.grid(True)
    ax.set_xlim(days[0], days[-1])
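# zfun.get_irc evidently maps a linear panel counter to (row, column) indices
# for an NR x NC grid of axes. A guess at a minimal equivalent (an assumption
# about its behavior, not the actual zfun source):
def get_irc_sketch(cc, nc):
    return cc // nc, cc % nc  # fill the grid row by row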
riv_df = riv_df0.loc[:, ['columbia', 'fraser', 'skagit']]

# Tide
pth = os.path.abspath(Ldir['parent'] + 'ptools/tide_obs_mod/')
if pth not in sys.path:
    sys.path.append(pth)
import obsfun
noaa_sn_dict, dfo_sn_dict, sn_dict = obsfun.get_sn_dicts()
t_indir = Ldir['parent'] + 'ptools_output/tide/mod_data/' + gtagex + '/'
t_fn = t_indir + 'tide_' + str(sn_dict['Seattle']) + '_' + year + '.p'
tide_df = pickle.load(open(t_fn, 'rb'))
# remove the timezone
tide_df = tide_df.tz_localize(None)
# make a dataframe of spring-neap conditions
eta = tide_df['eta'].values
eta_rms = np.sqrt(zfun.filt_godin(eta**2))
tide_df['eta_rms'] = eta_rms
# subsample to daily
tide_daily_df = tide_df.loc[::24, 'eta_rms']

# Wind
fnw = (Ldir['LOo'] + 'moor/' + gtagex
    + '_2017.01.01_2018.11.29/NANOOS_ChaBa_Buoy_hourly.nc')
moor_ds = nc.Dataset(fnw)
ot = moor_ds['ocean_time'][:]
svstr = moor_ds['svstr'][:]
svstr_lp = zfun.filt_AB8d(svstr)
wind_time = []
for tt in ot:
    wind_time.append(Lfun.modtime_to_datetime(tt))
wind_df = pd.DataFrame(index=wind_time, columns=['svstr', 'svstr_lp'])
wind_df['svstr'] = svstr
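# The eta_rms line above is a compact spring-neap index: tidally averaging
# eta**2 and taking the square root gives a running RMS tide height. A
# self-contained toy version, using a centered 71-hour rolling mean in place
# of the Godin filter:
import numpy as np
import pandas as pd
th = np.arange(24 * 60)  # hourly, 60 days
eta_toy = np.cos(2*np.pi*th/12.42) + 0.4*np.cos(2*np.pi*th/12.0)  # M2 + S2
eta_rms_toy = np.sqrt(pd.Series(eta_toy**2).rolling(71, center=True).mean())
# eta_rms_toy oscillates at the ~14.8 day spring-neap period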
def tef_details(fn):
    # this is much the same as tef_integrals() but it returns the raw fields,
    # which I plan to use for making a TEF tutorial

    # choices
    tidal_average = False  # which kind of time filtering
    nlay_max = 2  # maximum allowable number of layers to process

    # load results
    tef_dict = pickle.load(open(fn, 'rb'))
    tef_q = tef_dict['tef_q']
    tef_qs = tef_dict['tef_qs']
    sbins = tef_dict['sbins']
    smax = sbins.max()
    qnet = tef_dict['qnet']
    fnet = tef_dict['fnet']
    ot = tef_dict['ot']
    td = (ot - ot[0])/86400
    NS = len(sbins)

    # low-pass
    if tidal_average:
        # tidal averaging
        tef_q_lp = zfun.filt_godin_mat(tef_q)
        tef_qs_lp = zfun.filt_godin_mat(tef_qs)
        qnet_lp = zfun.filt_godin(qnet)
        fnet_lp = zfun.filt_godin(fnet)
        pad = 36
    else:
        # nday Hanning window
        nday = 5
        nfilt = nday*24
        tef_q_lp = zfun.filt_hanning_mat(tef_q, n=nfilt)
        tef_qs_lp = zfun.filt_hanning_mat(tef_qs, n=nfilt)
        qnet_lp = zfun.filt_hanning(qnet, n=nfilt)
        fnet_lp = zfun.filt_hanning(fnet, n=nfilt)
        pad = int(np.ceil(nfilt/2))

    # subsample
    tef_q_lp = tef_q_lp[pad:-(pad+1):24, :]
    tef_qs_lp = tef_qs_lp[pad:-(pad+1):24, :]
    td = td[pad:-(pad+1):24]
    qnet_lp = qnet_lp[pad:-(pad+1):24]
    fnet_lp = fnet_lp[pad:-(pad+1):24]

    # find integrated TEF quantities
    # start by making the low-passed flux arrays sorted
    # from high to low salinity
    rq = np.fliplr(tef_q_lp)
    rqs = np.fliplr(tef_qs_lp)
    sbinsr = sbins[::-1]
    # then form the cumulative sum (the function Q(s))
    Q = np.cumsum(rq, axis=1)
    nt = len(td)

    Qi = np.nan * np.zeros((nt, nlay_max))
    Fi = np.nan * np.zeros((nt, nlay_max))
    Qi_abs = np.nan * np.zeros((nt, nlay_max))
    Fi_abs = np.nan * np.zeros((nt, nlay_max))
    Sdiv = np.nan * np.zeros(nt)
    for tt in range(nt):
        imax = np.argmax(Q[tt, :])
        imin = np.argmin(Q[tt, :])
        # set the dividing salinity by the size of the transport
        Qin = rq[tt, 0:imax].sum()
        Qout = rq[tt, 0:imin].sum()
        if np.abs(Qin) > np.abs(Qout):
            idiv = imax
        else:
            idiv = imin
        # get the dividing salinity
        Sdiv[tt] = sbinsr[idiv]
        ivec = np.unique(np.array([0, idiv, NS+1]))
        nlay = len(ivec)-1
        for ii in range(nlay):
            Qi[tt, ii] = rq[tt, ivec[ii]:ivec[ii+1]].sum()
            Qi_abs[tt, ii] = np.abs(rq[tt, ivec[ii]:ivec[ii+1]]).sum()
            Fi[tt, ii] = rqs[tt, ivec[ii]:ivec[ii+1]].sum()
            Fi_abs[tt, ii] = np.abs(rqs[tt, ivec[ii]:ivec[ii+1]]).sum()

    # form derived quantities
    Qcrit = np.abs(Qi[:, 0]).mean()/5
    Qi[np.abs(Qi) == 0] = np.nan
    Si = Fi_abs/Qi_abs

    return Qi, Si, Fi, qnet_lp, fnet_lp, td, sbinsr, Q, rq, Sdiv, tef_q
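# A hypothetical call pattern for tef_details(), with a placeholder file
# name, plotting the two layer transports against time:
import matplotlib.pyplot as plt
(Qi, Si, Fi, qnet_lp, fnet_lp, td, sbinsr,
    Q, rq, Sdiv, tef_q) = tef_details('tef_extraction.p')  # placeholder
fig, ax = plt.subplots()
ax.plot(td, Qi[:, 0], '-r', td, Qi[:, 1], '-b')
ax.set_xlabel('Days')
ax.set_ylabel('Layer transport Qi')
plt.show()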
ntop = N-1
for vn in list_to_plot:
    ir, ic = zfun.get_irc(cc, NC)
    ax = axes[ir, ic]
    if low_pass == False:
        # raw
        if V[vn].ndim == 2:
            ax.plot(days, V[vn][:, ntop], '-r')
            ax.plot(days, V[vn][:, nmid], '-g')
            ax.plot(days, V[vn][:, nbot], '-b')
        elif V[vn].ndim == 1:
            ax.plot(days, V[vn])
    elif low_pass == True:
        # filtered (e.g. tidally averaged)
        if V[vn].ndim == 2:
            ax.plot(days, zfun.filt_godin(V[vn][:, ntop]), '-r')
            ax.plot(days, zfun.filt_godin(V[vn][:, nmid]), '-g')
            ax.plot(days, zfun.filt_godin(V[vn][:, nbot]), '-b')
        elif V[vn].ndim == 1:
            ax.plot(days, zfun.filt_godin(V[vn]))
    try:
        if not auto_lims:
            ax.set_ylim(lim_dict[vn][0], lim_dict[vn][1])
    except KeyError:
        pass
    ax.grid(True)
    ax.set_xlim(days[0], days[-1])
    if ir == NR-1:
A[:, 0, 1] = a3
A[:, 1, 1] = a4

to_test = 'godin'
if to_test == 'hanning':
    # filter each one individually
    aa1 = zfun.filt_hanning(a1)
    aa2 = zfun.filt_hanning(a2)
    aa3 = zfun.filt_hanning(a3)
    aa4 = zfun.filt_hanning(a4)
    # and filter this using the function we are testing
    AA = zfun.filt_hanning_mat(A)
elif to_test == 'godin':
    # filter each one individually
    aa1 = zfun.filt_godin(a1)
    aa2 = zfun.filt_godin(a2)
    aa3 = zfun.filt_godin(a3)
    aa4 = zfun.filt_godin(a4)
    # and filter this using the function we are testing
    AA = zfun.filt_godin_mat(A)

# PLOTTING
plt.close('all')
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
ax.plot(t, aa1, '-r', t, aa2, '-b', t, aa3, '-g', t, aa4, '-m',
    linewidth=10, alpha=.2)
ax.plot(t, AA[:, 0, 0], '-r', t, AA[:, 1, 0], '-b',
    t, AA[:, 0, 1], '-g', t, AA[:, 1, 1], '-m')
ax.set_title(to_test.title())
plt.show()
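# The visual check above could be made automatic; a hypothetical addition
# that verifies the matrix filter reproduces the per-series results:
import numpy as np
assert np.allclose(AA[:, 0, 0], aa1, equal_nan=True)
assert np.allclose(AA[:, 1, 0], aa2, equal_nan=True)
assert np.allclose(AA[:, 0, 1], aa3, equal_nan=True)
assert np.allclose(AA[:, 1, 1], aa4, equal_nan=True)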
s_df = pd.read_pickle(outdir + 'hourly_segment_salinity.p')
seg_list = list(v_df.columns)

# Form tidally averaged time series of net salt in segments. We do this
# exactly the same way it was done in bulk_calc.py, so the resulting time
# indices are identical.
pad = 36
dt_list = list(v_df.index)
dt_list = dt_list[pad:-(pad + 1):24]
s_lp_df = pd.DataFrame(index=dt_list, columns=seg_list)
v_lp_df = pd.DataFrame(index=dt_list, columns=seg_list)
sv_lp_df = pd.DataFrame(index=dt_list, columns=seg_list)
for seg_name in seg_list:
    v = v_df.loc[:, seg_name].values
    s = s_df.loc[:, seg_name].values
    sv = s * v
    v_lp = zfun.filt_godin(v)
    s_lp = zfun.filt_godin(s)
    sv_lp = zfun.filt_godin(sv)
    # subsample and cut off nans
    v_lp = v_lp[pad:-(pad + 1):24]
    s_lp = s_lp[pad:-(pad + 1):24]
    sv_lp = sv_lp[pad:-(pad + 1):24]
    # save to DataFrames
    s_lp_df.loc[:, seg_name] = s_lp
    v_lp_df.loc[:, seg_name] = v_lp
    sv_lp_df.loc[:, seg_name] = sv_lp

# save results to disk
s_lp_df.to_pickle(outdir + 'daily_segment_salinity.p')
v_lp_df.to_pickle(outdir + 'daily_segment_volume.p')
sv_lp_df.to_pickle(outdir + 'daily_segment_net_salt.p')
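# Because these daily series share their time index with the bulk TEF output,
# they can be differenced directly for a salt budget. A sketch of the storage
# term (assuming one value per day, so dt = 86400 s):
import numpy as np
sv = sv_lp_df[seg_list[0]].to_numpy(dtype=float)
dsvdt = np.diff(sv) / 86400.0  # d(net salt)/dt per second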