Example #1
a1 = np.cos(om1*t) + .5*np.cos(om*t)
a2 = .7*np.sin(om1*t) + .5*np.cos(om*t)
a3 = .3*np.cos(om1*t) + .5*np.cos(om*t)
a4 = .5*np.sin(om1*t) + .5*np.cos(om*t)
# also pack them in an array with time along axis 0
A = np.nan * np.ones((nt,2,2))
A[:,0,0] = a1
A[:,1,0] = a2
A[:,0,1] = a3
A[:,1,1] = a4

to_test = 'godin'

if to_test == 'hanning':
    #  filter each one individually
    aa1 = zfun.filt_hanning(a1)
    aa2 = zfun.filt_hanning(a2)
    aa3 = zfun.filt_hanning(a3)
    aa4 = zfun.filt_hanning(a4)
    # and filter this using the function we are testing
    AA = zfun.filt_hanning_mat(A)
elif to_test == 'godin':
    #  filter each one individually
    aa1 = zfun.filt_godin(a1)
    aa2 = zfun.filt_godin(a2)
    aa3 = zfun.filt_godin(a3)
    aa4 = zfun.filt_godin(a4)
    # and filter this using the function we are testing
    AA = zfun.filt_godin_mat(A)
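
# a minimal sanity check (a sketch, not part of the snippet above): the matrix
# version should reproduce the per-series results; end points left as NaN by the
# filters are compared with equal_nan=True
print(np.allclose(AA[:,0,0], aa1, equal_nan=True))
print(np.allclose(AA[:,1,0], aa2, equal_nan=True))
print(np.allclose(AA[:,0,1], aa3, equal_nan=True))
print(np.allclose(AA[:,1,1], aa4, equal_nan=True))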

# PLOTTING
Example #2
    # create arrays for filtering
    DFF_array = DFF.to_numpy() # DataFrame.as_matrix() was removed in pandas 1.0
    DFF_header = DFF.columns.values

    # godin filter
    filt_array = np.array(DFF_array)
    for j in range(DFF_array.shape[1]):
        filt_array[:, j] = zfun.filt_godin(DFF_array[:, j])

    # hanning filters: n=720 (~30 days, monthly) and n=168 (7 days, weekly), assuming hourly data
    for tf in tf_list:
        if tf == 'm':
            filt_m = np.array(DFF_array)
            for j in range(filt_array.shape[1]):
                filt_m[:, j] = zfun.filt_hanning(filt_array[:, j], n=720)
        elif tf == 'w':
            filt_w = np.array(DFF_array)
            for j in range(filt_array.shape[1]):
                filt_w[:, j] = zfun.filt_hanning(filt_array[:, j], n=168)
        elif tf == 'd':
            pass

    # reform dataframes
    DFF_m = pd.DataFrame(filt_m, index=DFF.index, columns=DFF_header)
    DFF_w = pd.DataFrame(filt_w, index=DFF.index, columns=DFF_header)
    DFF_d = pd.DataFrame(filt_array, index=DFF.index, columns=DFF_header)

    # Climatology with Standard Deviation
    clim_DFF = pd.DataFrame(index=np.arange(1, 54), columns=DFF_d.columns)
    clim_std_DFF = pd.DataFrame(index=np.arange(1, 54), columns=DFF_d.columns)
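
    # a minimal sketch of one way the weekly climatology could be filled
    # (an assumption, not taken from this snippet; requires a DatetimeIndex on
    # DFF_d and pandas >= 1.1 for isocalendar())
    week = DFF_d.index.isocalendar().week
    clim_DFF = DFF_d.groupby(week).mean().reindex(clim_DFF.index)
    clim_std_DFF = DFF_d.groupby(week).std().reindex(clim_DFF.index)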
Example #3
nn_dict = {'salt': 1,
        'NO3': 2,
        'PH': 3,
        'temp': 5,
        'phytoplankton': 6,
        'ARAG': 7,
        'TIC': 4,
        'alkalinity': 8}

for vn in nn_dict:
    ax = fig.add_subplot(3,4,nn_dict[vn])
    
    for ww in ['0','1','2','3']:
        fld = V_dict[ww][vn]
        #ax.plot(mdt, wfun.fac_dict[vn] * zfun.filt_godin(fld), '-', color=cdict[ww])
        ax.plot(mdt, wfun.fac_dict[vn] * zfun.filt_hanning(fld, n=10*24),
            '-', color=cdict[ww], linewidth=lw)
    ax.grid(True)
    ax.set_xlim(mdt[0], mdt[-1])
    ax.ticklabel_format(useOffset=False, axis='y')
    ax.text(.05, .85, wfun.tstr_dict[vn] + ' ' + wfun.units_dict[vn],
        horizontalalignment='left', transform=ax.transAxes, fontsize=fs1)
    ax.xaxis.set_major_locator(mdates.MonthLocator())
    ax.set_xticklabels([])
    ax.set_ylim(wfun.vlims_dict[vn])
    # # add depth labels
    # if vn=='salt':
    #     ax.text(.95, .05, ('Z = %d (m)' % (int(zbot))),
    #         horizontalalignment='right', color='b',
    #         transform=ax.transAxes, fontsize=fs2, fontweight='bold')
    #     ax.text(.95, .15, ('Z = %d (m)' % (int(zmid))),
Example #4
N = V['salt'].shape[0]
nbot = 0
nmid = round(N/2) # mid-depth level index
ntop = N-1

fs1=14
fs2=16

ir = 0
ic = 0
for vn in list_to_plot:
    
    ax = axes[ir, ic]

    nfilt = 20
    ax.plot(mdt, zfun.filt_hanning(V[vn][ntop,:],nfilt), '-r', label='Surface',linewidth=3)
    ax.plot(mdt, zfun.filt_hanning(V[vn][nmid,:],nfilt),'-g', label='Mid-Depth',linewidth=3)
    ax.plot(mdt, zfun.filt_hanning(V[vn][nbot,:],nfilt), '-b', label='Deepest',linewidth=3)
    
    ax.set_ylim(lim_dict[vn][0], lim_dict[vn][1])

    ax.tick_params(axis = 'both', which = 'major', labelsize = fs1)
    
    ax.set_xlim(mdt[0], mdt[-1])
    if do_ticks:
        ax.set_xticks(dt_ticks)
        ax.set_xticklabels([])
        if ir == NR-1:
            ax.set_xlabel('Date', fontsize=fs1)
            ax.set_xticklabels(dt_ticklabels, fontsize=fs1)
        aa = ax.get_ylim()
Example #5
def tef_details(fn):
    # this is much the same as tef_integrals() but it returns the raw fields
    # which I plan to use for making a TEF tutorial
    
    # choices
    tidal_average = False # which kind of time filtering
    nlay_max = 2 # maximum allowable number of layers to process
    
    # load results
    tef_dict = pickle.load(open(fn, 'rb'))
    tef_q = tef_dict['tef_q']
    tef_qs = tef_dict['tef_qs']
    sbins = tef_dict['sbins']
    smax = sbins.max()
    qnet = tef_dict['qnet']
    fnet = tef_dict['fnet']
    ot = tef_dict['ot']
    td = (ot - ot[0])/86400
    NS = len(sbins)

    # low-pass
    if tidal_average:
        # tidal averaging
        tef_q_lp = zfun.filt_godin_mat(tef_q)
        tef_qs_lp = zfun.filt_godin_mat(tef_qs)
        qnet_lp = zfun.filt_godin(qnet)
        fnet_lp = zfun.filt_godin(fnet)
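        # note: the Godin filter (24-24-25 hr running means, ~71 hr total span)
        # leaves roughly the first and last 35 hourly points as NaN; pad = 36 trims them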
        pad = 36
    else:
        # nday Hanning window
        nday = 5
        nfilt = nday*24
        tef_q_lp = zfun.filt_hanning_mat(tef_q, n=nfilt)
        tef_qs_lp = zfun.filt_hanning_mat(tef_qs, n=nfilt)
        qnet_lp = zfun.filt_hanning(qnet, n=nfilt)
        fnet_lp = zfun.filt_hanning(fnet, n=nfilt)
        pad = int(np.ceil(nfilt/2))

    # subsample
    tef_q_lp = tef_q_lp[pad:-(pad+1):24, :]
    tef_qs_lp = tef_qs_lp[pad:-(pad+1):24, :]
    td = td[pad:-(pad+1):24]
    qnet_lp = qnet_lp[pad:-(pad+1):24]
    fnet_lp = fnet_lp[pad:-(pad+1):24]

    #find integrated TEF quantities
    
    # start by making the low-passed flux arrays sorted
    # from high to low salinity
    rq = np.fliplr(tef_q_lp)
    rqs = np.fliplr(tef_qs_lp)
    sbinsr = sbins[::-1]
    # then form the cumulative sum (the function Q(s))
    Q = np.cumsum(rq, axis=1)
    nt = len(td)

    Qi = np.nan * np.zeros((nt, nlay_max))
    Fi = np.nan * np.zeros((nt, nlay_max))
    Qi_abs = np.nan * np.zeros((nt, nlay_max))
    Fi_abs = np.nan * np.zeros((nt, nlay_max))

    Sdiv = np.nan * np.zeros(nt)
    
    for tt in range(nt):
        
        imax = np.argmax(Q[tt,:])
        imin = np.argmin(Q[tt,:])
                
        # set the dividing salinity by the size of the transport
        Qin = rq[tt, 0:imax].sum()
        Qout = rq[tt, 0:imin].sum()
        if np.abs(Qin) > np.abs(Qout):
            idiv = imax
        else:
            idiv = imin
            
        # get the dividing salinity
        Sdiv[tt] = sbinsr[idiv]
                
        ivec = np.unique(np.array([0, idiv, NS+1]))
        nlay = len(ivec)-1

        for ii in range(nlay):
            Qi[tt,ii] = rq[tt, ivec[ii]:ivec[ii+1]].sum()
            Qi_abs[tt,ii] = np.abs(rq[tt, ivec[ii]:ivec[ii+1]]).sum()
            Fi[tt,ii] = rqs[tt, ivec[ii]:ivec[ii+1]].sum()
            Fi_abs[tt,ii] = np.abs(rqs[tt, ivec[ii]:ivec[ii+1]]).sum()
        
    # form derived quantities
    Qcrit = np.abs(Qi[:,0]).mean()/5
    Qi[np.abs(Qi)==0] = np.nan
    Si = Fi_abs/Qi_abs
    
    return Qi, Si, Fi, qnet_lp, fnet_lp, td, sbinsr, Q, rq, Sdiv, tef_q
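
# usage sketch: a hypothetical call to tef_details (the pickle file name and the
# plot below are illustrative assumptions, not part of the original source)
import matplotlib.pyplot as plt
Qi, Si, Fi, qnet_lp, fnet_lp, td, sbinsr, Q, rq, Sdiv, tef_q = tef_details('tef_extraction.p')
fig, ax = plt.subplots()
ax.plot(td, Qi[:, 0], '-r', label='layer 1 transport')
ax.plot(td, Qi[:, 1], '-b', label='layer 2 transport')
ax.set_xlabel('Days')
ax.legend()
plt.show()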
Example #6
fig, axes = plt.subplots(nrows=NR, ncols=NC, figsize=(15,8), squeeze=False)

days = (V['ocean_time'] - V['ocean_time'][0])/86400.

mdays = Lfun.modtime_to_mdate_vec(V['ocean_time'])

year = 2013 + (mdays - mdates.date2num(datetime(2013,1,1)))/365 # days per year

cc = 0
nmid = round(V['salt'].shape[0]/2)
nfilt = 20
for vn in v3_list:
    ir = int(np.floor(cc/NC))
    ic = int(cc - NC*ir)
    ax = axes[ir, ic]
    ax.plot(year, zfun.filt_hanning(V[vn][-1,:],n=nfilt), '-r')
    ax.plot(year, zfun.filt_hanning(V[vn][nmid,:],n=nfilt),'-g')
    ax.plot(year, zfun.filt_hanning(V[vn][0,:],n=nfilt), '-b')
    ax.set_xlim(2013, 2016)
    ax.ticklabel_format(useOffset=False, axis='x')
    ax.ticklabel_format(useOffset=False, axis='y')
    ax.set_xticks([2013.5, 2014.5, 2015.5])
    ax.set_xticklabels([2013, 2014, 2015])
    ax.set_title(vn)
    yl = ax.get_ylim()
    ax.plot([2014, 2014],yl,'-k')
    ax.plot([2015, 2015],yl,'-k')
    ax.set_ylim(yl)
    cc += 1
for vn in v2_list:
    ir = int(np.floor(cc/NC))
Example #7
    # low-pass
    if True:
        # tidal averaging
        tef_q_lp = zfun.filt_godin_mat(tef_q)
        tef_qs_lp = zfun.filt_godin_mat(tef_qs)
        qnet_lp = zfun.filt_godin(qnet)
        fnet_lp = zfun.filt_godin(fnet)
        ssh_lp = zfun.filt_godin(ssh)
        pad = 36
    else:
        # nday Hanning window
        nday = 5
        nfilt = nday*24
        tef_q_lp = zfun.filt_hanning_mat(tef_q, n=nfilt)
        tef_qs_lp = zfun.filt_hanning_mat(tef_qs, n=nfilt)
        qnet_lp = zfun.filt_hanning(qnet, n=nfilt)
        fnet_lp = zfun.filt_hanning(fnet, n=nfilt)
        ssh_lp = zfun.filt_hanning(ssh, n=nfilt)
        pad = int(np.ceil(nfilt/2))

    # subsample and cut off nans
    tef_q_lp = tef_q_lp[pad:-(pad+1):24, :]
    tef_qs_lp = tef_qs_lp[pad:-(pad+1):24, :]
    ot = ot[pad:-(pad+1):24]
    qnet_lp = qnet_lp[pad:-(pad+1):24]
    fnet_lp = fnet_lp[pad:-(pad+1):24]
    ssh_lp = ssh_lp[pad:-(pad+1):24]

    # get sizes and make sedges (the edges of sbins)
    DS = sbins[1] - sbins[0]
    sedges = np.concatenate((sbins, np.array([sbins[-1] + DS]))) - DS/2
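
    # worked check (illustrative values): with sbins = [30.0, 30.1, 30.2] and DS = 0.1,
    # the line above gives sedges = [29.95, 30.05, 30.15, 30.25] -- one more edge than
    # bins, with each bin centered between its two edges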
Example #8
def OBSOLETE_tef_details(fn):
    # this is much the same as tef_integrals() but it returns the raw fields
    # which I plan to use for making a TEF tutorial
    
    # choices
    tidal_average = False # which kind of time filtering
    nlay_max = 2 # maximum allowable number of layers to process
    
    # load results
    tef_dict = pickle.load(open(fn, 'rb'))
    tef_q = tef_dict['tef_q']
    tef_qs = tef_dict['tef_qs']
    sbins = tef_dict['sbins']
    smax = sbins.max()
    qnet = tef_dict['qnet']
    fnet = tef_dict['fnet']
    ot = tef_dict['ot']
    td = (ot - ot[0])/86400
    NS = len(sbins)

    # low-pass
    if tidal_average:
        # tidal averaging
        tef_q_lp = zfun.filt_godin_mat(tef_q)
        tef_qs_lp = zfun.filt_godin_mat(tef_qs)
        qnet_lp = zfun.filt_godin(qnet)
        fnet_lp = zfun.filt_godin(fnet)
        pad = 36
    else:
        # nday Hanning window
        nday = 5
        nfilt = nday*24
        tef_q_lp = zfun.filt_hanning_mat(tef_q, n=nfilt)
        tef_qs_lp = zfun.filt_hanning_mat(tef_qs, n=nfilt)
        qnet_lp = zfun.filt_hanning(qnet, n=nfilt)
        fnet_lp = zfun.filt_hanning(fnet, n=nfilt)
        pad = int(np.ceil(nfilt/2))

    # subsample
    tef_q_lp = tef_q_lp[pad:-(pad+1):24, :]
    tef_qs_lp = tef_qs_lp[pad:-(pad+1):24, :]
    td = td[pad:-(pad+1):24]
    qnet_lp = qnet_lp[pad:-(pad+1):24]
    fnet_lp = fnet_lp[pad:-(pad+1):24]

    #find integrated TEF quantities
    
    # start by making the low-passed flux arrays sorted
    # from high to low salinity
    rq = np.fliplr(tef_q_lp)
    rqs = np.fliplr(tef_qs_lp)
    sbinsr = sbins[::-1]
    # then form the cumulative sum (the function Q(s))
    Q = np.cumsum(rq, axis=1)
    nt = len(td)

    Qi = np.nan * np.zeros((nt, nlay_max))
    Fi = np.nan * np.zeros((nt, nlay_max))
    Qi_abs = np.nan * np.zeros((nt, nlay_max))
    Fi_abs = np.nan * np.zeros((nt, nlay_max))

    Sdiv = np.nan * np.zeros(nt)
    
    for tt in range(nt):
        
        imax = np.argmax(Q[tt,:])
        imin = np.argmin(Q[tt,:])
                
        # set the dividing salinity by the size of the transport
        Qin = rq[tt, 0:imax].sum()
        Qout = rq[tt, 0:imin].sum()
        if np.abs(Qin) > np.abs(Qout):
            idiv = imax
        else:
            idiv = imin
            
        # get the dividing salinity
        Sdiv[tt] = sbinsr[idiv]
                
        ivec = np.unique(np.array([0, idiv, NS+1]))
        nlay = len(ivec)-1

        for ii in range(nlay):
            Qi[tt,ii] = rq[tt, ivec[ii]:ivec[ii+1]].sum()
            Qi_abs[tt,ii] = np.abs(rq[tt, ivec[ii]:ivec[ii+1]]).sum()
            Fi[tt,ii] = rqs[tt, ivec[ii]:ivec[ii+1]].sum()
            Fi_abs[tt,ii] = np.abs(rqs[tt, ivec[ii]:ivec[ii+1]]).sum()
        
    # form derived quantities
    Qcrit = np.abs(Qi[:,0]).mean()/5
    Qi[np.abs(Qi)==0] = np.nan
    Si = Fi_abs/Qi_abs
    
    return Qi, Si, Fi, qnet_lp, fnet_lp, td, sbinsr, Q, rq, Sdiv, tef_q