Example #1
def calcs(self, S, T, P):
    self['gpan'] = sw.gpan(S, T, P)
    self['pt'] = sw.ptmp(S, T, P)
    self['psigma0'] = sw.pden(S, T, P, 0) - 1000
    self['psigma1'] = sw.pden(S, T, P, 1000) - 1000
    self['psigma2'] = sw.pden(S, T, P, 2000) - 1000

    return self
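The same sw.ptmp / sw.pden pattern can be reproduced on plain NumPy arrays; a minimal sketch with made-up values, assuming the python-seawater (EOS-80) package:

import numpy as np
import seawater as sw

S = np.array([34.5, 34.7, 34.9])   # practical salinity [PSU]
T = np.array([10.0, 8.0, 4.0])     # in situ temperature [deg C]
P = np.array([0., 500., 1500.])    # pressure [dbar]

pt = sw.ptmp(S, T, P)                    # potential temperature referenced to the surface
sigma0 = sw.pden(S, T, P, 0) - 1000      # potential density anomaly, pr = 0 dbar
sigma1 = sw.pden(S, T, P, 1000) - 1000   # pr = 1000 dbar
sigma2 = sw.pden(S, T, P, 2000) - 1000   # pr = 2000 dbar
ga = sw.gpan(S, T, P)                    # geopotential anomaly, as in 'gpan' above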
Example #2
def plotFrame(t):
    fig.clf()
    ax=plotAx()
    cplot=[]; gplot=[]; qplot=[]; bcplot=[]
    for mat1,ax1 in zip(mat,ax):
        #Time
        fig.suptitle(plotter.num2time(t).strftime('%Y-%b-%d %H:%M'))
        fig.subplots_adjust(bottom=.15,top=.85)
        
        it=np.interp(t,mat1['t'],np.arange(0,len(mat1['t'])))
        rho1=sw.pden(mat1['salt'][:,:,int(it)]*(1.-it%1)+mat1['salt'][:,:,int(it)+1]*(it%1),
                     mat1['temp'][:,:,int(it)]*(1.-it%1)+mat1['temp'][:,:,int(it)+1]*(it%1),
                     0.0*mat1['temp'][:,:,int(it)]*(1.-it%1)+0.0*mat1['temp'][:,:,int(it)+1]*(it%1) )
        salt1=mat1['salt'][:,:,int(it)]*(1.-it%1)+mat1['salt'][:,:,int(it)+1]*(it%1)
        it=np.interp(t,mat0['t'],np.arange(0,len(mat0['t'])))
        rho0=sw.pden(mat0['salt'][:,:,int(it)]*(1.-it%1)+mat0['salt'][:,:,int(it)+1]*(it%1),
                     mat0['temp'][:,:,int(it)]*(1.-it%1)+mat0['temp'][:,:,int(it)+1]*(it%1),
                     0.0*mat0['temp'][:,:,int(it)]*(1.-it%1)+0.0*mat0['temp'][:,:,int(it)+1]*(it%1) )
        salt0=mat0['salt'][:,:,int(it)]*(1.-it%1)+mat0['salt'][:,:,int(it)+1]*(it%1)
        #print([np.nanmax(rho1-rho0),np.nanmin(rho1-rho0)])
        cplot1=ax1.contourf(mat1['lon'],mat1['lat'],rho1-rho0,[tick1 for tick1 in np.arange(-5.0,5.01,.25) if np.abs(tick1)>1e-8],cmap=cmap,extend='both')
        bcplot1=ax1.contour(mat1['lon'],mat1['lat'],salt1,levels=[31.5],colors='k',linewidths=1)
        
        it=np.interp(t,mat1['t'],np.arange(0,len(mat1['t'])))
        u1=mat1['u'][:,:,int(it)]*(1.-it%1)+mat1['u'][:,:,int(it)+1]*(it%1)
        v1=mat1['v'][:,:,int(it)]*(1.-it%1)+mat1['v'][:,:,int(it)+1]*(it%1)
        it=np.interp(t,mat0['t'],np.arange(0,len(mat0['t'])))
        u0=mat0['u'][:,:,int(it)]*(1.-it%1)+mat0['u'][:,:,int(it)+1]*(it%1)
        v0=mat0['v'][:,:,int(it)]*(1.-it%1)+mat0['v'][:,:,int(it)+1]*(it%1)
        qplot1=plotter.add_uv2d(ax1,mat1['lon'],mat1['lat'],(u1-u0)*2,(v1-v0)*2)
        #print([np.nanmin(u1-u0),np.nanmax(u1-u0),np.nanmin(v1-v0),np.nanmax(v1-v0)])
    
        tLim1=3.*int((t-2)/3)+np.array([2.,5.])
        obsll=[(lon1,lat1) for (lon1,lat1,type1,t1) in zip(obs['lon'],obs['lat'],obs['type'],obs['t']) if type1==6 and tLim1[0]<=t1<=tLim1[1]]
        lon1,lat1=zip(*obsll)
        gplot1=ax1.plot(lon1,lat1,'.',color=(1.,.55,0.),markersize=2,fillstyle='full')
        
        #Colorbar
        fig.subplots_adjust(right=.8)
        cax=fig.add_axes([.82,.15,.02,.7])
        cbar=fig.colorbar(cplot1,cax=cax,orientation='vertical',spacing='proportional')
        cbar.set_ticks([tick1 for tick1 in np.arange(-5.0,5.01,1.)])
        cbar.formatter=ticker.FuncFormatter(lambda x,pos:'{:.1f}'.format(x))
        cbar.update_ticks()
        cbar.set_label(r'Density difference [$\mathrm{kg m^{-3}}$]')
        
        cplot.append(cplot1)
        gplot.append(gplot1)
        qplot.append(qplot1)
        bcplot.append(bcplot1)
        
        #plt.tight_layout()
        
    return cplot,qplot,gplot,bcplot
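The repeated pattern field[:,:,int(it)]*(1.-it%1) + field[:,:,int(it)+1]*(it%1) above is linear interpolation in time between the two model records that bracket t; a minimal sketch with a hypothetical 1-D field for clarity:

import numpy as np

t_model = np.array([0., 3., 6., 9.])     # model output times
field = np.array([10., 13., 16., 19.])   # one value per output time

t = 4.0
it = np.interp(t, t_model, np.arange(len(t_model)))                    # fractional index, here 1.333...
value = field[int(it)] * (1. - it % 1) + field[int(it) + 1] * (it % 1) # -> 14.0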
Example #3
def potential_density(salt_PSU, temp_C, pres_db, lat, lon, pres_ref=0):
    """
    Calculate density from glider measurements of salinity and temperature.

    The Basestation calculates density from absolute salinity and potential
    temperature. This function is a wrapper for this functionality, where
    potential temperature and absolute salinity are calculated first.
    Note that a reference pressure of 0 is used by default.

    Parameters
    ----------
    salt_PSU : array, dtype=float, shape=[n, ]
        practical salinity
    temp_C : array, dtype=float, shape=[n, ]
        temperature in deg C
    pres_db : array, dtype=float, shape=[n, ]
        pressure in decibar
    lat : array, dtype=float, shape=[n, ]
        latitude in degrees north
    lon : array, dtype=float, shape=[n, ]
        longitude in degrees east

    Returns
    -------
    potential_density : array, dtype=float, shape=[n, ]


    Note
    ----
    Using seawater.dens does not yield the same results as this function. This
    function comes very close to what the SeaGlider Basestation returns; the
    average difference from the Basestation output is ~ 0.003 kg/m3.
    """

    try:
        import gsw

        salt_abs = gsw.SA_from_SP(salt_PSU, pres_db, lon, lat)
        temp_pot = gsw.t_from_CT(salt_abs, temp_C, pres_db)
        pot_dens = gsw.pot_rho_t_exact(salt_abs, temp_pot, pres_db, pres_ref)
    except ImportError:
        import seawater as sw

        pot_dens = sw.pden(salt_PSU, temp_C, pres_db, pres_ref)

    pot_dens = transfer_nc_attrs(
        getframe(),
        temp_C,
        pot_dens,
        'potential_density',
        units='kg/m3',
        comment='',
        standard_name='potential_density',
    )
    return pot_dens
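For reference, the gsw branch above amounts to the following minimal sketch with made-up values (gsw.t_from_CT converts conservative to in situ temperature; the naming here simply mirrors the wrapper's Basestation convention):

import numpy as np
import gsw

salt_PSU = np.array([34.5, 34.7])    # practical salinity
temp_C = np.array([10.0, 4.0])       # temperature [deg C]
pres_db = np.array([10.0, 500.0])    # pressure [dbar]
lat = np.array([-35.0, -35.0])
lon = np.array([15.0, 15.0])

salt_abs = gsw.SA_from_SP(salt_PSU, pres_db, lon, lat)
temp_pot = gsw.t_from_CT(salt_abs, temp_C, pres_db)
pot_dens = gsw.pot_rho_t_exact(salt_abs, temp_pot, pres_db, 0)   # reference pressure 0 dbar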
Example #4
def plotFrame(t):
    fig.clf()
    ax=plotAx()
    cplot=[]; gplot=[];  bcplot=[]
    for model1,ax1 in zip(model,ax):
        #Time
        fig.suptitle(plotter.num2time(t).strftime('%Y-%b-%d %H:%M'))
        fig.subplots_adjust(bottom=0.15,top=.9,right=.85)
        
        it=np.interp(t,model1.t,np.arange(0,len(model1.t)))
        salt1=model1.salt[:,:,int(it)]*(1.-it%1)+model1.salt[:,:,int(it)+1]*(it%1)
        temp1=model1.temp[:,:,int(it)]*(1.-it%1)+model1.temp[:,:,int(it)+1]*(it%1)
        z1=model1.z[:,:,int(it)]*(1.-it%1)+model1.z[:,:,int(it)+1]*(it%1)
        rho1=sw.pden(salt1,temp1,-z1,0.0*z1 )
        rho1=rho1-1e3
        
        z2,rho2=plotter.z_interp(z1,rho1,np.array([-300.,0]))
        print([np.nanmax(rho2),np.nanmin(rho2)])
        cplot1=ax1.contourf(np.reshape(model1.lon[:,0],(-1,1))+np.zeros(np.shape(z2)),-z2,rho2,[tick1 for tick1 in np.arange(18.,28.01,.5)],cmap=cmap,extend='neither')
        bcplot1=ax1.contour(model1.lon,-z1,salt1,levels=[31.5],colors='k',linewidths=.5,linestyles='-')
        bcplot.append(bcplot1)
        bcplot2=ax1.contour(model1.lon,-z1,rho1,levels=[26.5],linestyles='--',colors='k',linewidths=.5)
        bcplot.append(bcplot2)
        
        #Coast   
        h=-z1[:,0]; h[np.isnan(h)]=3.
        h=np.concatenate(([10e3],h,[10e3]))
        hlon=np.concatenate(([-135.],model1.lon[:,0],[-120.]))
        p=patch.Polygon(np.column_stack((hlon,h)),facecolor=(.5,.6,.5),edgecolor=None,closed=True)
        ax1.add_patch(p)
    
        tLim1=3.*int((t-2)/3.)+np.array([2.,5.])
        obsll=[(lon1,z1) for (lon1,z1,type1,t1) in zip(obs['lon'],obs['z'],obs['type'],obs['t']) if type1==6 and tLim1[0]<=t1<=tLim1[1]]
        lon1,z1=zip(*obsll)
        gplot1=ax1.plot([np.min(lon1),np.min(lon1),np.max(lon1),np.max(lon1)],
                         [np.min(z1),np.max(z1),np.max(z1),np.min(z1)],':',
                         color=(.5,.5,.5),linewidth=1.)
        
        #Colorbar
        cax=fig.add_axes([.855,.15,.02,.75])
        cbar=fig.colorbar(cplot1,cax=cax,orientation='vertical',spacing='proportional')
        cbar.set_ticks([tick1 for tick1 in np.arange(18.0,28.01,1.)])
        cbar.formatter=ticker.FuncFormatter(lambda x,pos:'{:.0f}'.format(x))
        cbar.update_ticks()
        cbar.set_label(r'Density-$10^3$ [$\mathrm{kg m^{-3}}$]')
        
        cplot.append(cplot1)
        gplot.append(gplot1)
        
        #plt.tight_layout()
        
    return cplot,gplot,bcplot
Example #5
def sigmatheta(s, t, p, pr=0):
    """
    :math:`\\sigma_{\\theta}` is a measure of the density of ocean water in
    which the quantity :math:`\\sigma_{t}` is calculated using the potential
    temperature (:math:`\\theta`) rather than the in situ temperature, i.e.
    the potential density anomaly of a water mass relative to the specified
    reference pressure.

    Parameters
    ----------
    s(p) : array_like
           salinity [psu (PSS-78)]
    t(p) : array_like
           temperature [:math:`^\\circ` C (ITS-90)]
    p : array_like
        pressure [db]
    pr : number
         reference pressure [db], default = 0

    Returns
    -------
    sgmte : array_like
           potential density anomaly [kg m :sup:`-3`]

    Examples
    --------
    >>> # Data from UNESCO Tech. Paper in Marine Sci. No. 44, p22.
    >>> from seawater.library import T90conv
    >>> from oceans import sw_extras as swe
    >>> s = [0, 0, 0, 0, 35, 35, 35, 35]
    >>> t = T90conv([0, 0, 30, 30, 0, 0, 30, 30])
    >>> p = [0, 10000, 0, 10000, 0, 10000, 0, 10000]
    >>> swe.sigmatheta(s, t, p)
    array([ -0.157406  ,  -0.20476006,  -4.34886626,  -3.63884068,
            28.10633141,  28.15738545,  21.72863949,  22.59634627])

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
    computation of fundamental properties of seawater. UNESCO Tech. Pap. in
    Mar. Sci., No. 44, 53 pp.  Eqn.(31) p.39.
    http://www.scor-int.org/Publications.htm

    .. [2] Millero, F.J., Chen, C.T., Bradshaw, A., and Schleicher, K. A new
    high pressure equation of state for seawater. Deep-Sea Research, 1980,
    Vol. 27A, pp. 255-264. doi:10.1016/0198-0149(80)90016-3

    """
    s, t, p, pr = list(map(np.asanyarray, (s, t, p, pr)))
    return sw.pden(s, t, p, pr) - 1000.0
Example #6
def sigmatheta(s, t, p, pr=0):
    """
    :math:`\\sigma_{\\theta}` is a measure of the density of ocean water in
    which the quantity :math:`\\sigma_{t}` is calculated using the potential
    temperature (:math:`\\theta`) rather than the in situ temperature, i.e.
    the potential density anomaly of a water mass relative to the specified
    reference pressure.

    Parameters
    ----------
    s(p) : array_like
           salinity [psu (PSS-78)]
    t(p) : array_like
           temperature [:math:`^\\circ` C (ITS-90)]
    p : array_like
        pressure [db]
    pr : number
         reference pressure [db], default = 0

    Returns
    -------
    sgmte : array_like
           potential density anomaly [kg m :sup:`-3`]

    Examples
    --------
    >>> # Data from UNESCO Tech. Paper in Marine Sci. No. 44, p22.
    >>> from seawater.library import T90conv
    >>> import oceans.sw_extras.sw_extras as swe
    >>> s = [0, 0, 0, 0, 35, 35, 35, 35]
    >>> t = T90conv([0, 0, 30, 30, 0, 0, 30, 30])
    >>> p = [0, 10000, 0, 10000, 0, 10000, 0, 10000]
    >>> swe.sigmatheta(s, t, p)
    array([-0.157406  , -0.20476006, -4.34886626, -3.63884068, 28.10633141,
           28.15738545, 21.72863949, 22.59634627])

    References
    ----------
    Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
    computation of fundamental properties of seawater. UNESCO Tech. Pap. in
    Mar. Sci., No. 44, 53 pp.  Eqn.(31) p.39.
    http://www.scor-int.org/Publications.htm

    Millero, F.J., Chen, C.T., Bradshaw, A., and Schleicher, K. A new
    high pressure equation of state for seawater. Deep-Sea Research, 1980,
    Vol. 27A, pp. 255-264. doi:10.1016/0198-0149(80)90016-3

    """
    s, t, p, pr = list(map(np.asanyarray, (s, t, p, pr)))
    return sw.pden(s, t, p, pr) - 1000.0
Example #7
    def increasing_dens(self,
                        temperature=False,
                        qtemp=[],
                        salinity=False,
                        qsalt=[],
                        pressure=False,
                        qpres=[],
                        min_delta=0.01,
                        qf_ignore=['B', 'S', '?']):

        temp = self._convert_to_np_array(temperature)
        salt = self._convert_to_np_array(salinity)
        pres = self._convert_to_np_array(pressure)

        dens = sw.pden(salt, temp, pres, 0)

        dens_temp = False

        qfindex = np.full((len(temp)), False)
        new_qf = np.array([''] * len(temp))

        for i, d in enumerate(dens):
            if len(qtemp) > 0:
                if qtemp[i] in qf_ignore:
                    qfindex[i] = True
                    continue
            if len(qsalt) > 0:
                if qsalt[i] in qf_ignore:
                    qfindex[i] = True
                    continue
            if len(qpres) > 0:
                if qpres[i] in qf_ignore:
                    qfindex[i] = True
                    continue

            if dens_temp:
                if dens_temp > d:  # not increasing = bad
                    if (dens_temp - d) >= min_delta:
                        qfindex[i] = True

            dens_temp = d

        new_qf[qfindex] = 'B'

        qfindex_numeric = np.arange(len(temp))
        qfindex_numeric = qfindex_numeric[qfindex]

        return qfindex, new_qf, qfindex_numeric
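The core of this check, flagging a sample whenever potential density drops by at least min_delta relative to the previous sample, can be sketched on plain arrays (made-up values; python-seawater assumed):

import numpy as np
import seawater as sw

temp = np.array([12.0, 11.5, 11.6, 9.0])    # [deg C]
salt = np.array([34.0, 34.1, 33.2, 34.4])   # [PSU]
pres = np.array([5., 15., 25., 35.])        # [dbar]
min_delta = 0.01

dens = sw.pden(salt, temp, pres, 0)
flagged = np.zeros(dens.shape, dtype=bool)
prev = None
for i, d in enumerate(dens):
    if prev is not None and (prev - d) >= min_delta:   # density decreased -> inversion
        flagged[i] = True
    prev = d
# flagged marks samples where the profile is not monotonically increasing in density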
Example #8
def getSpice(pden, temp, sal):

    import seawater as sw
    import numpy as np

    beta = 7.7e-4 * 1000
    alpha = 2.0e-4 * 1000
    Sline = np.array([33.2, 33.8])
    Tline = np.array([7.75, 6.6])
    Sarray = np.arange(30, 35, 0.01)
    m = (np.diff(Tline) / np.diff(Sline))
    Tarray = m * (Sarray - Sline[0]) + Tline[0]
    Pdarray = sw.pden(Sarray, Tarray, 100. + 0. * Tarray, 0.)
    #plot(Pdarray)
    S0 = np.interp(pden, Pdarray, Sarray)
    T0 = m * (S0 - Sline[0]) + Tline[0]
    dT = -(T0 - temp)
    dS = -(S0 - sal)
    tau = beta * (dS) + alpha * (dT)
    return tau
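A brief usage sketch with made-up values (note this is the line-referenced spiciness proxy defined above, not the gsw spiciness functions):

import numpy as np
import seawater as sw

temp = np.array([7.2, 6.9])          # [deg C]
salt = np.array([33.5, 33.6])        # [PSU]
pden = sw.pden(salt, temp, 100. + 0. * temp, 0.)   # potential density near 100 dbar

tau = getSpice(pden, temp, salt)     # positive = warmer/saltier than the reference T-S line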
Example #9
def ctdproc(lista,
            temp_name='t068C',
            lathint='Latitude =',
            lonhint='Longitude =',
            cond_name='c0S/m',
            press_name='prDM',
            down_cast=True,
            looped=True,
            hann_f=False,
            hann_block=20,
            hann_times=2,
            latline=[],
            lonline=[]):
    '''
    This function does the basic processing of all .cnv CTD data from the
    given list.
    '''
    for fname in lista:

        lon, lat, data = ctdread(fname,
                                 press_name=press_name,
                                 down_cast=down_cast,
                                 lathint=lathint,
                                 lonhint=lonhint,
                                 lonline=lonline,
                                 latline=latline)

        if looped:
            data = loopedit(data)

        dataname = basename(fname)[1]

        if (data.shape[0] < 101) & (
                data.shape[0] >
                10):  # if the profile has fewer than 101 measurements

            if (data.shape[0] // 2) % 2 == 0:  # if half the profile length is even
                blk = (data.shape[0] // 2) + 1  # block = half + 1
            else:
                blk = data.shape[0] // 2  # if odd, the block is the half

            # remove spikes from the temperature and conductivity profiles
            data = despike(data, propname=temp_name, block=blk, wgth=2)
            data = despike(data, propname=cond_name, block=blk, wgth=2)
        elif data.shape[0] >= 101:
            # for profiles with more than 101 measurements, use blocks of 101
            data = despike(data, propname=temp_name, block=101, wgth=2)
            data = despike(data, propname=cond_name, block=101, wgth=2)
        else:
            print('cast too shallow')

        # bin average into 1 m bins
        data = binning(data, delta=1.)
        if temp_name == 't068C':
            data['t090C'] = gsw.t90_from_t68(data['t068C'])

        data['sp'] = gsw.SP_from_C(data[cond_name] * 10, data['t090C'],
                                   data.index.values)

        if hann_f:
            times = 0
            while times < hann_times:
                data = hann_filter(data, 't090C', hann_block)
                data = hann_filter(data, 'sp', hann_block)
                times += 1

        data['pt'] = sw.ptmp(data['sp'], data['t090C'], data.index.values)
        #data['ct'] = gsw.CT_from_pt(data['sa'],data['pt'])
        data['psigma0'] = sw.pden(
            data['sp'], data['t090C'], data.index.values, pr=0) - 1000
        data['psigma1'] = sw.pden(
            data['sp'], data['t090C'], data.index.values, pr=1000) - 1000
        data['psigma2'] = sw.pden(
            data['sp'], data['t090C'], data.index.values, pr=2000) - 1000
        data['gpan'] = sw.gpan(data['sp'], data['t090C'], data.index.values)
        data['lat'] = lat
        data['lon'] = lon

        data.to_pickle(
            os.path.split(fname)[0] + '/' +
            os.path.splitext(os.path.split(fname)[1])[0])

        print(dataname)
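The salinity step above hinges on units: gsw.SP_from_C expects conductivity in mS/cm, hence the factor of 10 applied to the Sea-Bird c0S/m channel; a minimal sketch with made-up numbers:

import numpy as np
import gsw

cond_Sm = np.array([3.5, 3.6])                      # conductivity [S/m], as in the c0S/m channel
t90 = gsw.t90_from_t68(np.array([10.002, 8.001]))   # IPTS-68 -> ITS-90 temperature
pres = np.array([10.0, 50.0])                       # pressure [dbar]

sp = gsw.SP_from_C(cond_Sm * 10.0, t90, pres)       # practical salinity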
## Get data from dives
data1 = func1(files1,get_dives=data1_loc[3][inds1])
data2 = func2(files2,get_dives=data2_loc[3][inds2])

## filter all data with median filters
good_inds_1 = [np.isfinite(np.array(T.astype('float32'))) & np.isfinite(np.array(S.astype('float32'))) & np.isfinite(np.array(b.astype('float32')))
               for T,S,b in zip(data1[4],data1[5],data1[7])]
if inst1 == 'BB2FLSG': # Seaglider VSF data is already filtered
    filtered_dep1 = [var[i] for var,i in zip(data1[6],good_inds_1)]
    filtered_VSF1 = [var[i] for var,i in zip(data1[7],good_inds_1)]
else:
    filtered_dep1 = [get_data.median_filter(var.astype('float32')[i]) for var,i in zip(data1[6],good_inds_1)]
    filtered_VSF1 = [get_data.median_filter(var.astype('float32')[i]) for var,i in zip(data1[7],good_inds_1)]
filtered_T1 = [np.interp(fd,d.astype('float32')[i],var.astype('float32')[i]) for fd,d,var,i in zip(filtered_dep1,data1[6],data1[4],good_inds_1)]
filtered_S1 = [np.interp(fd,d.astype('float32')[i],var.astype('float32')[i]) for fd,d,var,i in zip(filtered_dep1,data1[6],data1[5],good_inds_1)]
filtered_dens1 = [sw.pden(S,T,sw.pres(z,lat=CENTRAL_LAT))
                  for T,S,z in zip(filtered_T1,filtered_S1,filtered_dep1)]

good_inds_2 = [np.isfinite(T.astype('float32')) & np.isfinite(S.astype('float32')) & np.isfinite(b.astype('float32'))
               for T,S,b in zip(data2[4],data2[5],data2[7])]
if inst2 == 'BB2FLSG': # Seaglider VSF data is already filtered
    filtered_dep2 = [var[i] for var,i in zip(data2[6],good_inds_2)]
    filtered_VSF2 = [var[i] for var,i in zip(data2[7],good_inds_2)]
else:
    filtered_dep2 = [get_data.median_filter(var.astype('float32')[i]) for var,i in zip(data2[6],good_inds_2)]
    filtered_VSF2 = [get_data.median_filter(var.astype('float32')[i]) for var,i in zip(data2[7],good_inds_2)]
filtered_T2 = [np.interp(fd,d.astype('float32')[i],var.astype('float32')[i]) for fd,d,var,i in zip(filtered_dep2,data2[6],data2[4],good_inds_2)]
filtered_S2 = [np.interp(fd,d.astype('float32')[i],var.astype('float32')[i]) for fd,d,var,i in zip(filtered_dep2,data2[6],data2[5],good_inds_2)]
filtered_dens2 = [sw.pden(S,T,sw.pres(z,lat=CENTRAL_LAT))
                  for T,S,z in zip(filtered_T2,filtered_S2,filtered_dep2)]
T = Tm[np.newaxis, ...] + Ta
S = Sm[np.newaxis, ...] + Sa

# A transect at 165
ilon = 19

lon = lon[ilon]

T = T[..., ilon]
S = S[..., ilon]

D = np.empty_like(S)

for i in range(12):
    D[i] = sw.pden(S[i], T[i], Pm[..., np.newaxis], pr=0) - 1000

cs = np.array([22., 22.5, 23., 23.5, 24., 24.5, 25., 25.5, 26., 26.5])


def plot_dens(month=0):
    plt.contourf(lat,
                 Pm,
                 D[month],
                 cs,
                 vmin=22.,
                 vmax=26.5,
                 cmap=cmocean.cm.dense,
                 extend='both')
    plt.contour(lat, Pm, D[month], cs, colors='k')
    plt.ylim(450, 0)
        if tt == 0:
            bp_arr = (0 * zeta) * np.ones((NT, 1, 1))
        salt = ds1['salt'][0, :, :, :].squeeze()
        ptemp = ds1['temp'][0, :, :, :].squeeze()  # potential temperature
        z_r = zrfun.get_z(G['h'][:, :], 0 * zeta, S, only_rho=True)
        z_w = zrfun.get_z(G['h'][:, :], zeta, S, only_w=True)
        p = seawater.pres(-z_r, G['lat_rho'][0, 0])
        temp = seawater.temp(salt, ptemp, p)  # in situ temperature
        # for some reason seawater.dens throws errors if we don't do this
        sd = salt.data
        td = temp.data
        prd = p.data
        sd[salt.mask] = np.nan
        td[salt.mask] = np.nan
        prd[salt.mask] = np.nan
        prho = seawater.pden(sd, td, prd)  # potential density
        prho = np.ma.masked_where(salt.mask, prho)
        rho = seawater.dens(
            sd, td,
            prd)  # in-situ density from salinity, temperature, and pressure
        rho = np.ma.masked_where(salt.mask, rho)
        DZ = np.diff(z_w, axis=0)
        bp_arr[tt, :, :] = (g * rho * DZ).sum(axis=0)  # sums vertically
    ds1.close()
bp_mean = np.mean(
    bp_arr, axis=0)  # mean pressure of given location for entire time range
bp_anom = bp_arr - bp_mean

# initialize output Dataset
ds2 = nc.Dataset(out_fn, 'w')
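The bottom-pressure step above is a discrete hydrostatic integral, summing g*rho*dz over the water column with in situ density; a minimal single-column sketch with made-up values:

import numpy as np
import seawater

g = 9.8
salt = np.array([34.0, 34.2, 34.5])     # [PSU], one value per layer
ptemp = np.array([12.0, 8.0, 4.0])      # potential temperature [deg C]
pres = np.array([5., 50., 150.])        # layer-centre pressure [dbar]
DZ = np.array([10., 80., 120.])         # layer thicknesses [m]

temp = seawater.temp(salt, ptemp, pres)   # in situ temperature
rho = seawater.dens(salt, temp, pres)     # in situ density [kg/m3]
bp = (g * rho * DZ).sum()                 # bottom pressure [Pa]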
        for var, i in zip(data1[7], good_inds_1)
    ]
filtered_T1 = [
    np.interp(fd,
              d.astype('float32')[i],
              var.astype('float32')[i])
    for fd, d, var, i in zip(filtered_dep1, data1[6], data1[4], good_inds_1)
]
filtered_S1 = [
    np.interp(fd,
              d.astype('float32')[i],
              var.astype('float32')[i])
    for fd, d, var, i in zip(filtered_dep1, data1[6], data1[5], good_inds_1)
]
filtered_dens1 = [
    sw.pden(S, T, sw.pres(z, lat=CENTRAL_LAT))
    for T, S, z in zip(filtered_T1, filtered_S1, filtered_dep1)
]

good_inds_2 = [
    np.isfinite(T.astype('float32')) & np.isfinite(S.astype('float32'))
    & np.isfinite(b.astype('float32'))
    for T, S, b in zip(data2[4], data2[5], data2[7])
]
if inst2 == 'BB2FLSG':  # Seaglider VSF data is already filtered
    filtered_dep2 = [var[i] for var, i in zip(data2[6], good_inds_2)]
    filtered_VSF2 = [var[i] for var, i in zip(data2[7], good_inds_2)]
else:
    filtered_dep2 = [
        get_data.median_filter(var.astype('float32')[i])
        for var, i in zip(data2[6], good_inds_2)
Example #14
File: roms.py Project: apaloczy/ap_tools
def pe(avgfile,
       grdfile,
       gridid=None,
       maskfile='/media/Armadillo/bkp/lado/MSc/work/ROMS/plot_outputs3/msk_shelf.npy',
       normalize=False,
       verbose=True):
    """
	USAGE
	-----
	t, pe = pe(avgfile, grdfile, gridid=None, maskfile='/media/Armadillo/bkp/lado/MSc/work/ROMS/plot_outputs3/msk_shelf.npy', normalize=False, verbose=True):
	Calculates Potential Energy (PE) change integrated within a control volume
	for each time record of a ROMS *.avg or *.his file. The change is computed relative to
	the initial conditions, i.e., rhop(x,y,z,t=ti) = rho(x,y,z,t=ti) - rho(x,y,z,t=t0).

                                          [-g*(rhop^2)]
	PE = Integrated in a control volume V [-----------]     # [J]
                                          [ 2*drhodz  ]

	If 'normalize' is set to 'True', then PE/V (mean PE density [J/m3]) is returned instead.
	Reference:
	----------
	Cushman-Roisin (1994): Introduction to Geophysical Fluid Dynamics, page 213,
	Combination of Equations 15-29 and 15-30.
	"""
    print("Loading outputs and grid.")

    ## Get outputs.
    avg = Dataset(avgfile)

    ## Load domain mask.
    if maskfile:
        mask = np.load(maskfile)
        if type(mask[0, 0]) == np.bool_:
            pass
        else:
            mask = mask == 1.

    ## Getting mask indices.
    ilon, ilat = np.where(mask)

    ## Get grid, time-dependent free-surface and topography.
    zeta = avg.variables['zeta']
    grd = pyroms.grid.get_ROMS_grid(gridid,
                                    zeta=zeta,
                                    hist_file=avgfile,
                                    grid_file=grdfile)

    ## Get time.
    t = avg.variables['ocean_time'][:]
    t = t - t[0]
    nt = t.size

    ## Get grid coordinates at RHO-points.
    lonr, latr = avg.variables['lon_rho'][:], avg.variables['lat_rho'][:]

    ## Get grid spacings at RHO-points.
    ## Find cell widths.
    dx = grd.hgrid.dx  # Cell width in the XI-direction.
    dy = grd.hgrid.dy  # Cell width in the ETA-direction.
    if maskfile:
        dA = dx[mask] * dy[mask]
    else:
        dA = dx * dy

    ## Get temp, salt.
    temp = avg.variables['temp']
    salt = avg.variables['salt']

    ## Find cell heights (at ti=0).
    zw = grd.vgrid.z_w[0, :]  # Cell depths (at ti=0).
    if maskfile:
        dz = zw[1:, ilat, ilon] - zw[:-1, ilat, ilon]  # Cell height.
    else:
        dz = zw[1:, :] - zw[:-1, :]
    dz = 0.5 * (dz[1:, :] + dz[:-1, :])  # Cell heights at W-points.

    ## Get pres, g and pden (at ti=0).
    p0 = -zw  # Approximation, for computational efficiency.
    p0 = 0.5 * (p0[1:, :] + p0[:-1, :])

    if maskfile:
        rho0 = pden(salt[0, :, ilat, ilon],
                    temp[0, :, ilat, ilon],
                    p0[:, ilat, ilon],
                    pr=0.)
    else:
        rho0 = pden(salt[0, :], temp[0, :], p0, pr=0.)

    if maskfile:
        g = grav(latr[mask])
    else:
        g = grav(latr)

    drho0 = rho0[1:, :] - rho0[:-1, :]
    rho0z = drho0 / dz  # Background potential density vertical gradient.

    PE = np.array([])
    for ti in range(nt):
        tp = ti + 1
        print("Processing time record %s of %s" % (tp, nt))

        if maskfile:
            rhoi = pden(salt[ti, :, ilat, ilon],
                        temp[ti, :, ilat, ilon],
                        p0[:, ilat, ilon],
                        pr=0.)
        else:
            rhoi = pden(salt[ti, :], temp[ti, :], p0, pr=0.)

        rhop = rhoi - rho0  # Density anomaly, i.e., rho(x,y,z,t=ti) - rho(x,y,z,t=0)
        rhop = 0.5 * (rhop[1:, :] + rhop[:-1, :])

        ## Find cell heights.
        zw = grd.vgrid.z_w[ti, :]  # Cell depths (at ti=0).
        if maskfile:
            dz = zw[1:, ilat, ilon] - zw[:-1, ilat, ilon]  # Cell height.
        else:
            dz = zw[1:, :] - zw[:-1, :]

        ## Find cell volumes.
        print(dx.shape, dy.shape, dz.shape)
        dV = dA * dz  # [m3]
        dV = 0.5 * (dV[1:, :] + dV[:-1, :])

        ## Gravitational Available Potential Energy density (energy/volume).
        print(g.shape)
        print(rhop.shape)
        print(rho0z.shape)
        pe = -g * (rhop**2) / (2 * rho0z)  # [J/m3]

        ## Do volume integral to calculate Gravitational Available Potential Energy of the control volume.
        Pe = np.sum(pe * dV)  # [J]

        if normalize:
            V = dV.sum()
            Pe = Pe / V
            print("")
            print("Total volume of the control volume is %e m3." % V)
            print(
                "Normalizing PE by this volume, i.e., mean PE density [J/m3].")
            print("")

        if verbose:
            if normalize:
                print("PE = %e J/m3" % Pe)
            else:
                print("PE = %e J" % Pe)

        PE = np.append(PE, Pe)

    return t, PE
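The integrand in the docstring, pe = -g*(rhop^2)/(2*drhodz), can be evaluated in isolation for one profile; a minimal sketch with made-up values, ordered bottom to top as in the ROMS s-coordinate (the background gradient is negative for stable stratification, so pe comes out positive):

import numpy as np

g = 9.81
rho0 = np.array([1026.2, 1025.5, 1025.0])   # background potential density [kg/m3], bottom to top
rhoi = np.array([1026.4, 1025.6, 1025.0])   # instantaneous potential density [kg/m3]
dz = np.array([10.0, 10.0])                 # cell heights [m]

rhop = rhoi - rho0                          # density anomaly
rhop_w = 0.5 * (rhop[1:] + rhop[:-1])       # anomaly at the interior interfaces
rho0z = np.diff(rho0) / dz                  # background vertical density gradient (< 0)
pe = -g * rhop_w**2 / (2.0 * rho0z)         # APE density [J/m3]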
Example #15
File: roms.py Project: apaloczy/ap_tools
def energy_diagnostics(avgfile,
                       grdfile,
                       rho0=1025.,
                       gridid=None,
                       maskfile='msk_shelf.npy',
                       normalize=True,
                       verbose=True):
    """
	USAGE
	-----
	t, HKE, TPE = energy_diagnostics(avgfile, grdfile, rho0=1025., gridid=None, maskfile='msk_shelf.npy', normalize=True, verbose=True)

	Calculates volume-integrated Horizontal Kinetic Energy (HKE) and Total Potential Energy (TPE)
	within a control volume for each time record of a ROMS *.avg or *.his file.
	"""
    avg = Dataset(avgfile)

    print("Loading outputs and grid.")
    ## Load domain mask.
    if maskfile:
        mask = np.load(maskfile)
        if type(mask[0, 0]) == np.bool_:
            pass
        else:
            mask = mask == 1.

    ## Getting mask indices.
    ilon, ilat = np.where(mask)

    ## Getting velocity field.
    try:
        U = avg.variables['u']
        V = avg.variables['v']
        uvrho3 = False
    except KeyError:
        U = avg.variables['u_eastward']
        V = avg.variables['v_northward']
        uvrho3 = True

    ## Get temp, salt.
    temp = avg.variables['temp']
    salt = avg.variables['salt']

    ## Get grid, time-dependent free-surface and topography.
    zeta = avg.variables['zeta']
    grd = pyroms.grid.get_ROMS_grid(gridid,
                                    zeta=zeta,
                                    hist_file=avgfile,
                                    grid_file=grdfile)

    ## Find cell widths at RHO-points.
    dx = grd.hgrid.dx  # Cell width in the XI-direction.
    dy = grd.hgrid.dy  # Cell width in the ETA-direction.
    if maskfile:
        dA = dx[mask] * dy[mask]
    else:
        dA = dx * dy

    ## Get pres, g and pden (at ti=0).
    p0 = -grd.vgrid.z_r[0, :]  # Approximation, for computational efficiency.

    if maskfile:
        g = grav(avg.variables['lat_rho'][:][mask])
    else:
        g = grav(avg.variables['lat_rho'][:])

    ## Get time.
    t = avg.variables['ocean_time'][:]
    t = t - t[0]
    nt = t.size

    KE = np.array([])
    PE = np.array([])
    for ti in range(nt):
        tp = ti + 1
        print("")
        print("Processing time record %s of %s" % (tp, nt))

        print("Calculating density.")
        if maskfile:
            rho = pden(salt[ti, :, ilat, ilon],
                       temp[ti, :, ilat, ilon],
                       p0[:, ilat, ilon],
                       pr=0.)
        else:
            rho = pden(salt[ti, :], temp[ti, :], p0, pr=0.)

        print("Loading velocities.")
        uu = U[ti, :]
        vv = V[ti, :]

        if not uvrho3:
            # Calculate u and v at PSI-points.
            u = 0.5 * (uu[:, 1:, :] + uu[:, :-1, :])
            v = 0.5 * (vv[:, :, 1:] + vv[:, :, :-1])
            # Calculate rho at PSI-points.
            rhop = 0.5 * (rho[:, 1:, :] + rho[:, :-1, :])
            rhop = 0.5 * (rhop[:, :, 1:] + rhop[:, :, :-1])
            if maskfile:
                u = u[:, ilat + 1, ilon + 1]
                v = v[:, ilat + 1, ilon + 1]
                rhop = rhop[:, ilat + 1, ilon + 1]
            else:
                pass
        else:
            # U and V both at RHO-points, no reshaping necessary.
            if maskfile:
                u = uu[:, ilat, ilon]
                v = vv[:, ilat, ilon]
            else:
                u = uu
                v = vv

        ## Find cell depths, invert z-axis direction (downward).
        if maskfile:
            z = -grd.vgrid.z_r[ti, :, ilat, ilon]
        else:
            z = -grd.vgrid.z_r[ti, :]

        ## Find cell heights.
        zw = grd.vgrid.z_w[ti, :]
        if maskfile:
            dz = zw[1:, ilat, ilon] - zw[:-1, ilat, ilon]  # Cell height.
        else:
            dz = zw[1:, :] - zw[:-1, :]

        ## Find cell volumes.
        dV = dA * dz  # [m3]

        print("Squaring velocities.")
        u2v2 = u * u + v * v

        ## Total Potential Energy (TPE) density relative to z=0 (energy/volume).
        pe = rho * g * z  # [J/m3]

        ## Horizontal Kinetic Energy (HKE) density (energy/volume).
        if not uvrho3:
            ke = 0.5 * rhop * u2v2  # [J/m3]
        else:
            ke = 0.5 * rho * u2v2  # [J/m3]

        ## Do volume integral to calculate TPE/HKE of the control volume.
        Pe = np.sum(pe * dV)  # [J]
        Ke = np.sum(ke * dV)  # [J]

        if normalize:
            Vol = dV.sum()
            Pe = Pe / Vol
            Ke = Ke / Vol
            if verbose and tp == 1:
                print("")
                print("Total volume of the control volume is %e m3." % Vol)
                print(
                    "Normalizing TPE/HKE by this volume, i.e., mean TPE/HKE density [J/m3]."
                )
                print("")

        if verbose:
            print("")
            if normalize:
                print("TPE/vol = %e J/m3" % Pe)
                print("HKE/vol = %e J/m3" % Ke)
            else:
                print("TPE = %e J" % Pe)
                print("HKE = %e J" % Ke)

        PE = np.append(PE, Pe)
        KE = np.append(KE, Ke)

    return t, KE, PE
Example #16
def do_load_mfdata(mesh, data, fname_list, var_list, sel_timeidx, sel_levidx, \
                   nsi, ndi, do_tmean, do_output,):    
    if ndi!=0:
        
        #_______________________________________________________________________
        # select time+depth range + compute time mean
        if do_tmean:
            data.value = MFDataset(fname_list[0],'r').variables[var_list[0]][sel_timeidx,:,sel_levidx].mean(axis=0)
            if len(fname_list[1]): data.value2 = MFDataset(fname_list[1],'r').variables[var_list[1]][sel_timeidx,:,sel_levidx].mean(axis=0)
            if len(fname_list[2]): data.value3 = MFDataset(fname_list[2],'r').variables[var_list[2]][sel_timeidx,:,sel_levidx].mean(axis=0)
        else:
            data.value = MFDataset(fname_list[0],'r').variables[var_list[0]][sel_timeidx,:,sel_levidx]
            if len(fname_list[1]): data.value2 = MFDataset(fname_list[1],'r').variables[var_list[1]][sel_timeidx,:,sel_levidx]
            if len(fname_list[2]): data.value3 = MFDataset(fname_list[2],'r').variables[var_list[2]][sel_timeidx,:,sel_levidx]
        
        #_______________________________________________________________________
        # compute potential density & temperature if selected
        if any(x in data.var for x in ['pdens','ptemp','sigma']):
            dep   = np.matlib.repmat(mesh.zmid[sel_levidx],nsi,1)
            lat   = np.matlib.repmat(mesh.nodes_2d_yg[0:mesh.n2dn],len(sel_levidx),1).transpose()
            press = sw.pres(dep,lat)
            press_ref = 0
            if   '0' in data.var : press_ref=0
            elif '1' in data.var : press_ref=1000
            elif '2' in data.var : press_ref=2000
            elif '3' in data.var : press_ref=3000
            elif '4' in data.var : press_ref=4000
            elif '5' in data.var : press_ref=5000
            if 'sigma' in data.var: data.lname = '$\sigma_{'+str(int(press_ref/1000))+'}$ '+data.lname
            del dep,lat
            if do_tmean:
                if 'ptemp' in data.var: data.value = sw.ptmp(data.value2,data.value,press,press_ref)
                if any(x in data.var for x in ['pdens','sigma']): data.value = sw.pden(data.value2,data.value,press,press_ref)-1000.025 
            else:
                for it in range(0,data.value.shape[0]):
                    if 'ptemp' in data.var: data.value[it,:,:] = sw.ptmp(data.value2[it,:,:],data.value[it,:,:],press,press_ref)
                    if any(x in data.var for x in ['pdens','sigma']): data.value[it,:,:] = sw.pden(data.value2[it,:,:],data.value[it,:,:],press,press_ref)-1000.025 
            fname_list[1]=[]
            
        #_______________________________________________________________________    
        # compute depth mean + linear interpolation to selected depth levels
        if do_tmean:
            data.value = do_zinterp(mesh, data.value, data.depth, ndi, data.value.shape[0], sel_levidx,do_output)
            if len(fname_list[1]): data.value2 = do_zinterp(mesh, data.value2, data.depth, ndi, data.value2.shape[0], sel_levidx,do_output)
            if len(fname_list[2]): data.value3 = do_zinterp(mesh, data.value3, data.depth, ndi, data.value3.shape[0], sel_levidx,do_output)
        else:
            data.value = do_zinterp(mesh, data.value, data.depth, ndi, data.value.shape[1], sel_levidx,do_output)
            if len(fname_list[1]): data.value2 = do_zinterp(mesh, data.value2, data.depth, ndi, data.value2.shape[1], sel_levidx,do_output)
            if len(fname_list[2]): data.value3 = do_zinterp(mesh, data.value3, data.depth, ndi, data.value3.shape[1], sel_levidx,do_output)
        
    # 2D data:
    else: 
        
        if do_tmean:
            data.value = MFDataset(fname_list[0],'r').variables[var_list[0]][sel_timeidx,:].mean(axis=0)
            if len(fname_list[1]): data.value2 = MFDataset(fname_list[1],'r').variables[var_list[1]][sel_timeidx,:].mean(axis=0)
            if len(fname_list[2]): data.value3 = MFDataset(fname_list[2],'r').variables[var_list[2]][sel_timeidx,:].mean(axis=0)
        else:
            data.value = MFDataset(fname_list[0],'r').variables[var_list[0]][sel_timeidx,:]
            if len(fname_list[1]): data.value2 = MFDataset(fname_list[1],'r').variables[var_list[1]][sel_timeidx,:]
            if len(fname_list[2]): data.value3 = MFDataset(fname_list[2],'r').variables[var_list[2]][sel_timeidx,:]
    
    # kickout single array dimension
    data.value = data.value.squeeze()
    if len(fname_list[1]): data.value2 = data.value2.squeeze()
    if len(fname_list[2]): data.value3 = data.value3.squeeze()
    
    return(data) 
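The potential-density branch above reduces to: build pressure from depth and latitude with sw.pres, pick the reference pressure from the variable name, and subtract the offset; a minimal sketch for the 'sigma2' case (made-up values; depths assumed positive here):

import numpy as np
import seawater as sw

dep = np.array([10., 500., 2000.])     # depth [m]
lat = np.array([60., 60., 60.])        # latitude [deg N]
press = sw.pres(dep, lat)              # pressure [dbar]

salt = np.array([34.8, 34.9, 34.95])   # practical salinity
temp = np.array([5.0, 3.0, 1.5])       # in situ temperature [deg C]

press_ref = 2000                        # as selected for a 'sigma2'-style variable name
sigma2 = sw.pden(salt, temp, press, press_ref) - 1000.025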
Example #17
def do_load_dataloop(mesh, data, fname_list, var_list, sel_timeidx, sel_levidx, \
                   nti, nsi, ndi, do_tmean, do_output,):    
    if ndi!=0:
        
        #_______________________________________________________________________
        # initialize data.value array
        if do_tmean:
            data.value = np.zeros((nsi,len(sel_levidx)),dtype='float32')
            if len(fname_list[1]): data.value2 = np.zeros((nsi,len(sel_levidx)),dtype='float32')
            if len(fname_list[2]): data.value3 = np.zeros((nsi,len(sel_levidx)),dtype='float32')
        else:
            data.value = np.zeros((nti,nsi,len(sel_levidx)))
            if len(fname_list[1]): data.value2 = np.zeros((nti*len(fname_list),nsi,len(sel_levidx)),dtype='float32')
            if len(fname_list[2]): data.value3 = np.zeros((nti*len(fname_list),nsi,len(sel_levidx)),dtype='float32')
        
        #_______________________________________________________________________
        # select time+depth range + compute time mean
        for it in range(0,len(fname_list[0])):
            if do_tmean:
                data.value = data.value + Dataset(fname_list[0][it],'r').variables[var_list[0]][sel_timeidx,:,sel_levidx].mean(axis=0)
                if len(fname_list[1]): data.value2 = data.value2 + Dataset(fname_list[1][it],'r').variables[var_list[1]][sel_timeidx,:,sel_levidx].mean(axis=0)
                if len(fname_list[2]): data.value3 = data.value3 + Dataset(fname_list[2][it],'r').variables[var_list[2]][sel_timeidx,:,sel_levidx].mean(axis=0)
            else:
                t_idx = sel_timeidx+nti*it
                data.value[t_idx,:,:] = data.value[t_idx,:,:] + Dataset(fname_list[0][it],'r').variables[var_list[0]][sel_timeidx,:,sel_levidx]
                if len(fname_list[1]): data.value2[t_idx,:,:] = data.value2[t_idx,:,:] + Dataset(fname_list[1][it],'r').variables[var_list[1]][sel_timeidx,:,sel_levidx]
                if len(fname_list[2]): data.value3[t_idx,:,:] = data.value3[t_idx,:,:] + Dataset(fname_list[2][it],'r').variables[var_list[2]][sel_timeidx,:,sel_levidx]
        
        # divide by loaded file number --> to final time mean
        if do_tmean:
            data.value = data.value/len(fname_list[0]) 
            if len(fname_list[1]): data.value2 = data.value2/len(fname_list[1]) 
            if len(fname_list[2]): data.value3 = data.value3/len(fname_list[2])
                
        #_______________________________________________________________________
        # compute potential density & temperature if selected
        if any(x in data.var for x in ['pdens','ptemp','sigma']):
            dep      = np.matlib.repmat(mesh.zmid[sel_levidx],nsi,1)
            lat      = np.matlib.repmat(mesh.nodes_2d_yg[0:mesh.n2dn],nsi,1).transpose()
            press    = sw.pres(dep,lat)
            press_ref= 0
            if   '0' in data.var : press_ref=0
            elif '1' in data.var : press_ref=1000
            elif '2' in data.var : press_ref=2000
            elif '3' in data.var : press_ref=3000
            elif '4' in data.var : press_ref=4000
            elif '5' in data.var : press_ref=5000
            if press_ref!=0: data.lname = '$\sigma_{'+str(int(press_ref/1000))+'}$ '+data.lname
            del dep,lat
            if do_tmean:
                if 'ptemp' in data.var: data.value = sw.ptmp(data.value2,data.value,press,press_ref)
                if any(x in data.var for x in ['pdens','sigma']): data.value = sw.pden(data.value2,data.value,press,press_ref)-1000.025 
            else:
                for it in range(0,data.value.shape[0]):
                    if 'ptemp' in data.var: data.value[it,:,:] = sw.ptmp(data.value2[it,:,:],data.value[it,:,:],press,press_ref)
                    if any(x in data.var for x in ['pdens','sigma']): data.value[it,:,:] = sw.pden(data.value2[it,:,:],data.value[it,:,:],press,press_ref)-1000.025 
            fname_list[1]=[]
            
        #_______________________________________________________________________    
        # compute depth mean + linear interpolation to selected depth levels
        data.value = do_zinterp(mesh, data.value, data.depth, ndi, nsi, sel_levidx,do_output)
        if len(fname_list[1]): data.value2 = do_zinterp(mesh, data.value2, data.depth, ndi, nsi, sel_levidx,do_output)
        if len(fname_list[2]): data.value3 = do_zinterp(mesh, data.value3, data.depth, ndi, nsi, sel_levidx,do_output)
        
    # 2D data:
    else: 
        #_______________________________________________________________________
        # initialize data.value array
        if do_tmean:
            data.value = np.zeros((nsi,))
            if len(fname_list[1]): data.value2 = np.zeros((nsi,))
            if len(fname_list[2]): data.value3 = np.zeros((nsi,))
        else:
            data.value = np.zeros((nti,nsi))
            if len(fname_list[1]): data.value2 = np.zeros((nti*len(fname_list),nsi))
            if len(fname_list[2]): data.value3 = np.zeros((nti*len(fname_list),nsi))
        
        #_______________________________________________________________________
        # select time+depth range + compute time mean
        for it in range(0,len(fname_list[0])):
            if do_tmean:
                data.value = data.value + Dataset(fname_list[0][it],'r').variables[var_list[0]][sel_timeidx,:].mean(axis=0)
                if len(fname_list[1]): data.value2 = data.value2 + Dataset(fname_list[1][it],'r').variables[var_list[1]][sel_timeidx,:].mean(axis=0)
                if len(fname_list[2]): data.value3 = data.value3 + Dataset(fname_list[2][it],'r').variables[var_list[2]][sel_timeidx,:].mean(axis=0)
            else:
                t_idx = sel_timeidx+nti*it
                data.value[t_idx,:] = data.value[t_idx,:] + Dataset(fname_list[0][it],'r').variables[var_list[0]][sel_timeidx,:]
                if len(fname_list[1]): data.value2[t_idx,:] = data.value2[t_idx,:] + Dataset(fname_list[1][it],'r').variables[var_list[1]][sel_timeidx,:]
                if len(fname_list[2]): data.value3[t_idx,:] = data.value3[t_idx,:] + Dataset(fname_list[2][it],'r').variables[var_list[2]][sel_timeidx,:]
                
        # divide by loaded file number --> to final time mean
        if do_tmean:
            data.value = data.value/len(fname_list[0]) 
            if len(fname_list[1]): data.value2 = data.value2/len(fname_list[1]) 
            if len(fname_list[2]): data.value3 = data.value3/len(fname_list[2])
    
    return(data) 
Example #18
File: roms.py Project: paloczy/ap_tools
def energy_diagnostics(avgfile, grdfile, rho0=1025., gridid=None, maskfile='msk_shelf.npy', normalize=True, verbose=True):
	"""
	USAGE
	-----
	t, HKE, TPE = energy_diagnostics(avgfile, grdfile, rho0=1025., gridid=None, maskfile='msk_shelf.npy', normalize=True, verbose=True)

	Calculates volume-integrated Horizontal Kinetic Energy (HKE) and Total Potential Energy (TPE)
	within a control volume for each time record of a ROMS *.avg or *.his file.
	"""
	avg = Dataset(avgfile)

	print("Loading outputs and grid.")
	## Load domain mask.
	if maskfile:
		mask = np.load(maskfile)
		if type(mask[0,0])==np.bool_:
			pass
		else:
			mask=mask==1.

	## Getting mask indices.
	ilon,ilat = np.where(mask)

	## Getting velocity field.
	try:
		U = avg.variables['u']
		V = avg.variables['v']
		uvrho3 = False
	except KeyError:
		U = avg.variables['u_eastward']
		V = avg.variables['v_northward']
		uvrho3 = True

	## Get temp, salt.
	temp = avg.variables['temp']
	salt = avg.variables['salt']

	## Get grid, time-dependent free-surface and topography.
	zeta = avg.variables['zeta']
	grd = pyroms.grid.get_ROMS_grid(gridid, zeta=zeta, hist_file=avgfile, grid_file=grdfile)

	## Find cell widths at RHO-points.
	dx = grd.hgrid.dx             # Cell width in the XI-direction.
	dy = grd.hgrid.dy             # Cell width in the ETA-direction.
	if maskfile:
		dA = dx[mask]*dy[mask]
	else:
		dA = dx*dy

	## Get pres, g and pden (at ti=0).
	p0 = -grd.vgrid.z_r[0,:] # Approximation, for computational efficiency.

	if maskfile:
		g = grav(avg.variables['lat_rho'][:][mask])
	else:
		g = grav(avg.variables['lat_rho'][:])

	## Get time.
	t = avg.variables['ocean_time'][:]
	t = t - t[0]
	nt = t.size

	KE = np.array([])
	PE = np.array([])
	for ti in range(nt):
		tp = ti + 1
		print("")
		print("Processing time record %s of %s"%(tp,nt))

		print("Calculating density.")
		if maskfile:
			rho = pden(salt[ti,:,ilat,ilon],temp[ti,:,ilat,ilon],p0[:,ilat,ilon],pr=0.)
		else:
			rho = pden(salt[ti,:],temp[ti,:],p0,pr=0.)

		print("Loading velocities.")
		uu = U[ti,:]
		vv = V[ti,:]

		if not uvrho3:
			# Calculate u and v at PSI-points.
			u = 0.5*(uu[:,1:,:] + uu[:,:-1,:])
			v = 0.5*(vv[:,:,1:] + vv[:,:,:-1])
			# Calculate rho at PSI-points.
			rhop = 0.5*(rho[:,1:,:] + rho[:,:-1,:])
			rhop = 0.5*(rhop[:,:,1:] + rhop[:,:,:-1])
			if maskfile:
				u = u[:,ilat+1,ilon+1]
				v = v[:,ilat+1,ilon+1]
				rhop = rhop[:,ilat+1,ilon+1]
			else:
				pass
		else:
			# U and V both at RHO-points, no reshaping necessary.
			if maskfile:
				u = uu[:,ilat,ilon]
				v = vv[:,ilat,ilon]
			else:
				u = uu
				v = vv

		## Find cell depths, invert z-axis direction (downward).
		if maskfile:
			z = -grd.vgrid.z_r[ti,:,ilat,ilon]
		else:
			z = -grd.vgrid.z_r[ti,:]

		## Find cell heights.
		zw = grd.vgrid.z_w[ti,:]
		if maskfile:
			dz = zw[1:,ilat,ilon] - zw[:-1,ilat,ilon] # Cell height.
		else:
			dz = zw[1:,:] - zw[:-1,:]

		## Find cell volumes.
		dV = dA*dz # [m3]

		print("Squaring velocities.")
		u2v2 = u*u + v*v

		## Total Potential Energy (TPE) density relative to z=0 (energy/volume).
		pe = rho*g*z           # [J/m3]

		## Horizontal Kinetic Energy (HKE) density (energy/volume).
		if not uvrho3:
			ke = 0.5*rhop*u2v2 # [J/m3]
		else:
			ke = 0.5*rho*u2v2  # [J/m3]

		## Do volume integral to calculate TPE/HKE of the control volume.
		Pe = np.sum(pe*dV) # [J]
		Ke = np.sum(ke*dV) # [J]

		if normalize:
			Vol = dV.sum()
			Pe = Pe/Vol
			Ke = Ke/Vol
			if verbose and tp==1:
				print("")
				print("Total volume of the control volume is %e m3."%Vol)
				print("Normalizing TPE/HKE by this volume, i.e., mean TPE/HKE density [J/m3].")
				print("")

		if verbose:
			print("")
			if normalize:
				print("TPE/vol = %e J/m3"%Pe)
				print("HKE/vol = %e J/m3"%Ke)
			else:
				print("TPE = %e J"%Pe)
				print("HKE = %e J"%Ke)

		PE = np.append(PE, Pe)
		KE = np.append(KE, Ke)

	return t, KE, PE
Example #19
def adiabatic_level_sw(P,
                       S,
                       T,
                       lat,
                       bin_width=100.,
                       order=1,
                       ret_coefs=False,
                       cap=None):
    """Generate smooth buoyancy frequency profile by applying the adiabatic
    levelling method of Bray and Fofonoff (1981). This function uses the older
    thermodynamic toolbox, 'seawater'.

    Parameters
    ----------
    P : 1-D ndarray
        Pressure [dbar]
    S : 1-D ndarray
        Practical salinity [-]
    T : 1-D ndarray
        Temperature [degrees C]
    lat : float
        Latitude [-90...+90]
    bin_width : float, optional
        Pressure bin width [dbar]
    order : int, optional
        Degree of polynomial fit. (DEGREES HIGHER THAN 1 NOT PROPERLY TESTED)
    ret_coefs : bool, optional
        Flag to return additional argument pcoefs. False by default.
    cap : optional
        Flag to change proceedure at ends of array where bins may be partially
        filled. None by default, meaning they are included. Can also specify
        'left', 'right' or 'both' to cap method before partial bins.

    Returns
    -------
    N2_ref : 1-D ndarray
        Reference buoyancy frequency [s-2]
    pcoefs : 2-D ndarray
        Fitting coefficients, returned only when the flag ret_coefs is set
        True.

    """
    valid = np.isfinite(P) & np.isfinite(S) & np.isfinite(T)
    valid = np.squeeze(np.argwhere(valid))
    P_, S_, T_ = P[valid], S[valid], T[valid]

    flip = False
    if (np.diff(P_) < 0).all():
        flip = True
        P_ = np.flipud(P_)
        S_ = np.flipud(S_)
        T_ = np.flipud(T_)
    elif (np.diff(P_) < 0).any():
        raise ValueError('P must be monotonically increasing/decreasing.')

    i1 = np.searchsorted(P_, P_ - bin_width / 2.)
    i2 = np.searchsorted(P_, P_ + bin_width / 2.)

    if cap is None:
        Nd = P_.size
    elif cap == 'both':
        icapl = i2[0]
        icapr = i1[-1]
    elif cap == 'left':
        icapl = i2[0]
        icapr = i2[-1]
    elif cap == 'right':
        icapl = i1[0]
        icapr = i1[-1]
    else:
        raise ValueError("The argument cap must be either None, 'both', 'left'"
                         " or 'right'")

    if cap is not None:
        i1 = i1[icapl:icapr]
        i2 = i2[icapl:icapr]
        valid = valid[icapl:icapr]
        Nd = icapr - icapl

    dimax = np.max(i2 - i1)

    Pb = np.full((dimax, Nd), np.nan)
    Sb = np.full((dimax, Nd), np.nan)
    Tb = np.full((dimax, Nd), np.nan)

    for i in range(Nd):
        imax = i2[i] - i1[i]
        Pb[:imax, i] = P_[i1[i]:i2[i]]
        Sb[:imax, i] = S_[i1[i]:i2[i]]
        Tb[:imax, i] = T_[i1[i]:i2[i]]

    Pbar = np.nanmean(Pb, axis=0)

    rho = sw.pden(Sb, Tb, Pb, Pbar)
    sv = 1. / rho

    rhobar = np.nanmean(rho, axis=0)

    p = np.full((order + 1, Nd), np.nan)

    for i in range(Nd):
        imax = i2[i] - i1[i]
        p[:, i] = np.polyfit(Pb[:imax, i], sv[:imax, i], order)

    g = sw.g(lat, -sw.dpth(Pbar, lat))
    # The factor 1e-4 is needed for conversion from dbar to Pa.
    N2 = -1e-4 * rhobar**2 * g**2 * p[order - 1, :]

    N2_ref = np.full_like(P, np.nan)
    pcoef = np.full((order + 1, P.size), np.nan)
    if flip:
        N2_ref[valid] = np.flipud(N2)
        pcoef[:, valid] = np.fliplr(p)
    else:
        N2_ref[valid] = N2
        pcoef[:, valid] = p

    if ret_coefs:
        return N2_ref, pcoef
    else:
        return N2_ref
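A brief usage sketch with a synthetic profile (made-up values; lat and bin_width as in the signature above):

import numpy as np

P = np.arange(0., 500., 10.)          # pressure [dbar]
S = 34.5 + 0.001 * P                  # practical salinity
T = 15.0 * np.exp(-P / 300.0)         # temperature [deg C]

N2_ref = adiabatic_level_sw(P, S, T, lat=-40.0, bin_width=100.0)
N2_ref, pcoefs = adiabatic_level_sw(P, S, T, lat=-40.0, ret_coefs=True)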
Example #20
File: roms.py Project: paloczy/ap_tools
def pe(avgfile, grdfile, gridid=None, maskfile='/media/Armadillo/bkp/lado/MSc/work/ROMS/plot_outputs3/msk_shelf.npy', normalize=False, verbose=True):
	"""
	USAGE
	-----
	t, pe = pe(avgfile, grdfile, gridid=None, maskfile='/media/Armadillo/bkp/lado/MSc/work/ROMS/plot_outputs3/msk_shelf.npy', normalize=False, verbose=True):
	Calculates Potential Energy (PE) change integrated within a control volume
	for each time record of a ROMS *.avg or *.his file. The change is computed relative to
	the initial conditions, i.e., rhop(x,y,z,t=ti) = rho(x,y,z,t=ti) - rho(x,y,z,t=t0).

                                          [-g*(rhop^2)]
	PE = Integrated in a control volume V [-----------]     # [J]
                                          [ 2*drhodz  ]

	If 'normalize' is set to 'True', then PE/V (mean PE density [J/m3]) is returned instead.
	Reference:
	----------
	Cushman-Roisin (1994): Introduction to Geophysical Fluid Dynamics, page 213,
	Combination of Equations 15-29 and 15-30.
	"""
	print("Loading outputs and grid.")

	## Get outputs.
	avg = Dataset(avgfile)

	## Load domain mask.
	if maskfile:
		mask = np.load(maskfile)
		if type(mask[0,0])==np.bool_:
			pass
		else:
			mask=mask==1.

	## Getting mask indices.
	ilon,ilat = np.where(mask)

	## Get grid, time-dependent free-surface and topography.
	zeta = avg.variables['zeta']
	grd = pyroms.grid.get_ROMS_grid(gridid, zeta=zeta, hist_file=avgfile, grid_file=grdfile)

	## Get time.
	t = avg.variables['ocean_time'][:]
	t = t - t[0]
	nt = t.size

	## Get grid coordinates at RHO-points.
	lonr, latr = avg.variables['lon_rho'][:], avg.variables['lat_rho'][:]

	## Get grid spacings at RHO-points.
	## Find cell widths.
	dx = grd.hgrid.dx             # Cell width in the XI-direction.
	dy = grd.hgrid.dy             # Cell width in the ETA-direction.
	if maskfile:
		dA = dx[mask]*dy[mask]
	else:
		dA = dx*dy

	## Get temp, salt.
	temp = avg.variables['temp']
	salt = avg.variables['salt']

	## Find cell heights (at ti=0).
	zw = grd.vgrid.z_w[0,:]             # Cell depths (at ti=0).
	if maskfile:
		dz = zw[1:,ilat,ilon] - zw[:-1,ilat,ilon] # Cell height.
	else:
		dz = zw[1:,:] - zw[:-1,:]
	dz = 0.5*(dz[1:,:] + dz[:-1,:])               # Cell heights at W-points.

	## Get pres, g and pden (at ti=0).
	p0 = -zw # Approximation, for computational efficiency.
	p0 = 0.5*(p0[1:,:]+p0[:-1,:])

	if maskfile:
		rho0 = pden(salt[0,:,ilat,ilon],temp[0,:,ilat,ilon],p0[:,ilat,ilon],pr=0.)
	else:
		rho0 = pden(salt[0,:],temp[0,:],p0,pr=0.)

	if maskfile:
		g = grav(latr[mask])
	else:
		g = grav(latr)

	drho0 = rho0[1:,:] - rho0[:-1,:]
	rho0z = drho0/dz # Background potential density vertical gradient.

	PE = np.array([])
	for ti in range(nt):
		tp = ti + 1
		print("Processing time record %s of %s"%(tp,nt))

		if maskfile:
			rhoi = pden(salt[ti,:,ilat,ilon],temp[ti,:,ilat,ilon],p0[:,ilat,ilon],pr=0.)
		else:
			rhoi = pden(salt[ti,:],temp[ti,:],p0,pr=0.)

		rhop = rhoi - rho0                                  # Density anomaly, i.e., rho(x,y,z,t=ti) - rho(x,y,z,t=0)
		rhop = 0.5*(rhop[1:,:] + rhop[:-1,:])

		## Find cell heights.
		zw = grd.vgrid.z_w[ti,:]                      # Cell depths (at ti=0).
		if maskfile:
			dz = zw[1:,ilat,ilon] - zw[:-1,ilat,ilon] # Cell height.
		else:
			dz = zw[1:,:] - zw[:-1,:]

		## Find cell volumes.
		print(dx.shape,dy.shape,dz.shape)
		dV = dA*dz # [m3]
		dV = 0.5*(dV[1:,:]+dV[:-1,:])

		## Gravitational Available Potential Energy density (energy/volume).
		print(g.shape)
		print(rhop.shape)
		print(rho0z.shape)
		pe = -g*(rhop**2)/(2*rho0z) # [J/m3]

		## Do volume integral to calculate Gravitational Available Potential Energy of the control volume.
		Pe = np.sum(pe*dV) # [J]

		if normalize:
			V = dV.sum()
			Pe = Pe/V
			print("")
			print("Total volume of the control volume is %e m3."%V)
			print("Normalizing PE by this volume, i.e., mean PE density [J/m3].")
			print("")

		if verbose:
			if normalize:
				print("PE = %e J/m3"%Pe)
			else:
				print("PE = %e J"%Pe)

		PE = np.append(PE, Pe)

	return t, PE
Example #21
    T_clim_training[:, 0][:, np.newaxis], tos.shape[1], axis=1)
adt = adt - np.repeat(
    SH_clim_training[:, 0][:, np.newaxis], tos.shape[1], axis=1)
sos = sos - np.repeat(
    S_clim_training[:, 0][:, np.newaxis], tos.shape[1], axis=1)

####################################
# pre-process training data
####################################

P = np.zeros(T.shape)
delta_P = 10.
for i in range(P.shape[0]):
    P[i, :] = depth

D = sw.pden(S, T, P, pr=0)  #computes density profiles from in situ T and S
D_std = sw.pden(S * 0 + 35, T * 0, P,
                pr=0)  #computes standard density profiles
SVA = (1. / D)  #computes specific volume
SVA_std = (1. / D_std)  #computes specific volume standard profiles
g = 9.81

SH = np.zeros(T.shape)
for ik in range(T.shape[1]):
    SH[:, ik] = 1e6 * np.sum(SVA[:, ik:T.shape[1]],
                             axis=1) * delta_P / g  #steric heights in cm
    SH[:, ik] = SH[:, ik] - 1e6 * np.sum(SVA_std[:, ik:T.shape[1]],
                                         axis=1) * delta_P / g

T = T - T_clim_training
S = S - S_clim_training
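The loop above is a discrete steric height integral, SH = (1/g) * sum of the specific volume anomaly times delta_P, with the anomaly taken relative to the S=35, T=0 reference profile; the factor 1e6 combines dbar->Pa (1e4) with m->cm (1e2). A minimal single-profile sketch with made-up values:

import numpy as np
import seawater as sw

g = 9.81
delta_P = 10.                                 # layer thickness [dbar]
P = np.arange(5., 1005., delta_P)             # layer-centre pressures [dbar]
T = 20.0 * np.exp(-P / 400.0)                 # in situ temperature [deg C]
S = 34.5 + 0.0005 * P                         # practical salinity

sva = 1. / sw.pden(S, T, P, pr=0) - 1. / sw.pden(35. + 0. * P, 0. * P, P, pr=0)
SH_cm = 1e6 * np.sum(sva) * delta_P / g       # steric height relative to ~1000 dbar [cm]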
Example #22
File: oceans.py Project: dcherian/dcpy
def TSplot(
    S,
    T,
    Pref=0,
    size=None,
    color=None,
    ax=None,
    rho_levels=[],
    labels=True,
    label_spines=True,
    plot_distrib=True,
    Sbins=30,
    Tbins=30,
    plot_kwargs=None,
    hexbin=True,
    fontsize=9,
    kind=None,
    equalize=True,
):
    """
    T-S plot. The default is to scatter, but hex-binning is also an option.

    Parameters
    ----------
    S, T : float32
        Salinity, temperature.
    Pref : float32, optional
        Reference pressure level.
    size: int, numpy.ndarray, xr.DataArray
        Passed to scatter.
    color: string, numpy.ndarray, xr.DataArray
        Passed as 'c' to scatter and 'C' to hexbin.
    ax : optional, matplotlib.Axes
        Axes to plot to.
    rho_levels : optional
        Density contour levels.
    labels : bool, optional
        Label density contours?
    label_spines : bool, optional
        Fancy spine labelling inspired by Arnold Gordon's plots.
    fontsize: int, optional
        font size for labels
    plot_distrib : bool, optional
        Plot marginal distributions of T, S?
    Sbins, Tbins : int, optional
        Number of T, S bins for marginal distributions.
    hexbin : bool, optional
        hexbin instead of scatter plot?
    plot_kwargs: dict, optional
        extra kwargs passed directly to scatter or hexbin. Cannot contain
        's', 'c' or 'C'.

    Returns
    -------
    handles: dict,
        cs : Handle to density ContourSet.
        ts : Handle to T-S scatter
        Thist, Shist : handles to marginal distributions
    axes : list of Axes.
    """

    # colormap = cmo.cm.matter
    #
    if kind is None:
        if hexbin is True:
            kind = "hexbin"
        elif hexbin is False:
            kind = "scatter"

    if plot_kwargs is None:
        plot_kwargs = {}
    if any([kw in plot_kwargs for kw in ["c", "C", "s"]]):
        raise ValueError(
            "plot_kwargs cannot contain c, C, or s. "
            + "Please specify size or color as appropriate."
        )

    scatter_defaults = {"edgecolors": None, "alpha": 0.5}

    labels = False if rho_levels is None else labels

    axes = dict()
    handles = dict()

    if ax is None:
        f = plt.figure(constrained_layout=True)
        if plot_distrib:
            gs = mpl.gridspec.GridSpec(5, 5, figure=f)

            axes["ts"] = f.add_subplot(gs[1:, :-1])
            axes["s"] = f.add_subplot(gs[0, :-1], sharex=axes["ts"])
            axes["t"] = f.add_subplot(gs[1:, -1], sharey=axes["ts"])
            ax = axes["ts"]
        else:
            ax = plt.gca()
    elif isinstance(ax, dict):
        axes = ax
        ax = axes["ts"]
    axes["ts"] = ax

    nanmask = np.isnan(S.values) | np.isnan(T.values)
    if size is not None:
        nanmask = nanmask | np.isnan(size)
    if color is not None and not isinstance(color, str):
        nanmask = nanmask | np.isnan(color)
    if len(np.atleast_1d(Sbins)) > 1:
        nanmask = nanmask | (S.values < np.min(Sbins)) | (S.values > np.max(Sbins))
    if len(np.atleast_1d(Tbins)) > 1:
        nanmask = nanmask | (T.values < np.min(Tbins)) | (T.values > np.max(Tbins))

    salt = _flatten_data(S.where(~nanmask))
    temp = _flatten_data(T.where(~nanmask))
    if size is not None and hasattr(size, "values"):
        size = _flatten_data(size.where(~nanmask))
    if color is not None and hasattr(color, "values"):
        color = _flatten_data(color.where(~nanmask))

    # TODO: plot outliers separately with hexbin
    # _prctile = 2
    # outlierT = np.percentile(temp, [_prctile, 100 - _prctile])
    # outlierS = np.percentile(salt, [_prctile, 100 - _prctile])

    # outliermask = np.logical_or(
    #     np.logical_or(salt > outlierS[1], salt < outlierS[0]),
    #     np.logical_or(temp > outlierT[1], temp < outlierT[0]))

    if kind == "hexbin":

        hexbin_defaults = {"cmap": mpl.cm.Blues, "mincnt": 1}
        if not isinstance(color, str):
            hexbin_defaults["C"] = color
        hexbin_defaults.update(plot_kwargs)

        handles["ts"] = ax.hexbin(
            salt, temp, gridsize=(Sbins, Tbins), **hexbin_defaults
        )
        # ax.plot(salt[outliermask], temp[outliermask], '.', 'gray')
        #
    elif kind == "hist":
        from xhistogram.core import histogram

        if isinstance(Sbins, int):
            Sbins = np.linspace(S.data.min(), S.data.max(), Sbins)
        if isinstance(Tbins, int):
            Tbins = np.linspace(T.data.min(), T.data.max(), Tbins)
        hist = histogram(salt, temp, bins=(Sbins, Tbins))
        if equalize:
            from skimage.exposure import equalize_adapthist

            hist = equalize_adapthist(hist.data)
        hist = hist.astype(float)
        # print(np.percentile(hist.ravel(), 10))
        hist[hist < np.percentile(hist[hist > 0].ravel(), 10)] = np.nan
        handles["ts"] = ax.pcolormesh(Sbins, Tbins, hist.T, **plot_kwargs)

    elif kind == "scatter":
        scatter_defaults.update(plot_kwargs)
        handles["ts"] = ax.scatter(
            salt,
            temp,
            s=size if size is not None else 12,
            c=color,
            **scatter_defaults,
        )

    # defaults.pop('alpha')
    # ts = ax.scatter(flatten_data(S), flatten_data(T),
    #                 s=flatten_data(size), c=[[0, 0, 0, 0]],
    #                 **defaults)

    if rho_levels is not None:
        Slim = ax.get_xlim()
        Tlim = ax.get_ylim()

        Tvec = np.linspace(Tlim[0], Tlim[1], 40)
        Svec = np.linspace(Slim[0], Slim[1], 40)
        [Smat, Tmat] = np.meshgrid(Svec, Tvec)

        # background ρ contours are T, S at the reference level
        ρ = sw.pden(Smat, Tmat, Pref, Pref) - 1000

        rho_levels = np.asarray(rho_levels)
        if np.all(rho_levels > 1000):
            rho_levels = rho_levels - 1000
        if not (rho_levels.size > 0):
            rho_levels = 7

        handles["rho_contours"] = ax.contour(
            Smat,
            Tmat,
            ρ,
            colors="gray",
            levels=rho_levels,
            linestyles="solid",
            zorder=-1,
            linewidths=0.5,
        )

        ax.set_xlim([Smat.min(), Smat.max()])
        ax.set_ylim([Tmat.min(), Tmat.max()])

    if plot_distrib:
        hist_args = dict(color=color, density=True, histtype="step")
        handles["Thist"] = axes["t"].hist(
            temp, orientation="horizontal", bins=Tbins, **hist_args
        )
        axes["t"].set_xticklabels([])
        axes["t"].set_xticks([])
        axes["t"].spines["bottom"].set_visible(False)

        handles["Shist"] = axes["s"].hist(salt, bins=Sbins, **hist_args)
        axes["s"].set_yticks([])
        axes["s"].set_yticklabels([])
        axes["s"].spines["left"].set_visible(False)

    if labels:
        if label_spines:
            plots.contour_label_spines(
                handles["rho_contours"], fmt="%.1f", fontsize=fontsize
            )
        else:
            clabels = ax.clabel(
                handles["cs"], fmt="%.1f", inline=True, inline_spacing=10
            )
            [txt.set_backgroundcolor([0.95, 0.95, 0.95, 0.75]) for txt in clabels]

        ax.text(
            0,
            1.005,
            " $σ_" + str(Pref) + "$",
            transform=ax.transAxes,
            va="bottom",
            fontsize=fontsize + 2,
            color="gray",
        )

    ax.spines["right"].set_visible(True)
    ax.spines["top"].set_visible(True)

    ax.set_xlabel("S")
    ax.set_ylabel("T")

    return handles, axes
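A brief usage sketch (assuming S and T are xarray DataArrays, since the function uses .values and .where internally; rho_levels are sigma values at the chosen Pref):

import numpy as np
import xarray as xr

S = xr.DataArray(34.5 + 0.5 * np.random.rand(500))   # synthetic salinity cloud
T = xr.DataArray(8.0 + 6.0 * np.random.rand(500))    # synthetic temperature cloud

handles, axes = TSplot(S, T, Pref=0, rho_levels=np.arange(25.0, 28.0, 0.5), kind="scatter")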