/
TIL.py
373 lines (307 loc) · 12.6 KB
/
TIL.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
# Module TIL:
# this module holds Python routines for diagnosing the tropopause inversion layer
# (TIL) in atmosphere models with DART data assimilation
# Lisa Neef, 27 Jun 2016
# load the required packages
import numpy as np
import os.path
from netCDF4 import Dataset
import DART_state_space as DSS
import DART as dart
def Nsq_forcing_from_RC(E, datetime_in=None, debug=False, hostname='taurus'):
    """
    Birner (2010) used the thermodynamic equation in the TEM form to derive an expression
    for the rate of change of static stability (N2) due to residual motion and diabatic heating.
    This subroutine computes those terms from the dynamical heating rates computed by Wuke Wang.

    The vertical motion (wstar) term is -d(wstar*Nsq)/dz.
    Wuke already computed WS = -wstar*HNsq/R, so it's easiest to load that data,
    divide out H and R, and then take the vertical gradient.

    The horizontal term is -g d(vstar/theta * d(theta)/dy)/dz.
    Wuke already computed the heating rate term v*dtheta/dy = v*dTdy, so the easiest
    thing to do is to multiply the heating rates by g/theta and then take the
    vertical gradient.

    INPUTS:
    E: a DART experiment dictionary. Relevant fields are:
        E['exp_name'] - the experiment name
        E['daterange'] - helps to choose which date to load in case this isn't specifically given
        E['variable'] - if this is set to 'Nsq_vstar_forcing', the code returns the N2 forcing
            due to meridional residual circulation. For anything else, it returns the
            forcing due to vertical residual circulation.
    datetime_in: the date for which we want to compute this diagnostic.
        default is None -- in this case, just choose the first date in E['daterange']

    OUTPUTS:
    D: a dictionary with the N-squared forcing term in 'data' (units s^{-2}/day),
        plus 'lat', 'lev', 'units', and 'long_name' entries.
    """
    # necessary constants
    H = 7000.0    # scale height in m
    g = 9.80      # acceleration of gravity in m/s^2
    p0 = 1000.0   # reference pressure in hPa

    if datetime_in is None:
        datetime_in = E['daterange'][0]

    # depending on which term we want, load the residual circulation component and
    # supporting fields, and then build the quantity X whose vertical gradient we take.
    ERC = E.copy()
    ET = E.copy()
    if E['variable'] == 'Nsq_vstar_forcing':
        ET['variable'] = 'theta'
        Dtheta = dart.load_DART_diagnostic_file(ET, datetime_in, hostname=hostname, debug=debug)
        theta = Dtheta['data']
        lat = Dtheta['lat']
        lon = Dtheta['lon']
        lev = Dtheta['lev']
        ERC['variable'] = 'VSTAR'
        Dvstar = DSS.compute_DART_diagn_from_Wang_TEM_files(ERC, datetime_in, hostname=hostname, debug=debug)
        vstar = Dvstar['data']

        # the above routines do not return arrays of consistent shape, so find the
        # longitude dimension of theta by matching its length
        nlon = len(lon)
        for idim, s in enumerate(theta.shape):
            if s == nlon:
                londim = idim
        # zonal mean of potential temperature - this should make its shape copy x lat x lev
        thetam = np.average(theta, axis=londim)
        # squeeze out a possible length-1 copy dimension before differentiating
        thetam2 = np.squeeze(thetam)

        # meridional gradient of zonal-mean theta:
        # convert latitude steps to arclength (meters), then differentiate along
        # whichever axis of the squeezed array corresponds to latitude.
        rlat = np.deg2rad(lat)
        Re = 6371000.0   # radius of Earth in m
        y = Re * rlat
        nlat = len(lat)
        for idim, s in enumerate(thetam2.shape):
            if s == nlat:
                newlatdim = idim
        # passing the 1-D coordinate array y (rather than a broadcast spacing array)
        # is the correct usage for numpy >= 1.13 and handles nonuniform latitudes
        dthetady = np.gradient(thetam2, y, axis=newlatdim)

        # compute_DART_diagn_from_Wang_TEM_files delivers copy x lev x lat (or lev x lat),
        # whereas everything else here is copy x lat x lev (or lat x lev):
        # swap the last two axes to match.
        vstar2 = np.swapaxes(vstar, -1, -2)
        X = (g / thetam2) * vstar2 * dthetady
    else:
        ET['variable'] = 'Nsq'
        D = dart.load_DART_diagnostic_file(ET, datetime_in, hostname=hostname, debug=debug)
        Nsq = D['data']
        lat = D['lat']
        lon = D['lon']
        lev = D['lev']
        ERC['variable'] = 'WSTAR'
        Dwstar = DSS.compute_DART_diagn_from_Wang_TEM_files(ERC, datetime_in, hostname=hostname, debug=debug)
        wstar = Dwstar['data']

        # find the longitude dimension of Nsq by matching its length
        nlon = len(lon)
        for idim, s in enumerate(Nsq.shape):
            if s == nlon:
                londim = idim
        # zonal mean of buoyancy frequency, squeezing out a length-1 copy dimension
        Nsqm2 = np.squeeze(np.average(Nsq, axis=londim))
        # transpose wstar from copy x lev x lat (or lev x lat) to match Nsqm2
        wstar2 = np.swapaxes(wstar, -1, -2)
        X = Nsqm2 * wstar2

    # convert pressure levels to log-pressure altitude and take the vertical gradient.
    # X has shape (copy x lat x lev) or (lat x lev), so level is always the last axis;
    # pass the coordinate array zlev directly (valid for nonuniform level spacing).
    zlev = H * np.log(p0 / lev)
    dxdz = np.gradient(X, zlev, axis=-1)

    # the above calculation yields a quantity in units s^-2/s, but it makes more sense
    # in the grand scheme of things to look at buoyancy forcing per day, so here
    # is a conversion factor.
    seconds_per_day = 60. * 60. * 24.0
    N2_forcing = -dxdz * seconds_per_day

    D = dict()
    D['data'] = N2_forcing
    D['lat'] = lat
    D['lev'] = lev
    D['units'] = 's^{-2}/day'
    D['long_name'] = 'N^{2} Forcing'
    return D
def Nsq_forcing_from_Q(E, datetime_in=None, debug=False, hostname='taurus'):
    """
    Birner (2010) used the thermodynamic equation in the TEM form to derive an expression
    for the rate of change of static stability (N2) due to residual motion and diabatic heating.
    This subroutine computes the term due to diabatic heating, i.e.:
    g d(Q/theta)/dz

    INPUTS:
    E: a DART experiment dictionary. Relevant fields are:
        E['exp_name'] - the experiment name
        E['daterange'] - helps to choose which date to load in case this isn't specifically given
        E['variable'] - this determines what kind of diabatic heating we use:
            the value of E['variable'] should be a string like 'Nsq_forcing_XXXXX'
            where XXXXX is the model variable corresponding to whatever diabatic
            heating type we are looking for.
            For example, in WACCM, 'QRL_TOT' is the total longwave heating, so to get the
            N2 forcing from that, just set E['variable']='Nsq_forcing_QRL_TOT'
    datetime_in: the date for which we want to compute this diagnostic.
        default is None -- in this case, just choose the first date in E['daterange']

    OUTPUTS:
    D: a dictionary with the N-squared forcing term in 'data' (units s^{-2}/day),
        plus 'lat', 'lev', 'units', and 'long_name' entries.
    """
    # necessary constants
    H = 7000.0    # scale height in m
    p0 = 1000.0   # reference pressure in hPa
    g = 9.8       # acceleration of gravity

    # retrieve the name of the diabatic heating variable from E['variable'].
    # NOTE: the previous code used str.strip('Nsq_forcing_'), which removes
    # *characters* (any of N,s,q,_,f,o,r,c,i,n,g) from both ends rather than the
    # prefix, and can mangle variable names -- slice off the prefix explicitly.
    prefix = 'Nsq_forcing_'
    if E['variable'].startswith(prefix):
        Qstring = E['variable'][len(prefix):]
    else:
        Qstring = E['variable']

    # load the desired diabatic heating term
    # this is not typically part of the DART output, so load from model history files
    # (right now this really only works for WACCM/CAM)
    EQ = E.copy()
    EQ['variable'] = Qstring
    DQ = DSS.compute_DART_diagn_from_model_h_files(EQ, datetime_in, verbose=debug)
    # remove the time dimension, which should have length 1
    DQ['data'] = np.squeeze(DQ['data'])

    # also load potential temperature
    ET = E.copy()
    ET['variable'] = 'theta'
    Dtheta = dart.load_DART_diagnostic_file(ET, datetime_in, hostname=hostname, debug=debug)
    # squeeze out extra dims, which we get if we load single copies (e.g. ensemble mean)
    Dtheta['data'] = np.squeeze(Dtheta['data'])

    # average both Q and theta over the longitude dimension
    Q_mean = DSS.average_over_named_dimension(DQ['data'], DQ['lon'])
    theta_mean = DSS.average_over_named_dimension(Dtheta['data'], Dtheta['lon'])

    # Q_mean comes out as copy x lev x lat whereas theta_mean is copy x lat x lev
    # (or lev x lat vs lat x lev): swap the last two axes of Q_mean to match.
    Q_mean2 = np.swapaxes(Q_mean, -1, -2)

    # divide Q by theta
    X = Q_mean2 / theta_mean

    # convert pressure levels to log-pressure altitude and take the vertical gradient.
    # X has shape (copy x lat x lev) or (lat x lev), so level is always the last axis;
    # pass the coordinate array zlev directly (valid for nonuniform level spacing).
    lev = DQ['lev']
    zlev = H * np.log(p0 / lev)
    dxdz = np.gradient(X, zlev, axis=-1)

    # the above calculation yields a quantity in units s^-2/s, but it makes more sense
    # in the grand scheme of things to look at buoyancy forcing per day, so here
    # is a conversion factor.
    seconds_per_day = 60. * 60. * 24.0
    N2_forcing = g * dxdz * seconds_per_day

    D = dict()
    D['data'] = N2_forcing
    D['lev'] = DQ['lev']
    D['lat'] = DQ['lat']
    D['units'] = 's^{-2}/day'
    D['long_name'] = 'N^{2} Forcing'
    return D
def ztrop(z, T, hostname='taurus', debug=False):
    """
    Given 1-D arrays of altitude and temperature, compute the lapse-rate tropopause
    following the WMO criterion: the lowest level where the lapse rate falls below
    2 K/km, provided the average lapse rate between that level and all levels within
    2 km above it also stays below 2 K/km.
    Additionally, levels at or below 6 km are rejected, to avoid erroneously choosing
    a too-low tropopause when a meteorological disturbance causes the lapse rate to
    fall closer to zero.
    Note that z and T have to be in km and Kelvin, respectively.
    Returns the tropopause altitude in km, or None if no level meets the criterion.
    """
    z = np.asarray(z)
    T = np.asarray(T)
    # first define tropopause height as nothing
    ztrop = None
    # lapse rate -dT/dz; pass the coordinate array z (not an array of spacings,
    # which numpy >= 1.13 would misinterpret as coordinates) so that nonuniform
    # level spacing is handled correctly
    LR = -np.gradient(T, z)
    # loop through lapse rates; wherever one falls below the 2 K/km boundary,
    # see if the WMO criterion is met
    for ll, zz in zip(LR, z):
        if ll < 2.0:
            zz_upper = zz + 2.0
            upper_neighbors = np.where(np.logical_and(z >= zz, z <= zz_upper))
            LRtest = np.mean(LR[upper_neighbors])
            # if this average also stays within 2 K/km (and we are above 6 km), we're done
            if (LRtest < 2.0) and (zz > 6.0):
                ztrop = zz
                break
    return ztrop
def Nsq(T, z, p=None):
    """
    Compute the buoyancy frequency squared (N^2) from 1-D profiles of temperature
    and altitude.
    INPUTS:
    T: a temperature profile in Kelvin
    z: a vector of altitudes in km
    p: a vector of pressures in hPa (optional)
    If pressure is also given (must be in hPa), that makes the calculation slightly
    easier; otherwise it is estimated from altitude assuming an isothermal
    atmosphere with a 7 km scale height.
    OUTPUT: the N^2 profile in s^-2.
    """
    # necessary constants
    P0 = 1000.0        # reference pressure in hPa
    Rd = 286.9968933   # gas constant for dry air J/degree/kg
    g = 9.80616        # acceleration due to gravity m/s^2
    cp = 1005.0        # heat capacity at constant pressure m^2/s^2*K
    H = 7.0            # scale height - 7.0 km
    z = np.asarray(z, dtype=float)
    T = np.asarray(T, dtype=float)
    # is a pressure profile given? if not, estimate one from altitude
    if p is None:
        p = P0 * np.exp(-z / H)
    # compute potential temperature
    theta = T * (P0 / p) ** (Rd / cp)
    # vertical gradient of potential temperature with respect to height in meters;
    # pass the coordinate array z*1000 directly (not an array of spacings, which
    # numpy >= 1.13 would misinterpret as coordinates)
    dthetadZ = np.gradient(theta, z * 1.0E3)
    N2 = (g / theta) * dthetadZ
    return N2