Example #1
# # Introduction
#
# This notebook picks out the point at which the buoyancy flux crosses zero -- i.e. the bottom of the entrainment layer

# %%
import os
import urllib
from matplotlib import pyplot as plt
from netCDF4 import Dataset
import numpy as np
import a500
import context

from a500.utils.data_read import download
the_root = "http://clouds.eos.ubc.ca/~phil/docs/atsc500/data/"
the_file = 'case_60_10.nc'
out = download(the_file, root=the_root, dest_folder=a500.data_dir)


def make_theta(temp, press):
    """
      temp in K
      press in Pa
      returns theta in K
    """
    p0 = 1.e5
    Rd = 287.  #J/kg/K
    cpd = 1004.  #J/kg/K
    theta = temp * (p0 / press)**(Rd / cpd)
    return theta
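
# %%
# quick sanity check of make_theta with hypothetical values: air at
# 290 K and 90 kPa should give a potential temperature near 298.9 K
print(make_theta(290., 9.e4))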

Example #2
import glob

# %%
# !conda install -y xarray

# %%
#get 10 files, each is the same timestep for a member of a
#10 member ensemble
import numpy as np
from a500.utils.data_read import download
root='http://clouds.eos.ubc.ca/~phil/docs/atsc500/dryles'
for i in range(1, 11):
    the_name = 'mar12014_{:d}_15600.nc'.format(i)
    print(the_name)
    download(the_name, root=root)

# %% [markdown]
# Useful links:
#
# We need:
#
# * [namedtuples](https://docs.python.org/3/library/collections.html#collections.namedtuple)
#
# * [numpy.savez](http://docs.scipy.org/doc/numpy/reference/generated/numpy.savez.html)
#
# * [vapor](https://www.vapor.ucar.edu/)
#
# * [vapor videos](https://www.youtube.com/channel/UCVSuoneyeZFn4Znxl_jJ70A)
#
# * [xarray and dask](https://www.continuum.io/content/xray-dask-out-core-labeled-arrays-python)
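
# %% [markdown]
# A minimal sketch of the xarray workflow those links point to, assuming
# each downloaded file holds a flat set of variables (no netCDF groups);
# the `ensemble` dimension name is an arbitrary choice:

# %%
import xarray as xr

the_files = sorted(glob.glob("mar12014_*_15600.nc"))
ds = xr.open_mfdataset(the_files, combine="nested", concat_dim="ensemble")
print(ds)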
Example #3
# In[1]:


from matplotlib import pyplot as plt
from netCDF4 import Dataset
import numpy as np


# In[2]:


from a500.utils.data_read import download
the_root = "http://clouds.eos.ubc.ca/~phil/docs/atsc500/data/"
the_file = 'case_60_10.nc'
out = download(the_file, root=the_root)


# # Working with groups
# 
# the Dataset object has an attribute called `groups` that returns a dictionary with all groups
# 
# Similarly, each group object has an attribute called `variables` that returns a dictionary of all its variables
# 
# So to get the shape of the wvel array for each group member, do something like this:

# In[3]:


case_name = 'case_60_10.nc'
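#
# a minimal sketch, assuming every group carries a wvel variable: loop
# over the groups dictionary and report each group's wvel shape
with Dataset(case_name) as ncin:
    for group_name, the_group in ncin.groups.items():
        print(group_name, the_group.variables['wvel'].shape)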
Example #4
# %%
import numpy as np
import matplotlib.pyplot as plt
import context
from a500.utils.data_read import download

# %% [markdown]
# # Power spectrum of turbulent vertical velocity

# %%
#load data sampled at 20.8333 Hz

download('miami_tower.npz',
         root='http://clouds.eos.ubc.ca/~phil/docs/atsc500/data',
         dest_folder=context.data_dir)
td = np.load(context.data_dir /
             'miami_tower.npz')  #load temp, uvel, vvel, wvel, minutes
print('keys: ', list(td.keys()))
print(td['description'])

# %% [markdown]
# ## Step 1: plot the raw spectrum and confirm Parseval's theorem (Stull 8.6.2a, p. 313)
#
# See also [Numerical Recipes Chapter 12/13](http://clouds.eos.ubc.ca/~phil/docs/atsc500/pdf_files/numerical_recipes_fft.pdf)
# (user: green, password: house) page 492, equation 12.0.12 and 12.0.13.  We want to demonstrate that
#
#
# $$
# \operatorname{Corr}(g, g) \Longleftrightarrow|G(f)|^{2}
# $$
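
# %%
# a minimal sketch of the check, using the wvel record loaded above:
# Parseval says the time-domain variance should equal the normalized
# sum of |W(f)|^2 from the fft
wvel = td['wvel'].flatten()
wvel = wvel - np.mean(wvel)
W = np.fft.fft(wvel)
print('time domain variance: {:8.4f}'.format(np.sum(wvel**2) / wvel.size))
print('freq domain variance: {:8.4f}'.format(np.sum(np.abs(W)**2) / wvel.size**2))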
Example #5
    'cesar_nubiscope_cloudcover_la1_t10_v1.0_201412.nc',
    'cesar_surface_flux_lc1_t10_v1.0_201407.nc',
    'cesar_surface_flux_lc1_t10_v1.0_201412.nc',
    'cesar_surface_meteo_lc1_t10_v1.0_201407.nc',
    'cesar_surface_meteo_lc1_t10_v1.0_201412.nc',
    'cesar_tower_meteo_lb1_t10_v1.1_201407.nc',
    'cesar_tower_meteo_lb1_t10_v1.1_201412.nc'
]

# In[3]:

data_download = True
root = 'http://clouds.eos.ubc.ca/~phil/docs/atsc500/cabauw'
if data_download:
    for the_file in filelist:
        download(the_file, root=root)

# The make_date function takes an open CESAR ncfile and turns its time
# vector, given as hours past the start date, into python datetime objects
# in UTC.  It uses the dateutil parse function to turn the date_start_of_data string into a date; a sketch of the function follows the list below.
#
# This function is used in the cells below to create two dictionaries:
#
# 1. **data_dict** with six keys for the 3 file/2 month combinations
#    with each (file,month) entry holding a dictionary
#    with the numpy array data for the file variables (F, UST, etc.)
#
# 2. **var_attr** with a key for each variable holding dictionaries with
#    the variable attributes (units, long name, etc.)

# In[4]:
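
# a minimal sketch of make_date (a hypothetical implementation that
# follows the description above): parse the file's date_start_of_data
# attribute with dateutil, then add the time vector as hour offsets
import datetime as dt
from dateutil.parser import parse


def make_date(the_ncfile):
    start = parse(the_ncfile.date_start_of_data).replace(tzinfo=dt.timezone.utc)
    the_hours = the_ncfile.variables['time'][...]
    return [start + dt.timedelta(hours=float(the_hour)) for the_hour in the_hours]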
Example #6
# In[1]:

import warnings
warnings.filterwarnings("ignore", category=FutureWarning)

# In[2]:

from matplotlib import pyplot as plt
import urllib
import os
from a500.utils.data_read import download

data_download = True
satfile = 'a17.nc'
if data_download:
    download(satfile, root='http://clouds.eos.ubc.ca/~phil/docs/atsc500/data')

# In[3]:

from netCDF4 import Dataset
with Dataset(satfile) as nc:
    tau = nc.variables['tau'][...]

# ## Character of the optical depth field
#
# The image below shows one of the marine boundary layer landsat scenes analyzed in
# [Lewis et al., 2004](http://onlinelibrary.wiley.com/doi/10.1029/2003JD003742/full)
#
# It is a 2048 x 2048 pixel image taken by Landsat 7, with the visible reflectivity converted to
# cloud optical depth.   The pixels are 25 m x 25 m, so the scene extends for about 50 km x 50 km
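
# In[4]:

# a minimal sketch (not in the original excerpt): display the tau field
# read above to show the cloud structure described here
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
im = ax.imshow(tau)
fig.colorbar(im, ax=ax, label='optical depth')
ax.set_title('Landsat cloud optical depth')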
Example #7
import warnings

warnings.filterwarnings("ignore", category=FutureWarning)
import context

# %%
from matplotlib import pyplot as plt
import urllib
import os
from a500.utils.data_read import download

data_download = True
satfile = "a17.nc"
if data_download:
    download(
        satfile,
        root="http://clouds.eos.ubc.ca/~phil/docs/atsc500/data",
        dest_folder=context.data_dir,
    )

# %%
from netCDF4 import Dataset

with Dataset(context.data_dir / satfile) as nc:
    tau = nc.variables["tau"][...]

# %% [markdown]
# ## Character of the optical depth field
#
# The image below shows one of the marine boundary layer landsat scenes analyzed in
# [Lewis et al., 2004](http://onlinelibrary.wiley.com/doi/10.1029/2003JD003742/full)
#
Example #8

# # Using the fft to compute correlation
#
# Below I use aircraft measurements of $\theta$ and wvel taken at 25 Hz.  I compute the
# autocorrelation using numpy.correlate and numpy.fft and show that the two approaches give identical results, as we'd expect

# In[1]:

from matplotlib import pyplot as plt
from a500.utils.data_read import download
plt.style.use('ggplot')
import urllib
import os
download('aircraft.npz',
         root='http://clouds.eos.ubc.ca/~phil/docs/atsc500/data')

# In[2]:

#http://stackoverflow.com/questions/643699/how-can-i-use-numpy-correlate-to-do-autocorrelation
import numpy as np
data = np.load('aircraft.npz')
wvel = data['wvel'] - data['wvel'].mean()
theta = data['theta'] - data['theta'].mean()
autocorr = np.correlate(wvel, wvel, mode='full')
#zero lag sits at the center of the 'full' output, at index wvel.size - 1
auto_data = autocorr[wvel.size - 1:]
ticks = np.arange(0, wvel.size)
ticks = ticks / 25.
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.set(xlabel='lag (seconds)',
       title='autocorrelation of wvel using numpy.correlate')
ax.plot(ticks, auto_data)
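
# In[3]:

# a minimal sketch of the fft half of the comparison promised above: by
# the correlation theorem, ifft(|fft(w)|^2) gives the autocorrelation;
# zero-padding to 2N turns the circular correlation into the linear one
N = wvel.size
fft_w = np.fft.fft(wvel, n=2 * N)
acorr_fft = np.fft.ifft(fft_w * np.conj(fft_w)).real[:N]
print('fft and numpy.correlate agree: ', np.allclose(acorr_fft, auto_data))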
Example #9
import context
from a500.utils.data_read import download
from pathlib import Path

# %% [markdown]
# ## Download toy model data

# %%
# get 10 files, each is the same timestep for a member of a
# 10 member ensemble

root = "http://clouds.eos.ubc.ca/~phil/docs/atsc500/small_les"
for i in range(1, 11):
    the_name = f"mar12014_{i:d}_15600.nc"
    print(the_name)
    download(the_name, root=root, dest_folder=context.data_dir)

# %%
# list the downloaded ensemble files

the_files = context.data_dir.glob("mar12*nc")
the_files = list(the_files)
the_files

# %% [markdown]
# ## Sort in numeric order

# %%
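# a minimal sketch of the numeric sort: pull the ensemble member number
# out of each filename (e.g. mar12014_10_15600.nc -> 10) and sort on it
the_files.sort(key=lambda the_path: int(the_path.name.split("_")[1]))
the_files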

Example #10
# Check Stull 8.6.1b (or Numerical Recipes 12.0.13), which says that the normalized sum of the power spectrum equals the variance
#

# In[7]:

print('\nsimple cosine: velocity variance %10.3f' %
      (np.sum(onehz * onehz) / totsize))
print('simple cosine: Power spectrum sum %10.3f\n' %
      (np.sum(Power) / totsize**2.))

# ## Power spectrum of turbulent vertical velocity

# In[3]:

#load data sampled at 20.8333 Hz
download('miami_tower.npz',
         root='http://clouds.eos.ubc.ca/~phil/docs/atsc500/data')
td = np.load('miami_tower.npz')  #load temp, uvel, vvel, wvel, minutes
print('keys: ', list(td.keys()))
print(td['description'])

# In[4]:

sampleRate = 20.8333
nyquistfreq = sampleRate / 2.

totsize = 36000
wvel = td['wvel'][0:totsize].flatten()
temp = td['temp'][0:totsize].flatten()
wvel = wvel - np.mean(wvel)
temp = temp - np.mean(temp)
flux = wvel * temp
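
# In[5]:

# a minimal sketch of the next step: take the raw power spectrum of wvel
# and verify that its normalized sum recovers the variance, as with the
# simple cosine above
Power = np.abs(np.fft.fft(wvel))**2
print('wvel variance %10.3f' % (np.sum(wvel * wvel) / totsize))
print('wvel Power spectrum sum %10.3f' % (np.sum(Power) / totsize**2.))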