def get_available_date_range(exp_name):
	"""
	Given an existing DART experiment, return the daterange of all currently available data.
	"""
	N = {'W0910_GLOBAL': dart.daterange(date_start=datetime.datetime(2009,10,1,0,0,0), periods=380, DT='6H'),
		'W0910_NODA': dart.daterange(date_start=datetime.datetime(2009,10,1,0,0,0), periods=640, DT='6H'),
	}
	return N[exp_name]
Example #2
def get_available_date_range(exp_name):
    """
	given some existing DART experiment, return the daterange of all currently available data 
	"""

    N = {
        'W0910_GLOBAL':
        dart.daterange(date_start=datetime.datetime(2009, 10, 1, 0, 0, 0),
                       periods=380,
                       DT='6H'),
        'W0910_NODA':
        dart.daterange(date_start=datetime.datetime(2009, 10, 1, 0, 0, 0),
                       periods=640,
                       DT='6H'),
    }
    return N[exp_name]
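
A minimal usage sketch, assuming the `dart` helper module and `datetime` import used throughout these snippets are available:

DR = get_available_date_range('W0910_GLOBAL')
print('first available date:', DR[0])
print('last available date:', DR[len(DR) - 1])
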
Example #3
def compute_climate_indices(E,
                            index_name,
                            climatology_option='NODA',
                            hostname='taurus',
                            verbose=False):
    """
	This subroutine computes various simple climate indices for a dataset 
	defined by an experiment dictionary.  

	Currently supporting the following indices:  
	+ 'Aleutian Low' index of Garfinkel et al. (2010)
	+ 'East European High' index of Garfinkel et al. (2010)
	+ 'AO Proxy' -- Polar Cap GPH Anomaly at 500 hPa -- a proxy for the AO suggested by Cohen et al. (2002)   
		* note however that we define the polar cap as everything north of 70N, I think Cohen et al do 60N
	+ 'Vortex Strength' -- Polar Cap GPH Anomaly averaged 3-30hPa -- it's a measure of vortex strength suggested by Garfinkel et al. 2012

	"""

    # modify the experiment dictionary to retrieve the right index
    EI = dart.climate_index_dictionaries(index_name)
    E['levrange'] = EI['levrange']
    E['latrange'] = EI['latrange']
    E['lonrange'] = EI['lonrange']
    E['variable'] = EI['variable']

    # for all indices defined so far, compute the anomaly
    # with respect to climatology
    # this uses an anomaly subroutine from the MJO module
    A, C, lat, lon, lev = mjo.ano(E,
                                  climatology_option=climatology_option,
                                  verbose=verbose)

    # Aleutian Low and East European high indices are single points, so just return the anomaly
    if (index_name == 'Aleutian Low') or (index_name == 'East European High'):
        index_out = A

    # for the Polar Cap GPH -based indices, average over latitude and longitude
    # here can use a subroutine written for MJO stuff in the MJO module
    if (index_name == 'AO Proxy') or (index_name == 'Vortex Strength'):
        lat1, lon1, Aave = mjo.aave(E,
                                    A,
                                    lat,
                                    lon,
                                    season=None,
                                    variable_name=None,
                                    averaging_dimension='all')

    # for the AO proxy, reverse the sign so that it's more intuitive -- a positive GPH anomaly is related to a negative AO
    if (index_name == 'AO Proxy') or (index_name == 'Vortex Strength'):
        index_out = -Aave

    # for vortex strength, average between 3 and 30 hPa
    if (index_name == 'Vortex Strength'):
        index_out = np.nanmean(Aave, axis=0)

    # return index over desired daterange
    return index_out
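
A usage sketch with a hypothetical experiment dictionary (real experiment dicts in this codebase carry more keys; 'exp_name', 'copystring', and 'daterange' are the ones read here and in the mjo helpers):

E = {'exp_name': 'W0910_GLOBAL',
     'copystring': 'ensemble mean',
     'daterange': get_available_date_range('W0910_GLOBAL')}
ao_proxy = compute_climate_indices(E, 'AO Proxy', climatology_option='NODA')
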
Example #4
def compute_climate_indices(E,index_name,climatology_option = 'NODA',hostname='taurus',verbose=False):  

	"""
	This subroutine computes various simple climate indices for a dataset 
	defined by an experiment dictionary.  

	Currently supporting the following indices:  
	+ 'Aleutian Low' index of Garfinkel et al. (2010)
	+ 'East European High' index of Garfinkel et al. (2010)
	+ 'AO Proxy' -- Polar Cap GPH Anomaly at 500 hPa -- a proxy for the AO suggested by Cohen et al. (2002)   
		* note however that we define the polar cap as everything north of 70N, I think Cohen et al do 60N
	+ 'Vortex Strength' -- Polar Cap GPH Anomaly averaged 3-30hPa -- it's a measure of vortex strength suggested by Garfinkel et al. 2012

	"""

	# modify the experiment dictionary to retrieve the right index 
	EI = dart.climate_index_dictionaries(index_name)
	E['levrange'] = EI['levrange']
	E['latrange'] = EI['latrange']
	E['lonrange'] = EI['lonrange']
	E['variable'] = EI['variable']

	# for all indices defined so far, compute the anomaly
	# with respect to climatology  
	# this uses an anomaly subroutine from the MJO module  
	A,C,lat,lon,lev = mjo.ano(E,climatology_option = climatology_option,verbose=verbose)

	# Aleutian Low and East European high indices are single points, so just return the anomaly
	if (index_name == 'Aleutian Low') or (index_name == 'East European High'):
		index_out = A

	# for the Polar Cap GPH -based indices, average over latitude and longitude  
	# here can use a subroutine written for MJO stuff in the MJO module  
	if (index_name == 'AO Proxy') or (index_name == 'Vortex Strength'):
		lat1,lon1,Aave = mjo.aave(E,A,lat,lon,season=None,variable_name=None,averaging_dimension='all')

	# for the AO proxy, reverse the sign so that it's more intuitive -- a positive GPH anomaly is related to a negative AO	
	if (index_name == 'AO Proxy') or (index_name == 'Vortex Strength'):
		index_out = -Aave

	# for vortex strength, average between 3 and 30 hPa  
	if (index_name == 'Vortex Strength'):
		index_out = np.nanmean(Aave,axis=0)

	# return index over desired daterange
	return index_out
Example #5
def find_paths(E, date, file_type='diag', hostname='taurus', debug=False):
    """
	This subroutine takes a DART experiment dictionary and returns the file path for the 
	needed diagnostic. 

	The optional input, `file_type`, can have one of these values:  
	+ 'covariance' -- then we load pre-computed data of covariances between state variables and a given obs  
	+ 'obs_epoch' -- load obs_epoch_XXXX.nc files  
	+ 'diag' -- load standard DART Posterior_Diag or Prior_Diag files 
	+ 'truth' -- load true state files from a perfect-model simulation

	Note that if E has an additional entry called 'extrastring', that string is added 
	to the name of the file that we retrieve -- that makes it easy to retrieve 
	unusual files that were created later but in the same style as other files. 
	"""
    import DART as dart

    path_found = False
    if E['run_category'] == 'NCAR':
        data_dir_list, truth_dir_list = exp_paths_NCAR(hostname, E['exp_name'])
        path_found = True
    if 'ERA' in E['exp_name']:
        data_dir_list, truth_dir_list = exp_paths_era(date,
                                                      hostname,
                                                      diagnostic=E['diagn'])
        path_found = True
    if not path_found:
        data_dir_list, truth_dir_list = exp_paths(hostname, E['exp_name'])

    #------------COVARIANCE FILES
    if file_type == 'covariance':
        fname = E['exp_name'] + '_' + 'covariance_' + E['obs_name'] + '_' + E[
            'variable'] + '_' + date.strftime('%Y-%m-%d') + '.nc'

    #------------OBS EPOCH FILES
    if file_type == 'obs_epoch':
        DR = get_experiment_date_ranges(E['exp_name'])
        delta_time = date - DR[0]
        obs_epoch_no = delta_time.days + 1
        # zero-pad the epoch number to three digits
        obs_epoch_name = 'obs_epoch_' + str(obs_epoch_no).zfill(3) + '.nc'
        if E['run_category'] is None:
            fname = '/dart/hist/' + obs_epoch_name
        if E['run_category'] == 'ERPDA':
            fname = '/../obs_epoch/' + obs_epoch_name

    #------------regular DART output files or true state files
    if (file_type == 'diag') or (file_type == 'truth'):
        if E['diagn'] == 'Truth':
            file_type = 'truth'
        # either load a given date, or a time mean
        if isinstance(date, str):
            endstring = date
        else:
            datestr = date.strftime("%Y-%m-%d")
            seconds = date.hour * 60 * 60
            if seconds == 0:
                timestr = '00000'
            else:
                timestr = str(seconds)
            endstring = datestr + '-' + timestr
        if E['run_category'] is None:
            diagstring = 'Diag'
            # additional diagnostics files have the 'Diag' string replaced with something else.
            TIL_variables = ['theta', 'ptrop', 'Nsq', 'P', 'brunt', 'ztrop']
            # the following list returns list of the above variables that appear in the requested variable type
            import re
            matches = [
                string for string in TIL_variables if string in E['variable']
            ]
            if len(matches) > 0:
                diagstring = 'TIL'
            if 'extrastring' not in E:
                E['extrastring'] = ''
            if E['extrastring'] == '':
                fname = '/dart/hist/cam_' + E[
                    'diagn'] + '_' + diagstring + '.' + endstring + '.nc'
                fname_truth = '/dart/hist/cam_' + 'True_State' + '.' + endstring + '.nc'
            else:
                fname = '/dart/hist/cam_' + E[
                    'diagn'] + '_' + diagstring + '.' + E[
                        'extrastring'] + '.' + endstring + '.nc'
                fname_truth = '/dart/hist/cam_' + 'True_State' + '.' + E[
                    'extrastring'] + '.' + endstring + '.nc'
        if E['run_category'] == 'ERPDA':
            gday = dart.date_to_gday(date)
            # for all my (Lisa's) old experiments, obs sequence 1 is 1 Jan 2009
            gday1 = dart.date_to_gday(datetime.datetime(2009, 1, 1, 0, 0, 0))
            obs_seq_no = int(gday - gday1 + 1)
            if (obs_seq_no < 10):
                mid = 'obs_000' + str(obs_seq_no)
            else:
                mid = 'obs_00' + str(obs_seq_no)
            fname_truth = mid + '/' + 'True_State.nc'
            fname = mid + '/' + E['diagn'] + '_Diag.nc'
        if E['run_category'] == 'NCAR':
            if E['exp_name'] == 'NCAR_LAONLY':
                suffix = '_LAONLY'
            else:
                suffix = ''
            fname_truth = '/' + 'True_State' + '_' + datestr + '-' + timestr + '.nc' + suffix
            fname = '/' + E[
                'diagn'] + '_Diag.' + datestr + '-' + timestr + '.nc' + suffix
        if file_type == 'truth':
            fname = fname_truth
            data_dir_list = truth_dir_list

    # if data_dir_list was not found, throw an error
    if data_dir_list is None:
        print(
            'experiment_settings.py cannot find settings for the following experiment dict:'
        )
        print(E)
        return None

    #-----search for the right files
    correct_filepath_found = False
    for data_dir in data_dir_list:
        filename = data_dir + fname
        if debug:
            print('Looking for file  ' + filename)
        if os.path.exists(filename):
            correct_filepath_found = True
            break

    # return the filename with path (if no existing file was found, this is the last path tried)
    return filename
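
A usage sketch with a hypothetical experiment dictionary; the keys shown are the ones this routine reads for a standard 'diag' lookup:

import datetime

E = {'exp_name': 'W0910_GLOBAL', 'run_category': None,
     'diagn': 'Posterior', 'variable': 'T', 'extrastring': ''}
f = find_paths(E, datetime.datetime(2009, 10, 2, 12, 0, 0), file_type='diag')
print(f)   # path to the cam Posterior file for that date, or the last path tried
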
Example #6
def count_Nobs_in_time(E, output_interval=0, DART_qc_flags_list=[0]):
    """
	For a given experiment and list of observations, cycle through a daterange 
	and count the number of each observation type made at each time, 
	then return a Pandas dataframe that gives observations as a function of 
	observation type and name. 

	INPUTS:
	E: A standard DART experiment dictionary. The following keys are important:
		daterange: the range of dates over which to count the observations
		obs_name: a list of the observations to retrieve 
	output_interval: the number of steps (in the daterange) where we print output. 
		This can be helpful for checking progress, because this process can take a while. 
		The default is 0: no output printed to the screen at all.  
	DART_qc_flags_list: a list of DART quality control flags indicating which obs to retrieve. 
		The default is [0], i.e. only retrieving the assimilated observations. 
		Here is what the DART QC flags mean:  
			0= Assimilated  
			1= Evaluated only  
			2= Assimilated but posterior forward observation operator(s) failed  
			3= Evaluated only but posterior forward observation operator(s) failed  
			4= Not used, prior forward observation operator(s) failed  
			5= Not used because not selected in obs_kind_nml  
			6= Not used, failed prior quality control check  
			7= Not used, violated outlier threshold  
	"""

    # copy the input dict into a temporary one, and make sure that the right diagnostic (prior)
    # and copy (observation) are retrieved.
    P = E.copy()
    P['diagn'] = 'prior'
    P['copystring'] = 'observation'

    # save the daterange and create an empty dictionary with the dates as the keys
    DR = E['daterange']
    Sdict = dict.fromkeys(DR)

    #  loop over dates
    for D, ii in zip(DR, range(len(DR))):
        if output_interval != 0:
            if np.mod(ii, output_interval) == 0:
                print(D)
        P['daterange'] = [D]

        # for each date, load all the obs types
        OBS, copy_names, obs_names, lons, lats, levs, QCdict = dart.load_DART_obs_epoch_file(
            P, debug=False)

        # initialize an empty dictionary comprised of a list for each obs type
        obscount = {k: [] for k in P['obs_name']}

        # select only the obs whose DART quality control flag is in the requested list
        QCD = QCdict['DART quality control            ']
        assimilated = [obs_names[i] for i, j in enumerate(QCD) if j in DART_qc_flags_list]

        # for each obs type, count how many times it occurs, and store in the dictionary
        for ObsName in P['obs_name']:
            nobs = assimilated.count(ObsName)
            obscount[ObsName] = nobs

        # now turn the dictionary into a Series
        S = pd.Series(obscount)

        # store the series for this date in a dictionary
        Sdict[D] = S

    # turn the dictionary into a pandas dataframe
    DF = pd.DataFrame(Sdict)

    return DF
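
A usage sketch (hypothetical experiment dict; RADIOSONDE_TEMPERATURE and RADIOSONDE_U_WIND_COMPONENT are standard DART observation type names):

E = {'exp_name': 'W0910_GLOBAL',
     'daterange': get_available_date_range('W0910_GLOBAL'),
     'obs_name': ['RADIOSONDE_TEMPERATURE', 'RADIOSONDE_U_WIND_COMPONENT']}
DF = count_Nobs_in_time(E, output_interval=20)
DF.T.plot()   # assimilated-obs counts per type as a function of time
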
Example #7
def plot_DARTobs_scatter_lev_lat(E,
                                 colors=None,
                                 compare='QC',
                                 QC_list=range(8),
                                 yscale='log',
                                 alpha=0.5,
                                 hostname='taurus',
                                 debug=False,
                                 add_legend=False):
    """
	This code plots a scatterplot of DART assimilated or evaluated
	observations on a level-latitude slice.
	It is possible to simultaneously plot several obs types (listed in E['obs_name']), or 
	different DART quality control flags (listed in QC_list).  
	
	INPUTS:
	E: a DART experiment dictionary. The relevant keys are: 
		'latrange' : gives the lat limits of the plot
		'levrange' : gives the vertical level range of the plot  
		'copystring': a string giving the DART copies to show. If it's a list, we loop over the list
		'obs_name': a string giving the DART observation to show. If it's a list, we loop over the list
		'daterange': range of dates over which to plot the observations 
	colors: list of colors assigned to either the different types of obs plotted, or the QC values. 
		Default is None, which selects the colorbrewer qualitative 'Dark2' palette  
	compare: select either 'QC' to color code the QC values, or 'obs_type' to color code the observation types
		Default is 'QC'
	QC_list: list of QC values to plot. The default is all values from 0 to 7
	yscale: the scale of the levels axis -- choose 'linear' or 'log' -- default is log
	alpha: the degree of transparency. default is 0.5
	add_legend: set to True to show a legend. Default is False. 
	"""

    #--------- load the obs on the given day
    OBS, copy_names, obs_names, lons, lats, levs_Pa, QCdict = dart.load_DART_obs_epoch_file(
        E, debug=debug, hostname=hostname)

    # convert the levels from Pa to hPa
    levs = [ll / 100.0 for ll in levs_Pa]

    # define a list of colors if needed
    if colors is None:
        if compare == 'obs_type':
            if type(E['obs_name']) is not list:
                obs_type_list = [E['obs_name']]
            else:
                obs_type_list = E['obs_name']
            NN = len(obs_type_list)
        if compare == 'QC':
            NN = len(QC_list)
        ncol = np.min([NN, 12])
        if ncol < 3:
            ncol = 3

        # use the colorbrewer qualitative 'Dark2' palette via palettable
        # (the old call here referenced undefined variables; this matches plot_DARTobs_scatter_globe)
        colors = palettable.colorbrewer.qualitative.Dark2.mpl_colors

    # if comparing different observation types, loop over the list of obs
    # and select the lats and levs for the desired obs types
    if compare == 'obs_type':
        for obs_type, ii in zip(obs_type_list, range(len(obs_type_list))):
            levs_obstype = [
                levs[i] for i, x in enumerate(obs_names) if obs_type in x
            ]
            lats_obstype = [
                lats[i] for i, x in enumerate(obs_names) if obs_type in x
            ]

            # scatter the obs over the map
            y = levs_obstype
            x = lats_obstype
            plt.scatter(x,
                        y,
                        s=10,
                        color=colors[ii],
                        alpha=alpha,
                        rasterized=True)

    # if comparing different QC values , loop over the list of obs
    # and select the lats and levs for the desired obs types
    if compare == 'QC':
        DQC = QCdict['DART quality control            ']
        for QC, ii in zip(QC_list, range(len(QC_list))):
            levs_obstype = [levs[i] for i, x in enumerate(DQC) if QC == x]
            lats_obstype = [lats[i] for i, x in enumerate(DQC) if QC == x]

            # scatter the obs over the map
            y = levs_obstype
            x = lats_obstype
            plt.scatter(x,
                        y,
                        s=10,
                        color=colors[ii],
                        alpha=alpha,
                        rasterized=True)

    # axis labels
    plt.xlabel('Latitude')
    plt.ylabel('Pressure (hPa)')
    plt.yscale(yscale)
    plt.gca().invert_yaxis()

    # change the plot limits
    plt.xlim(E['latrange'])
    plt.ylim(E['levrange'])

    # add a legend
    if add_legend:
        if compare == 'QC':
            QCdef = DART_QC_values()
            QCnames = [QCdef[QCvalue] for QCvalue in QC_list]
            L = plt.legend(QCnames, loc='best')
        if compare == 'obs_type':
            L = plt.legend(obs_type_list, loc='best')
        return L

    return
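
A usage sketch (hypothetical experiment dict; assumes matplotlib.pyplot is imported as plt, as in these snippets):

E = {'exp_name': 'W0910_GLOBAL', 'diagn': 'prior',
     'daterange': get_available_date_range('W0910_GLOBAL')[0:4],
     'obs_name': ['RADIOSONDE_TEMPERATURE'],
     'latrange': [-90, 90], 'levrange': [1000, 1]}
plot_DARTobs_scatter_lev_lat(E, compare='QC', QC_list=[0, 1, 7], add_legend=True)
plt.show()
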
Example #8
def plot_DARTobs_scatter_globe(E,
                               projection='miller',
                               coastline_width=0,
                               water_color="#CCF3FF",
                               land_color="#996600",
                               colors=None,
                               compare='QC',
                               QC_list=range(8),
                               alpha=0.5,
                               hostname='taurus',
                               debug=False):
    """
	This code plots a scatterplot of the horizontal distribution of DART assimilated or evaluated
	observations on a map.
	
	INPUTS:
	E: a DART experiment dictionary. The relevant keys are: 
		'latrange' : gives the lat limits of the plot
		'lonrange' : gives the lon limits of the plot
		'levrange' : only observations between these levels are shown 
		'copystring': a string giving the DART copies to show. If it's a list, we loop over the list
		'obs_name': a string giving the DART observation to show. If it's a list, we loop over the list
		'daterange': range of dates over which to plot the observations 
	projection: the map projection to use (default is "miller")
	coastline_width: width of the coastlines; default is 0 (no coastlines)
	water_color: color given to oceans and lakes (default is cyan/blue)
	land_color: color given to continents (default is an earthy brown)
	colors: list of colors assigned to the different types of obs plotted. Default is the colorbrewer qualitative 'Dark2' palette 
	compare: select either 'QC' to color code the QC values, or 'obs_type' to color code the observation types
		Default is 'QC'
	QC_list: list of QC values to plot. The default is all values from 0 to 7
	alpha: the degree of transparency. default is 0.5
	"""

    #---------set up the map-----------------
    # if plotting a polar stereographic projection, it's better to return all lats and lons, and then
    # cut off the unwanted regions with map limits -- otherwise we get artificial circles on a square map
    if (projection == 'npstere'):
        if E['latrange'][0] < 0:
            boundinglat = 0
        else:
            boundinglat = E['latrange'][0]
        E['latrange'] = [-90, 90]
        E['lonrange'] = [0, 361]
    if (projection == 'spstere'):
        boundinglat = E['latrange'][1]
        E['latrange'] = [-90, 90]
        E['lonrange'] = [0, 361]

    # set up a map projection
    if projection == 'miller':
        maxlat = np.min([E['latrange'][1], 90.0])
        minlat = np.max([E['latrange'][0], -90.0])
        map = Basemap(projection='mill',llcrnrlat=minlat,urcrnrlat=maxlat,\
             llcrnrlon=E['lonrange'][0],urcrnrlon=E['lonrange'][1],resolution='l')
    if 'stere' in projection:
        map = Basemap(projection=projection,
                      boundinglat=boundinglat,
                      lon_0=0,
                      resolution='l')
    if projection is None:
        map = Basemap(projection='ortho', lat_0=54, lon_0=10, resolution='l')

    # draw coastlines, country boundaries, fill continents.
    map.drawcoastlines(linewidth=coastline_width)
    map.drawmapboundary(fill_color=water_color)

    # draw lat/lon grid lines every 30 degrees.
    map.drawmeridians(np.arange(0, 360, 30), linewidth=0.25)
    map.drawparallels(np.arange(-90, 90, 30), linewidth=0.25)
    map.fillcontinents(color=land_color, lake_color=water_color, alpha=alpha)

    #--------- load the obs on the given day
    OBS, copy_names, obs_names, lons, lats, levs, QCdict = dart.load_DART_obs_epoch_file(
        E, debug=debug, hostname=hostname)

    #---------loop over obs types-----------------
    # loop over obs types given in E
    if type(E['obs_name']) is not list:
        obs_type_list = [E['obs_name']]
    else:
        obs_type_list = E['obs_name']

    # define a list of colors if needed
    if colors is None:
        if compare == 'obs_type':
            if type(E['obs_name']) is not list:
                obs_type_list = [E['obs_name']]
            else:
                obs_type_list = E['obs_name']
            NN = len(obs_type_list)
        if compare == 'QC':
            NN = len(QC_list)
        ncol = np.min([NN, 12])
        if ncol < 3:
            ncol = 3
        colors = palettable.colorbrewer.qualitative.Dark2.mpl_colors

    # if comparing observation types, loop over them and scatter plot individually
    if compare == 'obs_type':
        for obs_type, ii in zip(obs_type_list, range(len(obs_type_list))):
            lons_obstype = [
                lons[i] for i, x in enumerate(obs_names) if obs_type in x
            ]
            lats_obstype = [
                lats[i] for i, x in enumerate(obs_names) if obs_type in x
            ]

            # scatter the obs over the map
            x, y = map(lons_obstype, lats_obstype)
            map.scatter(x, y, 3, marker='o', color=colors[ii], rasterized=True)

    # if comparing different QC values , loop over the list of obs
    # and select the lats and levs for the desired obs types
    if compare == 'QC':
        DQC = QCdict['DART quality control            ']
        for QC, ii in zip(QC_list, range(len(QC_list))):
            lons_obstype = [lons[i] for i, x in enumerate(DQC) if QC == x]
            lats_obstype = [lats[i] for i, x in enumerate(DQC) if QC == x]

            # scatter the obs over the map
            x, y = map(lons_obstype, lats_obstype)
            map.scatter(x, y, 3, marker='o', color=colors[ii], rasterized=True)
    return
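
A usage sketch, reusing the hypothetical experiment dict E from the previous sketch plus a longitude range (requires Basemap and matplotlib):

E['lonrange'] = [0, 360]
plot_DARTobs_scatter_globe(E, projection='npstere', compare='obs_type', alpha=0.7)
plt.show()
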
Example #9
def HRRS_mean_ztrop_to_csv(DR, hostname='taurus', debug=False):
    """
	Given a certain daterange, retrieve available high res radiosonde data,
	compute the average tropopause height per station, and store in a 
	csv file. 
	"""
    from TIL import ztrop

    # first read in station information as a dataframe
    stationdata = HRRS_station_data(hostname)

    # because the HRRS data are sorted by years, loop over the years in the daterange
    y0 = DR[0].year
    yf = DR[len(DR) - 1].year
    years = range(y0, yf + 1, 1)
    for YYYY in years:

        # load a list of the available stations for that year
        Slist = HRRS_stations_available_per_year(YYYY)

        # also compute the subset of the requested daterange that fits into this year.
        year_daterange = dart.daterange(date_start=datetime.datetime(
            YYYY, 1, 1, 0, 0, 0),
                                        periods=365 * 4,
                                        DT='6H')
        DR2 = set(year_daterange).intersection(DR)

        # also find the dir where the station data live
        datadir = es.obs_data_paths('HRRS', hostname)

        # initialize empty dictionary to hold average tropopause heights per station
        ztrop_dict = dict()

        # now loop over available stations, and for each one, retrieve the data
        # that fit into the requested daterange

        for s in Slist:
            # empty list to hold tropopause heights for all available obs per station
            ztrop_list = []

            # loop over dates, and retrieve data if available
            for dd in DR2:
                datestr = dd.strftime("%Y%m%d%H")
                ff = datadir + '/' + str(YYYY) + '/' + str(s) + '/' + str(
                    s) + '-' + datestr + '_mod.dat'
                if os.path.exists(ff):

                    if debug:
                        print(ff)

                    # read in the station data
                    D = read_HRRS_data(ff)

                    # compute tropopause height
                    z = D['Alt'] / 1E3  # Altitude in km
                    T = D['Temp'] + 273.15  # Temp in Kelvin
                    ztropp = ztrop(z=z, T=T, debug=debug, hostname=hostname)

                    # add to list if not none
                    if ztropp is not None:
                        ztrop_list.append(ztropp)

            # average the tropopause heights and add to dictionary
            ztrop_dict[s] = np.mean(ztrop_list)

        # turn the dictionary into a pandas Series
        ZT = pd.Series(data=ztrop_dict, name='ztrop_mean')

        if debug:
            print(ZT)

        # write the Series to a csv file
        hrrs_path = es.obs_data_paths('HRRS', hostname)
        datestr = DR[0].strftime("%Y%m%d") + '-' + DR[len(DR) - 1].strftime(
            "%Y%m%d") + '.csv'
        fname = hrrs_path + '/' + 'mean_tropopause_height_per_station_' + datestr
        print('storing file ' + fname)
        ZT.to_csv(fname, index=True, sep=',', header=True)

    # return the Series once all years in the daterange have been processed
    return ZT
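
A usage sketch, assuming the dart helper module for building the 6-hourly daterange (as used above):

import datetime

DR = dart.daterange(date_start=datetime.datetime(2010, 1, 1, 0, 0, 0), periods=124, DT='6H')
ZT = HRRS_mean_ztrop_to_csv(DR, hostname='taurus')
print(ZT.head())   # mean tropopause height per station
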
Example #10
def count_Nobs_in_time(E,output_interval=0,DART_qc_flags_list=[0]):

	"""
	For a given experiment and list of observations, cycle through a daterange 
	and count the number of each observation type made at each time, 
	then return a Pandas dataframe that gives observations as a function of 
	observation type and name. 

	INPUTS:
	E: A standard DART experiment dictionary. The following keys are important:
		daterange: the range of dates over which to count the observations
		obs_name: a list of the observations to retrieve 
	output_interval: the number of steps (in the daterange) where we print output. 
		This can be helpful for checking progress, because this process can take a while. 
		The default is 0: no output printed to the screen at all.  
	DART_qc_flags_list: a list of DART quality control flags indicating which obs to retrieve. 
		The default is [0], i.e. only retrieving the assimilated observations. 
		Here is what the DART QC flags mean:  
			0= Assimilated  
			1= Evaluated only  
			2= Assimilated but posterior forward observation operator(s) failed  
			3= Evaluated only but posterior forward observation operator(s) failed  
			4= Not used, prior forward observation operator(s) failed  
			5= Not used because not selected in obs_kind_nml  
			6= Not used, failed prior quality control check  
			7= Not used, violated outlier threshold  
	"""

	# copy the input dict into a temporary one, and make sure that the right diagnostic (prior)
	# and copy (observation) are retrieved. 
	P = E.copy()
	P['diagn']='prior'
	P['copystring']='observation'

	# save the daterange and create an empty dictionary with the dates as the keys 
	DR = E['daterange']
	Sdict = dict.fromkeys(DR)

	#  loop over dates
	for D,ii in zip(DR,range(len(DR))):
		if output_interval != 0:
			if np.mod(ii,output_interval) == 0:
				print(D)
		P['daterange'] = [D]

		# for each date, load all the obs types
		OBS,copy_names,obs_names,lons,lats,levs,QCdict = dart.load_DART_obs_epoch_file(P,debug=False)

		# initialize an empty dictionary comprised of a list for each obs type
		obscount = {k:[] for k in P['obs_name']}

		# select only the obs whose DART quality control flag is in the requested list
		QCD = QCdict['DART quality control            ']
		assimilated = [obs_names[i] for i, j in enumerate(QCD) if j in DART_qc_flags_list]

		# for each obs type, count how many times it occurs, and store in the dictionary
		for ObsName in P['obs_name']:
			nobs = assimilated.count(ObsName)
			obscount[ObsName] = nobs

		# now turn the dictionary into a Series
		S = pd.Series(obscount)

		# store the series for this date in a dictionary 
		Sdict[D] = S


	# turn the dictionary into a pandas dataframe 
	DF = pd.DataFrame(Sdict)

	return DF
Example #11
def plot_DARTobs_scatter_lev_lat(E,colors=None,compare='QC',QC_list=range(8),yscale='log',alpha=0.5,hostname='taurus',debug=False,add_legend=False):

	"""
	This code plots a scatterplot of DART assimilated or evaluated
	observations on a level-latitude slice.
	It is possible to simultaneously plot several obs types (listed in E['obs_name']), or 
	different DART quality control flags (listed in QC_list).  
	
	INPUTS:
	E: a DART experiment dictionary. The relevant keys are: 
		'latrange' : gives the lat limits of the plot
		'levrange' : gives the vertical level range of the plot  
		'copystring': a string giving the DART copies to show. If it's a list, we loop over the list
		'obs_name': a string giving the DART observation to show. If it's a list, we loop over the list
		'daterange': range of dates over which to plot the observations 
	colors: list of colors assigned to either the different types of obs plotted, or the QC values. 
		Default is None, which selects the colorbrewer qualitative 'Dark2' palette  
	compare: select either 'QC' to color code the QC values, or 'obs_type' to color code the observation types
		Default is 'QC'
	QC_list: list of QC values to plot. The default is all values from 0 to 7
	yscale: the scale of the levels axis -- choose 'linear' or 'log' -- default is log
	alpha: the degree of transparency. default is 0.5
	add_legend: set to True to show a legend. Default is False. 
	"""

	#--------- load the obs on the given day 
	OBS,copy_names,obs_names,lons,lats,levs_Pa,QCdict = dart.load_DART_obs_epoch_file(E,debug=debug,hostname=hostname)

	# convert the levels from Pa to hPa
	levs = [ll/100.0 for ll in levs_Pa]


	# define a list of colors if needed 
	if colors is None:
		if compare == 'obs_type':
			if type(E['obs_name']) is not list:
				obs_type_list = [E['obs_name']]
			else:
				obs_type_list = E['obs_name']
			NN = len(obs_type_list)
		if compare == 'QC':
			NN = len(QC_list)
		ncol = np.min([NN,12])
		if ncol < 3:
			ncol=3

		# use the colorbrewer qualitative 'Dark2' palette via palettable
		# (the old call here referenced undefined variables; this matches plot_DARTobs_scatter_globe)
		colors = palettable.colorbrewer.qualitative.Dark2.mpl_colors

	# if comparing different observation types, loop over the list of obs
	# and select the lats and levs for the desired obs types 
	if compare == 'obs_type':
		for obs_type,ii in zip(obs_type_list,range(len(obs_type_list))):
			levs_obstype = [levs[i] for i,x in enumerate(obs_names) if obs_type in x]
			lats_obstype = [lats[i] for i,x in enumerate(obs_names) if obs_type in x]

			# scatter the obs over the map 
			y = levs_obstype
			x = lats_obstype
			plt.scatter(x,y,s=10,color=colors[ii],alpha=alpha,rasterized=True)

	# if comparing different QC values , loop over the list of obs
	# and select the lats and levs for the desired obs types 
	if compare == 'QC':
		DQC = QCdict['DART quality control            ']
		for QC,ii in zip(QC_list,range(len(QC_list))):
			levs_obstype = [levs[i] for i,x in enumerate(DQC) if QC == x]
			lats_obstype = [lats[i] for i,x in enumerate(DQC) if QC == x]

			# scatter the obs over the map 
			y = levs_obstype
			x = lats_obstype
			plt.scatter(x,y,s=10,color=colors[ii],alpha=alpha,rasterized=True)

	# axis labels 
	plt.xlabel('Latitude')
	plt.ylabel('Pressure (hPa)')
	plt.yscale(yscale)
	plt.gca().invert_yaxis()

	# change the plot limits 
	plt.xlim(E['latrange'])
	plt.ylim(E['levrange'])

	# add a legend
	if add_legend:
		if compare == 'QC':
			QCdef = DART_QC_values()
			QCnames = [QCdef[QCvalue] for QCvalue in QC_list]
			L = plt.legend(QCnames,loc='best')
		if compare == 'obs_type':
			L = plt.legend(obs_type_list,loc='best')
		return L 

	return 
Example #12
def plot_climate_indices(E,
                         index_name,
                         copies_to_plot,
                         climatology_option='NODA',
                         hostname='taurus',
                         verbose=False):
    """
	This subroutine computes a bunch of simple climate indices 
	for a given experiment, and plots them for all copies given by 'copies_to_plot'

	INPUTS:
	copies_to_plot: list containing keywords for what copies to plot. Here are the options:  
	+ any valid copystring in DART output data  (e.g. "ensemble member 1")
	+ 'ensemble' = plot the entire ensemble  
	+ 'ensemble mean' = plot the ensemble mean  
	+ 'operational' = plot the operational value of this index 
	"""

    # create a list of copies to load
    copy_list = []

    if "copystring" in copies_to_plot:
        copy_list.append(E['copystring'])

    if ("ensemble" in copies_to_plot):
        N = dart.get_ensemble_size_per_run(E['exp_name'])
        for iens in np.arange(1, N + 1):
            if iens < 10:
                spacing = '      '
            else:
                spacing = '     '
            copy_list.append("ensemble member" + spacing + str(iens))
    if ("ensemble mean" in copies_to_plot):
        copy_list.append("ensemble mean")

    # retrieve desired index for all the copies in the list
    L = []
    for copy in copy_list:
        E['copystring'] = copy
        x = compute_climate_indices(E, index_name, climatology_option,
                                    hostname, verbose)
        L.append(x)

    # plot it
    for copy, climate_index in zip(copy_list, L):

        # define a color for the ensemble mean - depending on experiment
        lcolor = "#000000"
        if E['exp_name'] == 'W0910_NODA':
            bmap = brewer2mpl.get_map('Dark2', 'qualitative', 7)
            lcolor = bmap.hex_colors[1]
        if E['exp_name'] == 'W0910_GLOBAL':
            # make this one black, since it's sort of the reference
            lcolor = "#000000"
        if E['exp_name'] == 'W0910_TROPICS':
            bmap = brewer2mpl.get_map('Dark2', 'qualitative', 7)
            lcolor = bmap.hex_colors[0]

        # make the ensemble a lighter version of their original color
        ensemble_color = plot_tools.colorscale(lcolor, 1.6)

        # here is the plot
        if (copy == 'ensemble mean'):
            plt.plot(E['daterange'], climate_index, color=lcolor, linewidth=2)
        if "ensemble member" in copy:
            plt.plot(E['daterange'],
                     climate_index,
                     color=ensemble_color,
                     linewidth=1)
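
A usage sketch (same hypothetical experiment dict pattern as above; brewer2mpl, plot_tools, and matplotlib are assumed importable, as in these snippets):

E = {'exp_name': 'W0910_GLOBAL',
     'copystring': 'ensemble mean',
     'daterange': get_available_date_range('W0910_GLOBAL')}
plot_climate_indices(E, 'Aleutian Low', copies_to_plot=['ensemble', 'ensemble mean'])
plt.show()
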
Example #13
def Nsq_forcing_from_Q(E,datetime_in=None,debug=False,hostname='taurus'):

	"""
	Birner (2010) used the thermodynamic equation in the TEM form to derive an expression 
	for the rate of change of static stability (N2) due to residual motion and diabatic heating. 

	This subroutine computes the term due to diabatic heating, i.e.: 
	g d(Q/theta)/dz

	INPUTS:
	E: a DART experiment dictionary. Relevant fields are:
		E['exp_name'] - the experiment name
		E['daterange'] - helps to choose which date to load in case this isn't specifically given
		E['variable'] - this determines what kind of diabatic heating we use:
			the value of E['variable'] should be a string like 'Nsq_forcing_XXXXX'
			where XXXXX is the model variable corresponding to whatever diabatic 
			heating type we are looking for. 
			For example, in WACCM, 'QRL_TOT' is the total longwave heating, so to get the 
			N2 forcing from that, just set E['variable']='Nsq_forcing_QRL_TOT'
	datetime_in: the date for which we want to compute this diagnostic. 
		default is None -- in this case, just choose the first date in E['daterange']


	OUTPUTS:
	N2_forcing: Nsquared forcing term in s^{-2}/day
	lev
	lat 
	"""

	# necessary constants  
	H=7000.0	# scale height in m  
	p0=1000.0	# reference pressure in hPa  
	g=9.8		# acceleration of gravity 

	# default to the first date in E['daterange'] if no date is given
	if datetime_in is None:
		datetime_in = E['daterange'][0]

	# load the desired diabatic heating term
	# this is not typically part of the DART output, so load from model history files
	# (right now this really only works for WACCM/CAM)  
	# note: str.strip() removes a set of characters rather than a prefix, so slice the prefix off instead
	Qstring = E['variable'][len('Nsq_forcing_'):]
	EQ = E.copy()
	EQ['variable']=Qstring
	Q2,lat,lon,lev = DSS.compute_DART_diagn_from_model_h_files(EQ,datetime_in,verbose=debug)
	# remove the time dimension, which should have length 1 
	Q = np.squeeze(Q2)

	# also load potential temperature 
	ET = E.copy()
	ET['variable']='theta'
	lev,lat,lon,theta2,P0,hybm,hyam = dart.load_DART_diagnostic_file(ET,datetime_in,hostname=hostname,debug=debug)
	# squeeze out extra dims, which we get if we load single copies (e.g. ensemble mean)
	theta = np.squeeze(theta2)

	# now find the longitude dimension and average over it  
	# for both Q and theta  
	nlon=len(lon)
	Mean_arrays = []
	for A in [Q,theta]:
		for idim,s in enumerate(A.shape):
			if s == nlon:
				londim=idim
		Mean_arrays.append(np.average(A,axis=londim))
	Q_mean=Mean_arrays[0]
	theta_mean=Mean_arrays[1]

	# if the shapes don't match up, might have to transpose one of them
#	if Mean_arrays[1].shape[0] != Q_mean.shape[0]:
#		theta_mean=np.transpose(Mean_arrays[1])
#	else:
#		theta_mean=Mean_arrays[1]
	
	# Q_mean should come out as copy x lev x lat, whereas theta_mean is copy x lat x lev,  
	# so manually transpose Q_mean to match
	Q_mean2 = np.zeros(shape=theta_mean.shape)
	if Q_mean2.ndim==3:
		for icopy in range(theta_mean.shape[0]):
			for ilat in range(theta_mean.shape[1]):
				for ilev in range(theta_mean.shape[2]):
					Q_mean2[icopy,ilat,ilev]=Q_mean[icopy,ilev,ilat]
	else:
		for ilat in range(theta_mean.shape[0]):
			for ilev in range(theta_mean.shape[1]):
				Q_mean2[ilat,ilev]=Q_mean[ilev,ilat]
		
	# divide Q by theta
	X = Q_mean2/theta_mean

	# convert pressure levels to approximate altitude and take the vertical gradient  
	zlev = H*np.log(p0/lev)
	dZ = np.gradient(zlev)   # gradient of vertical levels in m

	# now X *should* have shape (copy x lat x lev) OR (lat x lev)
	# so need to copy dZ to look like this 
	if X.ndim==3:
		dZm = dZ[None,None,:]
		levdim=2
	if X.ndim==2:
		dZm = dZ[None,:]
		levdim=1
	dZ3 = np.broadcast_to(dZm,X.shape)
	dXdZ_3D = np.gradient(X,dZ3)
	dxdz = dXdZ_3D[levdim] # this is the vertical gradient with respect to height 

	# the above calculation yields a quantity in units s^-2/s, but it makes more sense 
	# in the grand scheme of things to look at buoyancy forcing per day, so here 
	# is a conversion factor.
	seconds_per_day = 60.*60.*24.0

	# compute the N2 forcing (vectorized over any copy/ensemble dimension)
	N2_forcing = g*dxdz*seconds_per_day

	return N2_forcing,lat,lev
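
The broadcast-and-gradient step above can be written more compactly with modern numpy (>= 1.13), which accepts a coordinate array plus an axis argument; a self-contained sketch with hypothetical shapes:

import numpy as np

H, p0 = 7000.0, 1000.0                       # scale height (m) and reference pressure (hPa), as above
lev = np.array([3., 10., 30., 100., 300.])   # hypothetical pressure levels in hPa
X = np.random.rand(16, lev.size)             # hypothetical (lat x lev) field, e.g. Q/theta
zlev = H * np.log(p0 / lev)                  # approximate log-pressure altitude in m
dXdz = np.gradient(X, zlev, axis=1)          # vertical gradient with respect to height
print(dXdz.shape)                            # (16, 5): same shape as X
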
Example #14
def plot_climate_indices(E,index_name,copies_to_plot,climatology_option = 'NODA',hostname='taurus',verbose=False):

	"""
	This subroutine computes a bunch of simple climate indices 
	for a given experiment, and plots them for all copies given by 'copies_to_plot'

	INPUTS:
	copies_to_plot: list containing keywords for what copies to plot. Here are the options:  
	+ any valid copystring in DART output data  (e.g. "ensemble member 1")
	+ 'ensemble' = plot the entire ensemble  
	+ 'ensemble mean' = plot the ensemble mean  
	+ 'operational' = plot the operational value of this index 
	"""

	# create a list of copies to load
	copy_list = []

	if "copystring" in copies_to_plot:
		copy_list.append(E['copystring'])

	if ("ensemble" in copies_to_plot): 
		N = dart.get_ensemble_size_per_run(E['exp_name'])
		for iens in np.arange(1,N+1):
			if iens < 10:
				spacing = '      '
			else:
				spacing = '     '
			copy_list.append("ensemble member"+spacing+str(iens))		
	if ("ensemble mean" in copies_to_plot): 
			copy_list.append("ensemble mean")

	# retrieve desired index for all the copies in the list
	L = []
	for copy in copy_list:
		E['copystring'] = copy
		x = compute_climate_indices(E,index_name,climatology_option,hostname,verbose)
		L.append(x)

	# plot it  
	for copy,climate_index in zip(copy_list,L):

		# define a color for the ensemble mean - depending on experiment   
		lcolor = "#000000"
		if E['exp_name'] == 'W0910_NODA':
			bmap = brewer2mpl.get_map('Dark2', 'qualitative', 7)
			lcolor = bmap.hex_colors[1]
		if E['exp_name'] == 'W0910_GLOBAL':
			# make this one black, since it's sort of the reference  
			lcolor = "#000000"
		if E['exp_name'] == 'W0910_TROPICS':
			bmap = brewer2mpl.get_map('Dark2', 'qualitative', 7)
			lcolor = bmap.hex_colors[0]

		# make the ensemble a lighter version of their original color  
		ensemble_color = plot_tools.colorscale(lcolor, 1.6)

		# here is the plot  
		if (copy == 'ensemble mean'):
			plt.plot(E['daterange'],climate_index,color=lcolor,linewidth=2)
		if "ensemble member" in copy:
			plt.plot(E['daterange'],climate_index,color=ensemble_color,linewidth=1)
Example #15
def Nsq_forcing_from_RC(E,datetime_in=None,debug=False,hostname='taurus'):

	"""
	Birner (2010) used the thermodynamic equation in the TEM form to derive an expression 
	for the rate of change of static stability (N2) due to residual motion and diabatic heating. 

	This subroutine computes those terms from the dynamical heating rates computed by Wuke Wang. 
	The vertical motion (wstar) term is -d(wstar*Nsq)/dz.  
	Wuke already computed WS = -wstar*H*Nsq/R, so it's easiest to load that data, divide out H and R, and then take the vertical gradient. 

	The horizontal term is -g d(vstar/theta * d(theta)/dy)/dz. 
	Wuke already computed the heating rate term v*dtheta/dy = v*dTdy, 
	so the easiest thing to do is to multiply the heating rates by g/theta
	and then take the vertical gradient. 

	INPUTS:
	E: a DART experiment dictionary. Relevant fields are:
		E['exp_name'] - the experiment name
		E['daterange'] - helps to choose which date to load in case this isn't specifically given
		E['variable'] - if this is set to 'Nsq_vstar_forcing', the code returns the N2 forcing due to 
			meridional residual circulation. For anything else, it returns the forcing 
			due to vertical residual circulation. 
	datetime_in: the date for which we want to compute this diagnostic. 
		default is None -- in this case, just choose the first date in E['daterange']


	OUTPUTS:
	N2_forcing: Nsquared forcing term in s^{-2}/day
	lev
	lat 
	"""

	# necessary constants  
	H=7000.0	# scale height in m  
	g = 9.80
	p0=1000.0	# reference pressure in hPa  

	if datetime_in is None:
		datetime_in = E['daterange'][0]

	# depending on which term we want, need to load the residual circulation component and some other stuff, 
	# and then derive a quantity for which we take the vertical gradient 
	ERC = E.copy()
	ET=E.copy()
	if E['variable'] == 'Nsq_vstar_forcing':
		ET['variable']='theta'
		lev,lat,lon,theta,P0,hybm,hyam = dart.load_DART_diagnostic_file(ET,datetime_in,hostname=hostname,debug=debug)
		ERC['variable']='VSTAR'
		vstar,lat,lev = DSS.compute_DART_diagn_from_Wang_TEM_files(ERC,datetime_in,hostname=hostname,debug=debug)

		# the above routines do not return arrays of consistent shape, so have to do 
		# some acrobatics to get everything to match up. 

		# find how the dimensions fit to the shape 
		nlon=len(lon)
		nlat=len(lat)
		nlev=len(lev)
		# only the longitude dimension is needed below (for the zonal mean)
		for idim,s in enumerate(theta.shape):
			if s==nlon:
				londim=idim
			
		# take the zonal mean of potential temp  - this should make its shape copy x lat x lev
		thetam = np.average(theta,axis=londim)

		# next step is to find the meridional gradient of theta 
		# latitude steps --> convert to distance (arclength)
		rlat = np.deg2rad(lat)
		Re = 6371000.0		# radius of Earth in m 
		y = Re*rlat
		dy = np.gradient(y)
		# need to replicate dy to suit the shape of zonal mean theta 
		dym = dy[None,:,None]
		dy3 = np.broadcast_to(dym,thetam.shape)
		# here is the gradient - need to squeeze out a possible length-1 
		# copy dimension 
		dthetady_list = np.gradient(np.squeeze(thetam),np.squeeze(dy3))

		# now find which dimension of _squeezed_ thetam corresponds to latitude - 
		# that's the gradient that we want
		# (is this a pain in the ass? Yes! But I haven't yet found a more clever approach) 
		for idim,s in enumerate(np.squeeze(thetam).shape):
			if s==nlat:
				newlatdim=idim
		dthetady = dthetady_list[newlatdim]

		# the meridional gradient of zonal mean theta then gets multiplied by vstar and g/theta. But...  
		
		# the subroutine compute_DART_diagn_from_Wang_TEM_files delivers an array with 
		# dimensions lev x lat x copy (or just levxlat)
		# whereas N2 should come out as copy x lat x lev (or simply lat x lev)
		# need to transpose this, but I don't trust np.reshape - do it manually 
		vstar2 = np.zeros(shape=dthetady.shape)
		if vstar2.ndim==3:
			for icopy in range(dthetady.shape[0]):
				for ilat in range(dthetady.shape[1]):
					for ilev in range(dthetady.shape[2]):
						vstar2[icopy,ilat,ilev]=vstar[icopy,ilev,ilat]
		else:
			for ilat in range(dthetady.shape[0]):
				for ilev in range(dthetady.shape[1]):
					vstar2[ilat,ilev]=vstar[ilev,ilat]

		X = (g/np.squeeze(thetam))*vstar2*dthetady

	else:
		
		ET['variable']='Nsq'
		lev,lat,lon,Nsq,P0,hybm,hyam = dart.load_DART_diagnostic_file(ET,datetime_in,hostname=hostname,debug=debug)
		ERC['variable']='WSTAR'
		wstar,lat,lev = DSS.compute_DART_diagn_from_Wang_TEM_files(ERC,datetime_in,hostname=hostname,debug=debug)

		# find how the dimensions fit to the shape 
		nlon=len(lon)
		nlat=len(lat)
		nlev=len(lev)
		# only the longitude dimension is needed below (for the zonal mean)
		for idim,s in enumerate(Nsq.shape):
			if s==nlon:
				londim=idim

		# take the zonal mean of buoyancy frequency 
		Nsqm = np.average(Nsq,axis=londim)
		
		# might have to squeeze out a length-1 copy dimension 
		Nsqm2 = np.squeeze(Nsqm)

		# the subroutine compute_DART_diagn_from_Wang_TEM_files delivers an array with dimensions lev x lat x copy (or just levxlat)
		# whereas N2 should come out as copy x lat x lev (or simply lat x lev)
		# need to transpose this, but I don't trust np.reshape - do it manually 
		wstar2 = np.zeros(shape=Nsqm2.shape)
		if wstar2.ndim==3:
			for icopy in range(Nsqm2.shape[0]):
				for ilat in range(Nsqm2.shape[1]):
					for ilev in range(Nsqm2.shape[2]):
						wstar2[icopy,ilat,ilev]=wstar[icopy,ilev,ilat]
		else:
			for ilat in range(Nsqm2.shape[0]):
				for ilev in range(Nsqm2.shape[1]):
					wstar2[ilat,ilev]=wstar[ilev,ilat]

		X = Nsqm2*wstar2

	# convert pressure levels to approximate altitude and take the vertical gradient  
	zlev = H*np.log(p0/lev)
	dZ = np.gradient(zlev)   # gradient of vertical levels in m

	# now X *should* have shape (copy x lat x lev) OR (lat x lev)
	# so need to copy dZ to look like this 
	if X.ndim==3:
		dZm = dZ[None,None,:]
		levdim=2
	if X.ndim==2:
		dZm = dZ[None,:]
		levdim=1
	dZ3 = np.broadcast_to(dZm,X.shape)
	dXdZ_3D = np.gradient(X,dZ3)
	dxdz = dXdZ_3D[levdim] # this is the vertical gradient with respect to height 

	# the above calculation yields a quantity in units s^-2/s, but it makes more sense 
	# in the grand scheme of things to look at buoyancy forcing per day, so here 
	# is a conversion factor.
	seconds_per_day = 60.*60.*24.0

	N2_forcing = -dxdz*seconds_per_day

	return N2_forcing,lat,lev
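
The manual element-by-element transposes above (lev x lat to lat x lev) are equivalent to swapping the last two axes; a minimal numpy check with hypothetical shapes:

import numpy as np

wstar = np.random.rand(4, 30, 96)        # copy x lev x lat, as delivered by the TEM-file reader
wstar2 = np.swapaxes(wstar, -1, -2)      # copy x lat x lev, the shape needed above
assert np.array_equal(wstar2, np.transpose(wstar, (0, 2, 1)))
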
Example #16
def Nsq_forcing_from_Q(E, datetime_in=None, debug=False, hostname='taurus'):
    """
	Birner (2010) used the thermodynamic equation in the TEM form to derive an expression 
	for the rate of change of static stability (N2) due to residual motion and diabatic heating. 

	This subroutine computes the term due to diabatic heating, i.e.: 
	g d(Q/theta)/dz

	INPUTS:
	E: a DART experiment dictionary. Relevant fields are:
		E['exp_name'] - the experiment name
		E['daterange'] - helps to choose which date to load in case this isn't specifically given
		E['variable'] - this determines what kind of diabatic heating we use:
			the value of E['variable'] should be a string like 'Nsq_forcing_XXXXX'
			where XXXXX is the model variable corresponding to whatever diabatic 
			heating type we are looking for. 
			For example, in WACCM, 'QRL_TOT' is the total longwave heating, so to get the 
			N2 forcing from that, just set E['variable']='Nsq_forcing_QRL_TOT'
	datetime_in: the date for which we want to compute this diagnostic. 
		default is None -- in this case, just choose the first date in E['daterange']


	OUTPUTS:
	N2_forcing: Nsquared forcing term in s^{-2}/day
	lev
	lat 
	"""

    # necessary constants
    H = 7000.0  # scale height in m
    p0 = 1000.0  # reference pressure in hPa
    g = 9.8  # acceleration of gravity

    # default to the first date in E['daterange'] if no date is given
    if datetime_in is None:
        datetime_in = E['daterange'][0]

    # load the desired diabatic heating term
    # this is not typically part of the DART output, so load from model history files
    # (right now this really only works for WACCM/CAM)
    # note: str.strip() removes a set of characters rather than a prefix, so slice the prefix off instead
    Qstring = E['variable'][len('Nsq_forcing_'):]
    EQ = E.copy()
    EQ['variable'] = Qstring
    DQ = DSS.compute_DART_diagn_from_model_h_files(EQ,
                                                   datetime_in,
                                                   verbose=debug)
    # remove the time dimension, which should have length 1
    DQ['data'] = np.squeeze(DQ['data'])

    # also load potential temperature
    ET = E.copy()
    ET['variable'] = 'theta'
    Dtheta = dart.load_DART_diagnostic_file(ET,
                                            datetime_in,
                                            hostname=hostname,
                                            debug=debug)
    # squeeze out extra dims, which we get if we load single copies (e.g. ensemble mean)
    Dtheta['data'] = np.squeeze(Dtheta['data'])

    # now find the longitude dimension and average over it
    # for both Q and theta
    Q_mean = DSS.average_over_named_dimension(DQ['data'], DQ['lon'])
    theta_mean = DSS.average_over_named_dimension(Dtheta['data'],
                                                  Dtheta['lon'])

    # Q_mean should come out as copy x lev x lat, whereas theta_mean is copy x lat x lev,
    # so manually transpose Q_mean to match
    Q_mean2 = np.zeros(shape=theta_mean.shape)
    if Q_mean2.ndim == 3:
        for icopy in range(theta_mean.shape[0]):
            for ilat in range(theta_mean.shape[1]):
                for ilev in range(theta_mean.shape[2]):
                    Q_mean2[icopy, ilat, ilev] = Q_mean[icopy, ilev, ilat]
    else:
        for ilat in range(theta_mean.shape[0]):
            for ilev in range(theta_mean.shape[1]):
                Q_mean2[ilat, ilev] = Q_mean[ilev, ilat]

    # divide Q by theta
    X = Q_mean2 / theta_mean

    # convert pressure levels to approximate altitude and take the vertical gradient
    lev = DQ['lev']
    zlev = H * np.log(p0 / lev)
    dZ = np.gradient(zlev)  # gradient of vertical levels in m

    # now X *should* have shape (copy x lat x lev) OR (lat x lev)
    # so need to copy dZ to look like this
    if X.ndim == 3:
        dZm = dZ[None, None, :]
        levdim = 2
    if X.ndim == 2:
        dZm = dZ[None, :]
        levdim = 1
    dZ3 = np.broadcast_to(dZm, X.shape)
    dXdZ_3D = np.gradient(X, dZ3)
    dxdz = dXdZ_3D[
        levdim]  # this is the vertical gradient with respect to height

    # the above calculation yields a quantity in units s^-2/s, but it makes more sense
    # in the grand scheme of things to look at buoyancy forcing per day, so here
    # is a conversion factor.
    seconds_per_day = 60. * 60. * 24.0

    # compute the N2 forcing (vectorized over any copy/ensemble dimension)
    N2_forcing = g * dxdz * seconds_per_day

    D = dict()
    D['data'] = N2_forcing
    D['lev'] = DQ['lev']
    D['lat'] = DQ['lat']
    D['units'] = 's^{-2}/day'
    D['long_name'] = 'N^{2} Forcing'

    return D
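
A usage sketch for this dictionary-returning variant (hypothetical experiment dict; 'QRL_TOT' is the WACCM longwave-heating variable named in the docstring):

E = {'exp_name': 'W0910_GLOBAL',
     'variable': 'Nsq_forcing_QRL_TOT',
     'daterange': get_available_date_range('W0910_GLOBAL')}
D = Nsq_forcing_from_Q(E, datetime_in=E['daterange'][0])
forcing, lat, lev = D['data'], D['lat'], D['lev']   # forcing in s^{-2}/day
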
Example #17
def Nsq_forcing_from_RC(E, datetime_in=None, debug=False, hostname='taurus'):
    """
	Birner (2010) used the thermodynamic equation in the TEM form to derive an expression 
	for the rate of change of static stability (N2) due to residual motion and diabatic heating. 

	This subroutine computes those terms from the dynamical heating rates computed by Wuke Wang. 
	The vertical motion (wstar) term is -d(wstar*Nsq)/dz.  
	Wuke already computed WS = -wstar*H*Nsq/R, so it's easiest to load that data, divide out H and R, and then take the vertical gradient. 

	The horizontal term is -g d(vstar/theta * d(theta)/dy)/dz. 
	Wuke already computed the heating rate term v*dtheta/dy = v*dTdy, 
	so the easiest thing to do is to multiply the heating rates by g/theta
	and then take the vertical gradient. 

	INPUTS:
	E: a DART experiment dictionary. Relevant fields are:
		E['exp_name'] - the experiment name
		E['daterange'] - helps to choose which date to load in case this isn't specifically given
		E['variable'] - if this is set to 'Nsq_vstar_forcing', the code returns the N2 forcing due to 
			meridional residual circulation. For anything else, it returns the forcing 
			due to vertical residual circulation. 
	datetime_in: the date for which we want to compute this diagnostic. 
		default is None -- in this case, just choose the first date in E['daterange']


	OUTPUTS:
	N2_forcing: Nsquared forcing term in s^{-2}/day
	lev
	lat 
	"""

    # necessary constants
    H = 7000.0  # scale height in m
    g = 9.80
    p0 = 1000.0  # reference pressure in hPa

    if datetime_in is None:
        datetime_in = E['daterange'][0]

    # depending on which term we want, need to load the residual circulation component and some other stuff,
    # and then derive a quantity for which we take the vertical gradient
    ERC = E.copy()
    ET = E.copy()
    if E['variable'] == 'Nsq_vstar_forcing':
        ET['variable'] = 'theta'
        Dtheta = dart.load_DART_diagnostic_file(ET,
                                                datetime_in,
                                                hostname=hostname,
                                                debug=debug)
        theta = Dtheta['data']
        lat = Dtheta['lat']
        lon = Dtheta['lon']
        lev = Dtheta['lev']
        ERC['variable'] = 'VSTAR'
        Dvstar = DSS.compute_DART_diagn_from_Wang_TEM_files(ERC,
                                                            datetime_in,
                                                            hostname=hostname,
                                                            debug=debug)
        vstar = Dvstar['data']

        # the above routines do not return arrays of consistent shape, so have to do
        # some acrobatics to get everything to match up.

        # find how the dimensions fit to the shape
        nlon = len(lon)
        nlat = len(lat)
        nlev = len(lev)
        # only the longitude dimension is needed below (for the zonal mean)
        for idim, s in enumerate(theta.shape):
            if s == nlon:
                londim = idim

        # take the zonal mean of potential temp  - this should make its shape copy x lat x lev
        thetam = np.average(theta, axis=londim)

        # next step is to find the meridional gradient of theta
        # latitude steps --> convert to distance (arclength)
        rlat = np.deg2rad(lat)
        Re = 6371000.0  # radius of Earth in m
        y = Re * rlat
        dy = np.gradient(y)
        # need to replicate dy to suit the shape of zonal mean theta
        dym = dy[None, :, None]
        dy3 = np.broadcast_to(dym, thetam.shape)
        # here is the gradient - need to squeeze out a possible length-1
        # copy dimension
        dthetady_list = np.gradient(np.squeeze(thetam), np.squeeze(dy3))

        # now find which dimension of _squeezed_ thetam corresponds to latitude -
        # that's the gradient that we want
        # (is this a pain in the ass? Yes! But I haven't yet found a more clever approach)
        for idim, s in enumerate(np.squeeze(thetam).shape):
            if s == nlat:
                newlatdim = idim
        dthetady = dthetady_list[newlatdim]

        # the meridional gradient of zonal mean theta then gets multiplied by vstar and g/theta. But...

        # the subroutine compute_DART_diagn_from_Wang_TEM_files delivers an array with
        # dimensions lev x lat x copy (or just levxlat)
        # whereas N2 should come out as copy x lat x lev (or simply lat x lev)
        # need to transpose this, but I don't trust np.reshape - do it manually
        vstar2 = np.zeros(shape=dthetady.shape)
        if vstar2.ndim == 3:
            for icopy in range(dthetady.shape[0]):
                for ilat in range(dthetady.shape[1]):
                    for ilev in range(dthetady.shape[2]):
                        vstar2[icopy, ilat, ilev] = vstar[icopy, ilev, ilat]
        else:
            for ilat in range(dthetady.shape[0]):
                for ilev in range(dthetady.shape[1]):
                    vstar2[ilat, ilev] = vstar[ilev, ilat]

        X = (g / np.squeeze(thetam)) * vstar2 * dthetady

    else:

        ET['variable'] = 'Nsq'
        D = dart.load_DART_diagnostic_file(ET,
                                           datetime_in,
                                           hostname=hostname,
                                           debug=debug)
        Nsq = D['data']
        lat = D['lat']
        lon = D['lon']
        lev = D['lev']

        ERC['variable'] = 'WSTAR'
        Dwstar = DSS.compute_DART_diagn_from_Wang_TEM_files(ERC,
                                                            datetime_in,
                                                            hostname=hostname,
                                                            debug=debug)
        wstar = Dwstar['data']

        # find how the dimensions fit to the shape
        nlon = len(lon)
        nlat = len(lat)
        nlev = len(lev)
        for idim, s in enumerate(Nsq.shape):
            if s == nlon:
                londim = idim
            if s == nlat:
                latdim = idim
            if s == nlev:
                levdim = idim

        # take the zonal mean of buoyancy frequency
        Nsqm = np.average(Nsq, axis=londim)

        # might have to squeeze out a length-1 copy dimension
        Nsqm2 = np.squeeze(Nsqm)

        # the subroutine compute_DART_diagn_from_Wang_TEM_files delivers an array with dimensions lev x lat x copy (or just levxlat)
        # whereas N2 should come out as copy x lat x lev (or simply lat x lev)
        # need to transpose this, but I don't trust np.reshape - do it manually
        wstar2 = np.zeros(shape=Nsqm2.shape)
        if wstar2.ndim == 3:
            for icopy in range(Nsqm2.shape[0]):
                for ilat in range(Nsqm2.shape[1]):
                    for ilev in range(Nsqm2.shape[2]):
                        wstar2[icopy, ilat, ilev] = wstar[icopy, ilev, ilat]
        else:
            for ilat in range(Nsqm2.shape[0]):
                for ilev in range(Nsqm2.shape[1]):
                    wstar2[ilat, ilev] = wstar[ilev, ilat]

        X = Nsqm2 * wstar2

    # convert pressure levels to approximate altitude and take the vertical gradient
    zlev = H * np.log(p0 / lev)
    dZ = np.gradient(zlev)  # gradient of vertical levels in m

    # now X *should* have shape (copy x lat x lev) OR (lat x lev)
    # so need to copy dZ to look like this
    if X.ndim == 3:
        dZm = dZ[None, None, :]
        levdim = 2
    if X.ndim == 2:
        dZm = dZ[None, :]
        levdim = 1
    dZ3 = np.broadcast_to(dZm, X.shape)
    dXdZ_3D = np.gradient(X, dZ3)
    dxdz = dXdZ_3D[levdim]  # the vertical gradient with respect to height

    # the above calculation yields a quantity in units s^-2/s, but it makes more sense
    # in the grand scheme of things to look at buoyancy forcing per day, so here
    # is a conversion factor.
    seconds_per_day = 60. * 60. * 24.0

    N2_forcing = -dxdz * seconds_per_day

    D = dict()
    D['data'] = N2_forcing
    D['lat'] = lat
    D['lev'] = lev
    D['units'] = 's^{-2}/day'
    D['long_name'] = 'N^{2} Forcing'

    return D
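
# A hedged usage sketch for Nsq_forcing_from_RC. The experiment name and
# daterange are invented for illustration, and a real E would also carry
# whatever fields dart.load_DART_diagnostic_file expects (e.g. a 'diagn' entry).
E_rc = {'exp_name': 'W0910_GLOBAL',
        'daterange': dart.daterange(date_start=datetime.datetime(2009, 10, 1, 12, 0, 0),
                                    periods=4, DT='6H'),
        'variable': 'Nsq_vstar_forcing'}
D_vstar = Nsq_forcing_from_RC(E_rc)        # meridional (vstar) term
E_rc['variable'] = 'Nsq_wstar_forcing'     # any other string selects the wstar branch
D_wstar = Nsq_forcing_from_RC(E_rc)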
def get_experiment_date_ranges(exp_name):

	# stored date ranges for various DART experiments  
	DR = None

	# CAM experiments for ERP assimilation study  
	if exp_name == 'NODA':
		DR = dart.daterange(date_start=datetime.datetime(2009,1,1,0,0,0), periods=31, DT='1D')
	if exp_name == 'ERPALL':
		DR = dart.daterange(date_start=datetime.datetime(2009,1,1,0,0,0), periods=31, DT='1D')
	if exp_name == 'RST':
		DR = dart.daterange(date_start=datetime.datetime(2009,1,1,0,0,0), periods=17, DT='1D')
	if exp_name == 'ERPRST':
		DR = dart.daterange(date_start=datetime.datetime(2009,1,1,0,0,0), periods=17, DT='1D')

	# DART-WACCM runs performed at GEOMAR  
	if exp_name == 'PMO32':
		DR = dart.daterange(date_start=datetime.datetime(2009,10,1,6,0,0), periods=31, DT='6H')
	if exp_name == 'W0910_NODA':
		DR = dart.daterange(date_start=datetime.datetime(2009,10,1,12,0,0), periods=596, DT='6H')
	if exp_name == 'W0910_GLOBAL':
		DR = dart.daterange(date_start=datetime.datetime(2009,10,1,12,0,0), periods=596, DT='6H')
	if exp_name == 'W0910_TROPICS':
		DR = dart.daterange(date_start=datetime.datetime(2009,10,1,12,0,0), periods=596, DT='6H')
	if exp_name == 'W0910_NODART':
		DR = dart.daterange(date_start=datetime.datetime(2009,10,1,12,0,0), periods=10, DT='6H')
	if exp_name == 'W0910_NOSTOP':
		DR = dart.daterange(date_start=datetime.datetime(2009,10,1,12,0,0), periods=64, DT='6H')
	
	# WACCM PMO runs performed by Nick Pedatella at NCAR
	if exp_name == 'NCAR_PMO_CONTROL':
		DR = dart.daterange(date_start=datetime.datetime(2008,11,6,6,0,0), periods=72, DT='6H')
	if exp_name == 'NCAR_PMO_LAS':
		DR = dart.daterange(date_start=datetime.datetime(2008,11,6,6,0,0), periods=72, DT='6H')
	if exp_name == 'NCAR_PMO_LA':
		DR = dart.daterange(date_start=datetime.datetime(2008,11,6,6,0,0), periods=72, DT='6H')

	# WACCM real-obs runs performed by Nick Pedatella at NCAR
	if exp_name == 'NCAR_FULL':
		DR = dart.daterange(date_start=datetime.datetime(2009,1,1,6,0,0), periods=204, DT='6H')
	if exp_name == 'NCAR_LAONLY':
		DR = dart.daterange(date_start=datetime.datetime(2009,1,1,6,0,0), periods=204, DT='6H')

	if DR is None:
		print('get_experiment_date_ranges: cannot find experiment '+exp_name+' -- returning None')

	return DR
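
# Example usage (a sketch; relies on the dart module imported elsewhere in this file):
DR = get_experiment_date_ranges('W0910_GLOBAL')
if DR is not None:
	print('experiment runs from '+str(DR[0])+' to '+str(DR[len(DR)-1]))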
Example #19
def HRRS_as_DF(OBS,TPbased=False,TPbased_vertical_res=50E-3,hostname='taurus',debug=False):

	"""
	Loop over a set of dates and a specified latitude- and longitude range, and return 
	the available high-resolution radiosonde data as a pandas data frame  
	
	INPUTS:
	OBS: a dictionary with the following entries:  
		daterange: a list of datetime objects that give the desired date range  
		latrange: a list giving the bounding latitudes of the desired range 
		lonrange: a list giving the bounding longitudes of the desired range 
		Note that OBS can be a DART experiment dictionary (see DART.py), but the DART/model 
			specific entries are ignored. 
	TPbased: set to True to return the profiles ordered into regularly-spaced altitudes 
		relative to the tropopause  - default is False. 
	hostname: default is taurus 
	debug: set to True to print some stuff out. Default is False. 
	TPbased_vertical_res: resolution of the grid to which we interpolate the obs when doing 
		TP-based coordinates. Default is 50m. 
	"""

	# first read in station information as a dataframe 
	stationdata = HRRS_station_data(hostname)
	
	# initialize an empty list which will hold the data frames for each station and time 
	DFlist=[]

	# because the HRRS data are sorted by years, loop over the years in the daterange
	DR=OBS['daterange']
	y0 = DR[0].year
	yf = DR[len(DR)-1].year
	years = range(y0,yf+1,1)
	for YYYY in years:  

		# load a list of the available stations for that year  
		Slist  = HRRS_stations_available_per_year(YYYY)

		# trim list down to the ones that fit into the latitude range 
		stations_lat = [s for s in Slist 
				if stationdata.loc[int(s)]['Lat'] >= OBS['latrange'][0] 
				and stationdata.loc[int(s)]['Lat'] <= OBS['latrange'][1] ]

		# trim list down to the ones that fit into the longitude range 
		stations_latlon = [s for s in stations_lat
				if stationdata.loc[int(s)]['Lon'] >= OBS['lonrange'][0] 
				and stationdata.loc[int(s)]['Lon'] <= OBS['lonrange'][1] ]

		# also compute the subset of the requested daterange that fits into this year. 
		year_daterange =  dart.daterange(date_start=datetime.datetime(YYYY,1,1,0,0,0), periods=365*4, DT='6H')
		DR2 = set(year_daterange).intersection(DR)
		
		# also find the dir where the station data live 
		datadir = es.obs_data_paths('HRRS',hostname)

		# now loop over available stations, and for each one, retrieve the data 
		# that fit into the requested daterange 
		for s in stations_latlon:	

			# loop over dates, and retrieve data if available 
			for dd in DR2:
				datestr = dd.strftime("%Y%m%d%H")
				ff = datadir+'/'+str(YYYY)+'/'+str(s)+'/'+str(s)+'-'+datestr+'_mod.dat'
				if os.path.exists(ff):

					if debug:
						print(ff)

					# read in the station data 
					if TPbased:
						D = TP_based_HRRS_data(ff,vertical_res_km=TPbased_vertical_res)
						alt_to_km = 1.0    # here the altitude is already in km
						temp_to_K = 0.0
					else:
						D = read_HRRS_data(ff)
						alt_to_km = 1.0E-3     # raw data are in m -- convert to km 
						temp_to_K = 273.15	# raw data need to be converted to kelvin
		
					if D is not None:
						# also add a column holding the date 
						D['Date'] = pd.Series(dd, index=D.index)

						# also add a column holding the station number 
						D['StationNumber'] = pd.Series(s, index=D.index)

						# make sure altitude is in km 
						# and temp in Kelvin
						D['Alt']=D['Alt']*alt_to_km
						D['Temp']=D['Temp']+temp_to_K
					
						# get rid of some unneeded columns 
						if not TPbased:
							useless_cols=['Time','Dewpt','RH','Ucmp','Vcmp','spd','dir', 
									'Wcmp',  'Ele', 'Azi', 'Qp', 'Qt', 'Qrh', 'Qu', 'Qv', 'QdZ']
							D.drop(useless_cols,inplace=True,axis=1)

						# append to list of data frames 
						DFlist.append(D)


	# merge the list of data frames into a single DataFrame 
	DFout = pd.concat(DFlist, axis=0)

	return(DFout)
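
# A hedged usage sketch: the dates and bounding box are invented for
# illustration, and the station files must actually exist on the host for
# any data frames to be returned.
OBS_demo = {'daterange': dart.daterange(date_start=datetime.datetime(2010, 1, 1, 0, 0, 0),
                                        periods=8, DT='6H'),
            'latrange': [30, 60],
            'lonrange': [230, 290]}
df = HRRS_as_DF(OBS_demo)
print(df.groupby('StationNumber')['Temp'].mean())   # mean temperature per station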
def find_paths(E,date,file_type='diag',hostname='taurus',debug=False):

	"""
	This subroutine takes a DART experiment dictionary and returns the file path for the 
	needed diagnostic. 

	The optional input, `file_type`, can have one of these values:  
	+ 'covariance' -- then we load pre-computed data of covariances between state variables and a given obs  
	+ 'obs_epoch' -- load obs_epoch_XXXX.nc files  
	+ 'diag' -- load standard DART Posterior_Diag or Prior_Diag files 
	+ 'truth' -- load true state files from a perfect-model simulation

	"""
	import DART as dart

	path_found = False
	if E['run_category'] == 'NCAR':
		data_dir_list,truth_dir_list = exp_paths_NCAR(hostname,E['exp_name'])
		path_found = True
	if 'ERA' in E['exp_name']:
		data_dir_list,truth_dir_list = exp_paths_era(date,hostname,diagnostic=E['diagn'])
		path_found = True
	if not path_found:
		data_dir_list,truth_dir_list = exp_paths(hostname,E['exp_name'])


	#------------COVARIANCE FILES  
	if file_type == 'covariance':
		fname = E['exp_name']+'_'+'covariance_'+E['obs_name']+'_'+E['variable']+'_'+date.strftime('%Y-%m-%d')+'.nc'


	#------------OBS EPOCH FILES
	if file_type == 'obs_epoch':
		DR = get_experiment_date_ranges(E['exp_name'])
		delta_time = date-DR[0]
		obs_epoch_no = delta_time.days+1
		if obs_epoch_no < 10:
			obs_epoch_name = 'obs_epoch_00'+str(obs_epoch_no)+'.nc'
		if (obs_epoch_no >= 10) and (obs_epoch_no < 100): 
			obs_epoch_name = 'obs_epoch_0'+str(obs_epoch_no)+'.nc'
		if (obs_epoch_no >= 100): 
			obs_epoch_name = 'obs_epoch_'+str(obs_epoch_no)+'.nc'
		if E['run_category'] is None:
			fname = '/dart/hist/'+obs_epoch_name
		if E['run_category'] == 'ERPDA':
			fname = '/../obs_epoch/'+obs_epoch_name


	#------------regular DART output files or true state files 
	if (file_type == 'diag') or (file_type == 'truth'):
		if E['diagn']=='Truth':
			file_type='truth'
		# either load a given date, or a time mean 
		if isinstance(date,str):
			endstring=date
		else:
			datestr = date.strftime("%Y-%m-%d")
			seconds = date.hour*60*60
			if seconds == 0:
				timestr = '00000'
			else:
				timestr = str(seconds)
			endstring =datestr+'-'+timestr
		if E['run_category'] is None:
			diagstring = 'Diag'
			# additional diagnostics files have the 'Diag' string replaced with something else. 
			TIL_variables = ['theta','ptrop','Nsq','P','brunt','ztrop']
			if E['variable'] in TIL_variables:
				diagstring='TIL'

			fname = '/dart/hist/cam_'+E['diagn']+'_'+diagstring+'.'+endstring+'.nc'
			fname_truth = '/dart/hist/cam_'+'True_State'+'.'+endstring+'.nc'
		if E['run_category'] == 'ERPDA':
			gday = dart.date_to_gday(date)
			# for all my (Lisa's) old experiments, obs sequence 1 is 1 Jan 2009
			gday1 = dart.date_to_gday(datetime.datetime(2009,1,1,0,0,0))
			obs_seq_no = int(gday-gday1+1)
			if (obs_seq_no < 10):
				mid = 'obs_000'+str(obs_seq_no)
			else:
				mid = 'obs_00'+str(obs_seq_no)
			fname_truth = mid+'/'+'True_State.nc'
			fname = mid+'/'+E['diagn']+'_Diag.nc'
		if E['run_category']=='NCAR':
			if E['exp_name'] == 'NCAR_LAONLY':
				suffix = '_LAONLY'
			else:
				suffix = ''
			fname_truth = '/'+'True_State'+'_'+datestr+'-'+timestr+'.nc'+suffix
			fname = '/'+E['diagn']+'_Diag.'+datestr+'-'+timestr+'.nc'+suffix
		if file_type == 'truth':
			fname = fname_truth
			data_dir_list = truth_dir_list

	# if data_dir_list was not found, throw an error
	if data_dir_list is None:
		print('experiment_settings.py cannot find settings for the following experiment dict:')
		print(E)
		return None


	#-----search for the right files 
	correct_filepath_found = False
	for data_dir in data_dir_list:
		filename = data_dir+fname
		if debug:
			print('Looking for file  '+filename)
		if os.path.exists(filename):
			correct_filepath_found = True
			break

	# return the filename with path; if no existing file was found above, 
	# this is the last path that was tried 
	return filename
Example #21
def get_experiment_date_ranges(exp_name):

    # stored date ranges for various DART experiments
    DR = None

    # CAM experiments for ERP assimilation study
    if exp_name == 'NODA':
        DR = dart.daterange(date_start=datetime.datetime(2009, 1, 1, 0, 0, 0),
                            periods=31,
                            DT='1D')
    if exp_name == 'ERPALL':
        DR = dart.daterange(date_start=datetime.datetime(2009, 1, 1, 0, 0, 0),
                            periods=31,
                            DT='1D')
    if exp_name == 'RST':
        DR = dart.daterange(date_start=datetime.datetime(2009, 1, 1, 0, 0, 0),
                            periods=17,
                            DT='1D')
    if exp_name == 'ERPRST':
        DR = dart.daterange(date_start=datetime.datetime(2009, 1, 1, 0, 0, 0),
                            periods=17,
                            DT='1D')

    # DART-WACCM runs performed at GEOMAR
    if exp_name == 'PMO32':
        DR = dart.daterange(date_start=datetime.datetime(2009, 10, 1, 6, 0, 0),
                            periods=31,
                            DT='6H')
    if exp_name == 'W0910_NODA':
        DR = dart.daterange(date_start=datetime.datetime(
            2009, 10, 1, 12, 0, 0),
                            periods=596,
                            DT='6H')
    if exp_name == 'W0910_GLOBAL':
        DR = dart.daterange(date_start=datetime.datetime(
            2009, 10, 1, 12, 0, 0),
                            periods=596,
                            DT='6H')
    if exp_name == 'W0910_TROPICS':
        DR = dart.daterange(date_start=datetime.datetime(
            2009, 10, 1, 12, 0, 0),
                            periods=596,
                            DT='6H')
    if exp_name == 'W0910_NODART':
        DR = dart.daterange(date_start=datetime.datetime(
            2009, 10, 1, 12, 0, 0),
                            periods=10,
                            DT='6H')
    if exp_name == 'W0910_NOSTOP':
        DR = dart.daterange(date_start=datetime.datetime(
            2009, 10, 1, 12, 0, 0),
                            periods=64,
                            DT='6H')

    # WACCM PMO runs performed by Nick Pedatella at NCAR
    if exp_name == 'NCAR_PMO_CONTROL':
        DR = dart.daterange(date_start=datetime.datetime(2008, 11, 6, 6, 0, 0),
                            periods=72,
                            DT='6H')
    if exp_name == 'NCAR_PMO_LAS':
        DR = dart.daterange(date_start=datetime.datetime(2008, 11, 6, 6, 0, 0),
                            periods=72,
                            DT='6H')
    if exp_name == 'NCAR_PMO_LA':
        DR = dart.daterange(date_start=datetime.datetime(2008, 11, 6, 6, 0, 0),
                            periods=72,
                            DT='6H')

    # WACCM real-obs runs performed by Nick Pedatella at NCAR
    if exp_name == 'NCAR_FULL':
        DR = dart.daterange(date_start=datetime.datetime(2009, 1, 1, 6, 0, 0),
                            periods=204,
                            DT='6H')
    if exp_name == 'NCAR_LAONLY':
        DR = dart.daterange(date_start=datetime.datetime(2009, 1, 1, 6, 0, 0),
                            periods=204,
                            DT='6H')

    if DR is None:
        print('get_experiment_date_ranges: cannot find experiment ' + exp_name +
              ' -- returning None')

    return DR
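
# The if-chains above could equally be expressed as a single dict lookup;
# a sketch of the same idea (entries abbreviated, not code from the source):
_EXPERIMENT_RANGES = {
    'NODA': (datetime.datetime(2009, 1, 1, 0, 0, 0), 31, '1D'),
    'W0910_GLOBAL': (datetime.datetime(2009, 10, 1, 12, 0, 0), 596, '6H'),
    # ... remaining experiments follow the same pattern
}

def get_experiment_date_ranges_from_dict(exp_name):
    entry = _EXPERIMENT_RANGES.get(exp_name)
    if entry is None:
        print('get_experiment_date_ranges: cannot find experiment ' + exp_name)
        return None
    date_start, periods, DT = entry
    return dart.daterange(date_start=date_start, periods=periods, DT=DT)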
def plot_DARTobs_scatter_globe(E,projection='miller',coastline_width=0,water_color="#CCF3FF",land_color="#996600",colors=None,compare='QC',QC_list=range(8),alpha=0.5,hostname='taurus',debug=False):

	"""
	This code plots a scatterplot of the horizontal distribution of DART assimilated or evaluated
	observations on a map.
	
	INPUTS:
	E: a DART experiment dictionary. The relevant keys are: 
		'latrange' : gives the lat limits of the plot
		'lonrange' : gives the lon limits of the plot
		'levrange' : only observations between these levels are shown 
		'copystring': a string giving the DART copies to show. If it's a list, we loop over the list
		'obs_name': a string giving the DART observation to show. If it's a list, we loop over the list
		'daterange': range of dates over which to plot the observations 
	projection: the map projection to use (default is "miller")
	coastline_width: width of the coastlines; default is 0 (no coastlines)
	water_color: color given to oceans and lakes (default is cyan/blue)
	land_color: color given to continents (default is an earthy brown)
	colors: list of colors assigned to the different obs (or QC values) plotted. Default is the colorbrewer Dark2 palette. 
	compare: select either 'QC' to color code the QC values, or 'obs_type' to color code the observation types
		Default is 'QC'
	QC_list = list of QC values to plot. The default is all values from 0 to 7
	alpha: the degree of transparency. default is 0.5
	"""

	#---------set up the map-----------------
	# if plotting a polar stereographic projection, it's better to return all lats and lons, and then 
	# cut off the unwanted regions with map limits -- otherwise we get artificial circles on a square map
	if (projection == 'npstere'): 
		if E['latrange'][0] < 0:
			boundinglat = 0
		else:
			boundinglat =  E['latrange'][0]
		E['latrange'] = [-90,90]
		E['lonrange'] = [0,361]
	if (projection == 'spstere'):
		boundinglat = E['latrange'][1]
		E['latrange'] = [-90,90]
		E['lonrange'] = [0,361]

	# set up a map projection
	if projection == 'miller':
		maxlat = np.min([E['latrange'][1],90.0])
		minlat = np.max([E['latrange'][0],-90.0])
		map = Basemap(projection='mill',llcrnrlat=minlat,urcrnrlat=maxlat,\
			    llcrnrlon=E['lonrange'][0],urcrnrlon=E['lonrange'][1],resolution='l')
	if projection is not None and 'stere' in projection:
		map = Basemap(projection=projection,boundinglat=boundinglat,lon_0=0,resolution='l')
	if projection is None:
		map = Basemap(projection='ortho',lat_0=54,lon_0=10,resolution='l')

	# draw coastlines, country boundaries, fill continents.
	map.drawcoastlines(linewidth=coastline_width)
	map.drawmapboundary(fill_color=water_color)

	# draw lat/lon grid lines every 30 degrees.
	map.drawmeridians(np.arange(0,360,30),linewidth=0.25)
	map.drawparallels(np.arange(-90,90,30),linewidth=0.25)
	map.fillcontinents(color=land_color,lake_color=water_color,alpha=alpha)

	#--------- load the obs on the given day 
	OBS,copy_names,obs_names,lons,lats,levs,QCdict = dart.load_DART_obs_epoch_file(E,debug=debug,hostname=hostname)

	#---------loop over obs types-----------------
	# loop over obs types given in E
	if type(E['obs_name']) is not list:
		obs_type_list = [E['obs_name']]
	else:
		obs_type_list = E['obs_name']

	# define a list of colors if needed 
	if colors is None:
		if compare == 'obs_type':
			if type(E['obs_name']) is not list:
				obs_type_list = [E['obs_name']]
			else:
				obs_type_list = E['obs_name']
			NN = len(obs_type_list)
		if compare == 'QC':
			NN = len(QC_list)
		ncol = np.min([NN,12])
		if ncol < 3:
			ncol=3
		colors = palettable.colorbrewer.qualitative.Dark2.mpl_colors

	# if comparing observation types, loop over them and scatter plot individually 
	if compare == 'obs_type':
		for obs_type,ii in zip(obs_type_list,range(len(obs_type_list))):
			lons_obstype = [lons[i] for i,x in enumerate(obs_names) if obs_type in x]
			lats_obstype = [lats[i] for i,x in enumerate(obs_names) if obs_type in x]

			# scatter the obs over the map 
			x, y = map(lons_obstype,lats_obstype)
			map.scatter(x,y,3,marker='o',color=colors[ii],rasterized=True)

	# if comparing different QC values , loop over the list of obs
	# and select the lats and levs for the desired obs types 
	if compare == 'QC':
		DQC = QCdict['DART quality control            ']
		for QC,ii in zip(QC_list,range(len(QC_list))):
			lons_obstype = [lons[i] for i,x in enumerate(DQC) if QC == x]
			lats_obstype = [lats[i] for i,x in enumerate(DQC) if QC == x]

			# scatter the obs over the map 
			x, y = map(lons_obstype,lats_obstype)
			map.scatter(x,y,3,marker='o',color=colors[ii],rasterized=True)
	return 
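
# A hedged usage sketch (experiment fields invented for illustration;
# requires Basemap, palettable, and existing obs_epoch files on the host):
E_obs = {'exp_name': 'W0910_GLOBAL', 'diagn': 'Posterior', 'run_category': None,
	'latrange': [20, 90], 'lonrange': [0, 360], 'levrange': [1000, 10],
	'copystring': 'observation', 'obs_name': ['RADIOSONDE_TEMPERATURE'],
	'daterange': dart.daterange(date_start=datetime.datetime(2009, 10, 2, 0, 0, 0),
			periods=4, DT='6H')}
plot_DARTobs_scatter_globe(E_obs, projection='npstere', compare='QC')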
Example #23
def ano(E,climatology_option = 'NODA',hostname='taurus',verbose=False):

	"""
	Compute anomaly fields relative to some climatology

	Inputs allowed for climatology_option:  
	'NODA': take the ensemble mean of the corresponding no-DA experiment as a 40-year climatology  
	'F_W4_L66': daily climatology of a CESM+WACCM simulation with realistic forcings, 1951-2010
	None: don't subtract out anything -- just return the regular fields in the same shape as other "anomalies"  
	"""

	# load climatology 
	Xclim,lat,lon,lev,DR = load_climatology(E,climatology_option,hostname)

	# change the daterange in the anomalies to suit what was found for climatology  
	if len(DR) != len(E['daterange']):
		print('Changing the experiment daterange to the dates found for the requested climatology')
		E['daterange'] = DR
		d1 = DR[0].strftime("%Y-%m-%d")
		d2 = DR[len(E['daterange'])-1].strftime("%Y-%m-%d")
		print('new daterange goes from '+d1+' to '+d2)

	# some climatologies are only available at daily resolution, so 
	# in that case we have to change the daterange in E to be daily  
	if (climatology_option == 'F_W4_L66'):
		d0 = E['daterange'][0]
		df = E['daterange'][len(E['daterange'])-1]
		days = df-d0
		DRnew =  dart.daterange(date_start=d0, periods=days.days+1, DT='1D')
		E['daterange'] = DRnew

	# load the desired model fields for the experiment
	Xlist = []	# empty list to hold the fields we retrieve for every day  
	for date in E['daterange']:
		X,lat0,lon0,lev0 = DSS.compute_DART_diagn_from_model_h_files(E,date,hostname=hostname,verbose=verbose)
		if X is not None:
			Xs = np.squeeze(X)
			Xlist.append(Xs)
			lat = lat0
			lon = lon0
			lev = lev0

	# check that the right vertical levels were loaded
	if verbose:
		print('------computing daily anomalies for the following vertical levels and variable:-------')
		print(lev)
		print(E['variable'])

	# compute anomalies:
	# for this we turn the model fields into a matrix and subtract from the climatology
	XX = np.concatenate([X[..., np.newaxis] for X in Xlist], axis=len(Xs.shape))
	if climatology_option is None:
		AA = XX
		XclimR = None
	else:
		# if the climatology does not have shape lat x lon x lev x time, 
		# run swapaxes 2x to get it as such  
		# NOTE: this is still a kludge and probably won't work with all datasets - check this carefully 
		# with your own data 
		XclimS = np.squeeze(Xclim)
		nT = len(E['daterange'])
		lastdim = len(XclimS.shape)-1
		for s,ii in zip(XclimS.shape,range(len(XclimS.shape))):
			if s == nT:
				time_dim = ii

		# if only retrieveing a single date, don't need to do any reshaping
		# but might need to squeeze out a length-one time dimension
		if nT == 1:
			XclimR = XclimS
			XX = np.squeeze(XX)
		else:
			# if time is the last dimension, don't need to reshape Xclim 
			if time_dim == lastdim: 
				XclimR = XclimS
			# if time is the first dimension, need to reshape Xclim
			if time_dim == 0:	
				Xclim2 = XclimS.swapaxes(0,lastdim)
				XclimR = Xclim2.swapaxes(0,1)


		AA = XX-XclimR

	return AA,XclimR,lat,lon,lev,DR
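
# A hedged usage sketch for ano; the experiment dict is abbreviated, and in
# practice E also needs whatever compute_DART_diagn_from_model_h_files reads:
E_ano = {'exp_name': 'W0910_GLOBAL', 'variable': 'U', 'levrange': [850, 850],
	'daterange': dart.daterange(date_start=datetime.datetime(2009, 10, 2, 0, 0, 0),
			periods=8, DT='6H')}
AA, clim, lat, lon, lev, DR = ano(E_ano, climatology_option='NODA')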
# Initialize web3 by connecting it to the local ganache provider
w3 = Web3(Web3.HTTPProvider(args.host))
accounts = w3.eth.accounts
w3.eth.defaultAccount = accounts[0]

if len(accounts) < nPartecipants:
    print(
        "Not enough available Ethereum accounts! At least N_partecipants accounts are needed in order to run this test"
    )
    sys.exit(-1)

accounts = accounts[:nPartecipants]

# Initialize the interface for interacting with the DART smart contract
DARTArtifact = json.load(open(args.build))
d = DART(DARTArtifact['abi'],
         DARTArtifact['networks'][str(args.netid)]['address'], w3)

# -----------------------------------------------------

# To make the tests easier to write and the results easier to read,
# build two pairs of dictionaries that map:

# ... principals to addresses and vice versa
PR = {}
for idx, addr in enumerate(accounts):
    PR['Principal[' + str(idx + 1) + ']'] = addr
INV_PR = {v: k for k, v in PR.items()}
print("\nPRINCIPALS:")
pprint(PR)

# ... hex rolenames to string rolenames and vice versa
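# (the listing is cut off here; below is a hypothetical sketch that mirrors
# the PR/INV_PR pattern above -- the role names are invented, and
# Web3.toHex(text=...) assumes web3.py v5)
RN = {}
for rolename in ['student', 'employee']:    # hypothetical role names
    RN[Web3.toHex(text=rolename)] = rolename
INV_RN = {v: k for k, v in RN.items()}
print("\nROLENAMES:")
pprint(RN)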
Example #25
def HRRS_as_DF(OBS,
               TPbased=False,
               TPbased_vertical_res=50E-3,
               hostname='taurus',
               debug=False):
    """
	Loop over a set of dates and a specified latitude- and longitude range, and return 
	the available high-resolution radiosonde data as a pandas data frame  
	
	INPUTS:
	OBS: a dictionary with the following entries:  
		daterange: a list of datetime objects that give the desired date range  
		latrange: a list giving the bounding latitudes of the desired range 
		lonrange: a list giving the bounding longitudes of the desired range 
		Note that OBS can be a DART experiment dictionary (see DART.py), but the DART/model 
			specific entries are ignored. 
	TPbased: set to True to return the profiles ordered into regularly-spaced altitudes 
		relative to the tropopause  - default is False. 
	hostname: default is taurus 
	debug: set to True to print some stuff out. Default is False. 
	TPbased_vertical_res: resolution of the grid to which we interpolate the obs when doing TP-based 
		coordinates. Default is 50m. 
	"""

    # first read in station information as a dataframe
    stationdata = HRRS_station_data(hostname)

    # initialize an empty list which will hold the data frames for each station and time
    DFlist = []

    # because the HRRS data are sorted by years, loop over the years in the daterange
    DR = OBS['daterange']
    y0 = DR[0].year
    yf = DR[len(DR) - 1].year
    years = range(y0, yf + 1, 1)
    for YYYY in years:

        # load a list of the available stations for that year
        Slist = HRRS_stations_available_per_year(YYYY)

        # trim list down to the ones that fit into the latitude range
        stations_lat = [
            s for s in Slist
            if stationdata.loc[int(s)]['Lat'] >= OBS['latrange'][0]
            and stationdata.loc[int(s)]['Lat'] <= OBS['latrange'][1]
        ]

        # trim list down to the ones that fit into the longitude range
        stations_latlon = [
            s for s in stations_lat
            if stationdata.loc[int(s)]['Lon'] >= OBS['lonrange'][0]
            and stationdata.loc[int(s)]['Lon'] <= OBS['lonrange'][1]
        ]

        # also compute the subset of the requested daterange that fits into this year.
        year_daterange = dart.daterange(date_start=datetime.datetime(
            YYYY, 1, 1, 0, 0, 0),
                                        periods=365 * 4,
                                        DT='6H')
        DR2 = set(year_daterange).intersection(DR)

        # also find the dir where the station data live
        datadir = es.obs_data_paths('HRRS', hostname)

        # now loop over available stations, and for each one, retrieve the data
        # that fit into the requested daterange
        for s in stations_latlon:

            # loop over dates, and retrieve data if available
            for dd in DR2:
                datestr = dd.strftime("%Y%m%d%H")
                ff = datadir + '/' + str(YYYY) + '/' + str(s) + '/' + str(
                    s) + '-' + datestr + '_mod.dat'
                if os.path.exists(ff):

                    if debug:
                        print(ff)

                    # read in the station data
                    if TPbased:
                        D = TP_based_HRRS_data(
                            ff, vertical_res_km=TPbased_vertical_res)
                        alt_to_km = 1.0  # here the altitude is already in km
                        temp_to_K = 0.0
                    else:
                        D = read_HRRS_data(ff)
                        alt_to_km = 1.0E-3  # raw data are in m -- convert to km
                        temp_to_K = 273.15  # raw data need to be converted to kelvin

                    if D is not None:
                        # also add a column holding the date
                        D['Date'] = pd.Series(dd, index=D.index)

                        # also add a column holding the station number
                        D['StationNumber'] = pd.Series(s, index=D.index)

                        # make sure altitude is in km
                        # and temp in Kelvin
                        D['Alt'] = D['Alt'] * alt_to_km
                        D['Temp'] = D['Temp'] + temp_to_K

                        # get rid of some unneeded columns
                        if not TPbased:
                            useless_cols = [
                                'Time', 'Dewpt', 'RH', 'Ucmp', 'Vcmp', 'spd',
                                'dir', 'Wcmp', 'Ele', 'Azi', 'Qp', 'Qt', 'Qrh',
                                'Qu', 'Qv', 'QdZ'
                            ]
                            D.drop(useless_cols, inplace=True, axis=1)

                        # append to list of data frames
                        DFlist.append(D)

    # merge the list of data frames into a single DataFrame
    DFout = pd.concat(DFlist, axis=0)

    return (DFout)
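
# A hedged sketch of the tropopause-based variant; the OBS fields are
# invented, and the vertical resolution argument is in km (100E-3 km = 100 m):
OBS_tp = {'daterange': dart.daterange(date_start=datetime.datetime(2010, 6, 1, 0, 0, 0),
                                      periods=4, DT='6H'),
          'latrange': [-10, 10],
          'lonrange': [80, 100]}
df_tp = HRRS_as_DF(OBS_tp, TPbased=True, TPbased_vertical_res=100E-3)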
Example #26
def HRRS_mean_ztrop_to_csv(DR,hostname='taurus',debug=False):

	"""
	Given a certain daterange, retrieve available high res radiosonde data,
	compute the average tropopause height per station, and store in a 
	csv file. 
	"""
	from TIL import ztrop

	# first read in station information as a dataframe 
	stationdata = HRRS_station_data(hostname)

	# because the HRRS data are sorted by years, loop over the years in the daterange
	y0 = DR[0].year
	yf = DR[len(DR)-1].year
	years = range(y0,yf+1,1)
	for YYYY in years:  

		# load a list of the available stations for that year  
		Slist  = HRRS_stations_available_per_year(YYYY)

		# also compute the subset of the requested daterange that fits into this year. 
		year_daterange =  dart.daterange(date_start=datetime.datetime(YYYY,1,1,0,0,0), periods=365*4, DT='6H')
		DR2 = set(year_daterange).intersection(DR)
		
		# also find the dir where the station data live 
		datadir = es.obs_data_paths('HRRS',hostname)

		# initialize empty dictionary to hold average tropopause heights per station 
		ztrop_dict=dict()

		# now loop over available stations, and for each one, retrieve the data 
		# that fit into the requested daterange 

		for s in Slist:	
			ztrop_list=[]	# empty list to hold tropopause heights for all available obs per station 

			# loop over dates, and retrieve data if available 
			for dd in DR2:
				datestr = dd.strftime("%Y%m%d%H")
				ff = datadir+'/'+str(YYYY)+'/'+str(s)+'/'+str(s)+'-'+datestr+'_mod.dat'
				if os.path.exists(ff):

					if debug:
						print(ff)

					# read in the station data 
					D = read_HRRS_data(ff)
	
					# compute tropopause height 
					z=D['Alt']/1E3       # Altitude in km 
					T=D['Temp']+273.15      # Temp in Kelvin
					ztropp=ztrop(z=z,T=T,debug=debug,hostname=hostname)

					# add to list if not none  
					if ztropp is not None:
						ztrop_list.append(ztropp)

			# average the tropopause heights and add to dictionary 
			ztrop_dict[s]=np.mean(ztrop_list)

		# turn dict into data frame  
		ZT=pd.Series(data=ztrop_dict, name='ztrop_mean')

		if debug:
			print(ZT)

		# turn dataframe into csv file
		hrrs_path = es.obs_data_paths('HRRS',hostname)
		datestr = DR[0].strftime("%Y%m%d")+'-'+DR[len(DR)-1].strftime("%Y%m%d")+'.csv'
		fname=hrrs_path+'/'+'mean_tropopause_height_per_station_'+datestr
		print('storing file '+fname)
		ZT.to_csv(fname, index=True, sep=',',header=True) 

	# note that this returns the Series from the final year processed 
	return(ZT)
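
# A hedged usage sketch (the daterange is invented for illustration):
DR_demo = dart.daterange(date_start=datetime.datetime(2010, 1, 1, 0, 0, 0),
		periods=28, DT='6H')
ZT_mean = HRRS_mean_ztrop_to_csv(DR_demo, debug=True)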
Example #27
def correlations_lag_lat_or_lon(E,maxlag,lat_or_lon = 'lon',filter_order=50,climatology_option='NODA',hostname='taurus',verbose=False):

	"""
	compute correlations between U850 or OLR in a reference area and everywhere else, 
	as a function of lag and either latitude or longitude 

	INPUTS:  
	E - a standard DART experiment dictionary, with the variable field and level range corresponding to some MJO variable  
	maxlag: the limit of the lag (in days) that we look at 
	lat_or_lon: choose dimension to preserve after averaging -- 'lat' or 'lon'  
	climatology_option: choose which climatology to compute the anomalies with respect to -- default is "NODA"  
	"""

	# change the given daterange to daily resolution, because the lag is specified in days  
	E['daterange'] = dart.change_daterange_to_daily(E['daterange'])

	# compute or load the daily climatology and deviation from climatology  
	anomalies,climatology,lat,lon,lev,DRnew = ano(E,climatology_option = climatology_option,hostname=hostname,verbose=verbose)

	# filter daily anomalies using a Lanczos filter
	AA,FA = filter(anomalies,filter_order,return_as_vector=False)
	
	if E['variable'] == 'U':
		variable_name = 'U'+str(E['levrange'][0])
	else:
		variable_name = E['variable']

	# compute the zonal and meridional mean of the resulting field 
	# the regions we average over depend on whether we want lag-lat, or lag-lon plots
	# also, note that, by construction of the filtered anomalies, the 3rd dimension is always time  
	if lat_or_lon == "lon":
		# select latitudes 10S-10N and average meridionally, then plot correlations as a function of lon  	
		lat1,lon1,FAm = aave('TB',FA,lat,lon,None,variable_name,averaging_dimension='lat')
	if lat_or_lon == "lat":
		# average over the longitude corridor 80-100E and plot correlations as a function of lat
		lat1,lon1,FAm = aave('ZB',FA,lat,lon,None,variable_name,averaging_dimension='lon')


	# area averaging  the desired variable over the Indian Ocean reference point
	if (E['daterange'][0].month  >= 10) or (E['daterange'][0].month  <= 2):
		season = 'winter'
	else:
		season = 'summer'
	lat0,lon0,FA0 = aave('IO',FA,lat,lon,season,variable_name,averaging_dimension="all")

	#------ compute field of correlation coefficients   	
	# empty array size Lag by Lat
	# plus an array to keep track of sample size
	Lag_range = range(-maxlag,maxlag+1)
	nlag = len(Lag_range)
	n = FAm.shape[0]
	R = np.zeros(shape=(nlag,n))
	S = np.zeros(shape=(nlag,n))

	# loop over the preserved spatial dimension (lat or lon)
	T = len(FA0)
	for ii in range(n):
		# loop over lags  
		for ilag,L in zip(range(nlag),Lag_range):
			# the time points that we can check go from L to T-L
			# so shorter lags have a larger sample size and are more significant.  
			if L < 0:
				Tsel = range(-L,T)
			if L > 0:
				Tsel = range(0,T-L)
			if L == 0:
				Tsel = range(0,T)

			# loop over the available time points and gather values to compare
			IO = []
			X  = []
			for k in Tsel:
				IO.append(FA0[k+L])
				X.append(FAm[ii,k])

			# now compute the correlation from this list of samples and store in the lag vs lat array  
			rho = np.corrcoef(X,IO)
			if not np.isnan(rho[1,0]):
				R[ilag,ii] = rho[1,0]
				S[ilag,ii] = len(IO)
			else:
				R[ilag,ii] = np.nan
				S[ilag,ii] = np.nan
	if lat_or_lon == 'lon':
		space_dim = lon1
	if lat_or_lon == 'lat':
		space_dim = lat1

	L = np.array(Lag_range)

	return R,S,L,space_dim
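
# A hedged usage sketch (experiment fields invented for illustration); the
# returned R can then be shown as a lag-longitude correlation plot:
import matplotlib.pyplot as plt

E_mjo = {'exp_name': 'W0910_GLOBAL', 'variable': 'U', 'levrange': [850, 850],
	'daterange': dart.daterange(date_start=datetime.datetime(2009, 11, 1, 0, 0, 0),
			periods=120, DT='1D')}
R, S, L, lons = correlations_lag_lat_or_lon(E_mjo, maxlag=20, lat_or_lon='lon')
plt.contourf(lons, L, R, np.linspace(-1.0, 1.0, 21), cmap='RdBu_r')
plt.xlabel('longitude')
plt.ylabel('lag (days)')
plt.colorbar()
plt.show()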