Example #1
    def get_array(self, key):
        """Get all data from file for the given BUFR key."""
        chunks = []
        with open(self.filename, "rb") as fh:
            while True:
                bufr = ec.codes_bufr_new_from_file(fh)
                if bufr is None:
                    break

                # unpack the message so its data section becomes readable
                ec.codes_set(bufr, 'unpack', 1)
                chunks.append(da.from_array(ec.codes_get_array(bufr, key, float),
                                            chunks=CHUNK_SIZE))
                ec.codes_release(bufr)

        # concatenate once instead of growing the array message by message
        arr = da.concatenate(chunks)
        if arr.size == 1:
            arr = arr[0]

        return arr
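The method above assumes dask, eccodes and a module-level CHUNK_SIZE that the snippet does not show; a minimal sketch of that surrounding setup (the chunk-size value is illustrative, not from the source):

import dask.array as da
import eccodes as ec

# Illustrative only: the original module defines its own chunk size.
CHUNK_SIZE = 1048576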
Example #2
def get_point_index(path, point, model):

    # custom version of the function in container_information_point

    # get clat and clon 1D arrays

    filename_clat = 'icon-eu-eps_europe_icosahedral_time-invariant_2018121000_clat.grib2'
    filename_clon = 'icon-eu-eps_europe_icosahedral_time-invariant_2018121000_clon.grib2'
    with open(path['base'] + path['grid'] + filename_clat, 'rb') as file:
        grib_id = eccodes.codes_grib_new_from_file(file)
        clat = eccodes.codes_get_array(grib_id, 'values')
        eccodes.codes_release(grib_id)
    with open(path['base'] + path['grid'] + filename_clon, 'rb') as file:
        grib_id = eccodes.codes_grib_new_from_file(file)
        clon = eccodes.codes_get_array(grib_id, 'values')
        eccodes.codes_release(grib_id)

    # read out index of native point

    filter_distance = get_latlon_filter_distance(model)
    lat_near = list(np.where(abs(clat - point['lat']) < filter_distance)[0])
    lon_near = list(np.where(abs(clon - point['lon']) < filter_distance)[0])
    id_near = list(set(lat_near).intersection(lon_near))
    id_near.sort()
    distances = np.sqrt(np.square((clat[id_near] - point['lat']) * 111.2)
                        + np.square((clon[id_near] - point['lon']) * 111.2
                                    * np.cos(point['lat'] * np.pi / 180)))
    index_nearest = id_near[np.argmin(distances)]

    return index_nearest
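A note on the metric used above: 111.2 km is roughly the length of one degree of latitude, and a degree of longitude shrinks with cos(latitude). Restated as a standalone helper (a sketch, not part of the source):

import numpy as np

def approx_distance_km(lat1, lon1, lat2, lon2):
    # equirectangular approximation, matching the formula in get_point_index
    dlat_km = (lat1 - lat2) * 111.2
    dlon_km = (lon1 - lon2) * 111.2 * np.cos(np.deg2rad(lat1))
    return np.sqrt(dlat_km ** 2 + dlon_km ** 2)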
Example #3
def test_grib_get_array():
    gid = eccodes.codes_grib_new_from_samples("reduced_gg_pl_160_grib1")
    pl = eccodes.codes_get_array(gid, "pl")
    assert pl[0] == 18
    pli = eccodes.codes_get_array(gid, "pl", int)
    assert np.array_equal(pl, pli)
    pls = eccodes.codes_get_array(gid, "centre", str)
    assert pls == ["ecmf"]
    eccodes.codes_release(gid)
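The test above exercises the optional type argument of codes_get_array. A small sketch of the same behaviour, assuming eccodes with its bundled samples is installed:

import eccodes

gid = eccodes.codes_grib_new_from_samples("reduced_gg_pl_160_grib1")
pl_native = eccodes.codes_get_array(gid, "pl")         # native type (integers)
pl_float = eccodes.codes_get_array(gid, "pl", float)   # coerced to float
print(pl_native.dtype, pl_float.dtype)
eccodes.codes_release(gid)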
Example #4
    def get_subset_values(self, subset_nr, autoget_cval=False):
         #  #[
        """
        request an array of values containing the values
        for a given subset for this bufr message
        """
        if (self.msg_loaded == -1):
            raise NoMsgLoadedError

        data = []
        field_names = self._get_abbr_names(subset_nr)
        print('field_names = ', field_names)
        print('DEBUG: names = ',self.get_names(subset_nr))
        for field in field_names:
            if field[0] in string.digits:
                print('cannot get data for field: ', field)
                continue
            print('trying field name: ', field)
            s = eccodes.codes_get_size(self.bufr_id, field)
            if s == 1:
                value = eccodes.codes_get(self.bufr_id, field)
                data.append(value)
            else:
                values = eccodes.codes_get_array(self.bufr_id, field)
                data.append(values[subset_nr])

        return data
Example #5
def decode_bufr_array(msgid, key):
    '''Return a numpy masked array of values corresponding to the fieldname,
    or None if there is an error.
    '''
    if key == 'datetime':
        return bufr_typical_timestamp(msgid)
    try:
        arr = eccodes.codes_get_array(msgid, key)
        if len(arr) == 0:
            return None
        dtype = eccodes.codes_get_native_type(msgid, key)
        if dtype is str:
            arr = numpy.char.strip(arr)
        if not isinstance(arr, numpy.ndarray):
            values = numpy.asarray(arr, dtype=dtype)
        else:
            values = arr
        if dtype is int:
            result = numpy.ma.masked_values(values, eccodes.CODES_MISSING_LONG)
        elif dtype is float:
            result = numpy.ma.masked_values(values,
                                            eccodes.CODES_MISSING_DOUBLE)
        else:
            result = numpy.ma.masked_array(data=values)
        return result
    except eccodes.CodesInternalError:
        return None
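decode_bufr_array expects an already unpacked message handle. A sketch of a driver that feeds it every message in a file (path and key are placeholders):

import eccodes

def decode_file(path, key):
    results = []
    with open(path, "rb") as f:
        while True:
            msgid = eccodes.codes_bufr_new_from_file(f)
            if msgid is None:  # end of file
                break
            eccodes.codes_set(msgid, "unpack", 1)  # required before data keys are readable
            results.append(decode_bufr_array(msgid, key))
            eccodes.codes_release(msgid)
    return results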
Example #6
def main():

    #point = dict(lat = 49.014, lon =  8.404, name = 'Karlsruhe')
    point = dict(lat = 50.822, lon =  8.920, name = 'Kirchhain')
    #point = dict(lat = 65.661, lon =-18.718, name = 'Iceland')


    path = dict(base = '/lsdfos/kit/imk-tro/projects/MOD/Gruppe_Knippertz/nw5893/',
                data = 'forecast_archive/pamore/kirchhain/run_12Z/',
                grid = 'forecast_archive/icon-eu-eps/grid/',
                plots = 'plots/kirchhain/run_12Z/')

    data_rain_gsp_sum = np.empty((25, 40, 75948))
    data_rain_con_sum = np.empty((25, 40, 75948))
    hours = list(range(0, 12+1))

    filenames_all = []
    for hour in hours:
        filenames_all.append(['iefff0{:03d}0000.m0{:02d}'.format(hour, member) for member in range(1, 41)])

    with ExitStack() as stack:
        files_all = [[stack.enter_context(open(path['base'] + path['data'] + filename,'rb'))\
          for filename in filenames_of_one_hour] for filenames_of_one_hour in filenames_all]

        for i, files_of_one_hour in enumerate(files_all):
            for j, file in enumerate(files_of_one_hour):
                grib_id = eccodes.codes_grib_new_from_file(file)
                data_rain_gsp_sum[i - hours[0], j, :] = eccodes.codes_get_array(grib_id, 'values')
                eccodes.codes_release(grib_id)  # release the first message before reading the next
                grib_id = eccodes.codes_grib_new_from_file(file)
                data_rain_con_sum[i - hours[0], j, :] = eccodes.codes_get_array(grib_id, 'values')
                eccodes.codes_release(grib_id)
    data_rain_tot_sum = data_rain_gsp_sum + data_rain_con_sum
    del data_rain_gsp_sum, data_rain_con_sum, files_all

    print(data_rain_tot_sum.max(axis=1).max(axis=1))
    print('data_rain_tot_sum: {:.0f}MB'.format(data_rain_tot_sum.nbytes / 1e6))

    stat_processing = 'max'
    member = None
    plot_rain_around_point(path, data_rain_tot_sum, 0, 12, point, stat_processing, member)

    '''stat_processing = 'member_extract'
    for member in range(1,41):
        plot_rain_around_point(path, data_rain_tot_sum, 0, 24, point, stat_processing, member)'''

    return
Example #7
def fetch_model_output(input_file, time_since_init, short_name, level):
    # read only the first message to obtain the grid geometry
    with open(input_file, "rb") as file:
        gid = ec.codes_grib_new_from_file(file)
    values = read_grib_array(input_file, short_name, time_since_init, level)
    lat = np.deg2rad(ec.codes_get_array(gid, "latitudes"))
    lon = np.deg2rad(ec.codes_get_array(gid, "longitudes"))
    no_of_columns = ec.codes_get_long(gid, "Ni")
    no_of_lines = ec.codes_get_long(gid, "Nj")
    ec.codes_release(gid)
    # extract 1D coordinate vectors from the full lat/lon fields
    lat_vector = np.zeros([no_of_lines])
    lon_vector = np.zeros([no_of_columns])
    for i in range(no_of_lines):
        lat_vector[i] = lat[i * no_of_columns]
    for i in range(no_of_columns):
        lon_vector[i] = lon[i]
    return lat_vector, lon_vector, vector_2_array(values, no_of_lines, no_of_columns)
Example #8
 def get(self, key, ktype=None):
     """Get value of a given key as its native or specified type."""
     # if self.missing(key):
     #    raise KeyError("Value of key %s is MISSING." % key)
     if eccodes.codes_get_size(self.codes_id, key) > 1:
         ret = eccodes.codes_get_array(self.codes_id, key, ktype)
     else:
         ret = eccodes.codes_get(self.codes_id, key, ktype)
     return ret
Example #9
 def get(self, name):
     # LOG.warn(str(self) + str(name))
     if name in CHEAT:
         return CHEAT[name]
     try:
         if name == "values":
             return eccodes.codes_get_values(self.handle)
         size = eccodes.codes_get_size(self.handle, name)
         LOG.debug(f"{name}:{size}")
         if size and size > 1:
             return eccodes.codes_get_array(self.handle, name)
         return eccodes.codes_get(self.handle, name)
     except eccodes.KeyValueNotFoundError:
         return None
Example #10
def cli(file_path):
    with open(file_path, 'rb') as f:
        handle = eccodes.codes_grib_new_from_file(f, headers_only=False)
        while handle is not None:
            date = eccodes.codes_get(handle, "dataDate")
            type_of_level = eccodes.codes_get(handle, "typeOfLevel")
            level = eccodes.codes_get(handle, "level")
            values = eccodes.codes_get_array(handle, "values")
            value = values[-1]
            # note: eccodes.codes_get_values(handle) returns the same 'values' array

            print(date, type_of_level, level, value)

            eccodes.codes_release(handle)
            handle = eccodes.codes_grib_new_from_file(f, headers_only=False)
Example #11
 def message_get(self, item, key_type=None, default=_MARKER):
     # type: (str, type, T.Any) -> T.Any
     """Get value of a given key as its native or specified type."""
     key = item
     try:
         values = eccodes.codes_get_array(self.codes_id, key, key_type)
         if values is None:
             values = ['unsupported_key_type']
     except eccodes.KeyValueNotFoundError:
         if default is _MARKER:
             raise KeyError(item)
         else:
             return default
     if len(values) == 1:
         return values[0]
     return values
Example #12
    def _get_abbr_names(self, subset=1):
        #  #[ request abbr. name of each descriptor for the given subset
        '''
        internal method to get the key name needed to extract the data
        '''
        if (self.msg_loaded == -1):
            raise NoMsgLoadedError

        # remove entries that are not expanded, i.e. that start with
        # a digit; these typically are replication codes and not part
        # of the field list
        abbr_names = [n for n in
                      eccodes.codes_get_array(self.bufr_id,
                                              'expandedAbbreviations')
                      if n[0] not in string.digits]
        return abbr_names
Example #13
    def get_unexp_descr_list(self):
        #  #[ get unexpanded descriptor list
        '''
        wrapper around self.bufr_obj.py_unexp_descr_list
        '''
        if (self.msg_loaded == -1):
            raise NoMsgLoadedError

        n = eccodes.codes_get(self.bufr_id, 'numberOfUnexpandedDescriptors')
        if n == 1:
            list_of_unexp_descr = [eccodes.codes_get(self.bufr_id,
                                                'unexpandedDescriptors'),]
        else:
            list_of_unexp_descr = eccodes.codes_get_array(self.bufr_id,
                                                'unexpandedDescriptors')
        
        return list_of_unexp_descr
Example #14
 def get_bufr_data(self, key):
     """Get BUFR data by key."""
     attr = np.array([])
     with open(self.filename, 'rb') as fh:
         while True:
             # get handle for message
             bufr = ec.codes_bufr_new_from_file(fh)
             if bufr is None:
                 break
             ec.codes_set(bufr, 'unpack', 1)
             tmp = ec.codes_get_array(bufr, key, float)
             if len(tmp) == 1:
                 size = ec.codes_get(bufr, 'numberOfSubsets')
                 tmp = np.resize(tmp, size)
             attr = np.append(attr, tmp)
             ec.codes_release(bufr)
     return attr
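The np.resize call above covers compressed BUFR messages, where a key shared by all subsets can come back as a single value; np.resize repeats it to the subset count. A minimal illustration with made-up numbers:

import numpy as np

tmp = np.array([287.4])          # one shared value from codes_get_array
size = 128                       # numberOfSubsets (illustrative)
expanded = np.resize(tmp, size)  # repeats the value 128 times
assert expanded.shape == (128,)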
Example #15
 def message_get(self, item, key_type=None, default=_MARKER):
     # type: (str, T.Optional[type], T.Any) -> T.Any
     """Get value of a given key as its native or specified type."""
     try:
         values = eccodes.codes_get_array(self.codes_id, item, key_type)
         if values is None:
             values = ["unsupported_key_type"]
     except eccodes.KeyValueNotFoundError:
         if default is _MARKER:
             raise KeyError(item)
         else:
             return default
     if len(values) == 1:
         if isinstance(values, np.ndarray):
             values = values.tolist()
         return values[0]
     return values
Example #16
File: z_on_ml.py Project: climatom/BAMS
def get_initial_values(idx, keep_sample=False):
    '''Get the values of surface z, pv and number of levels '''
    codes_index_select(idx, 'level', 1)
    codes_index_select(idx, 'step', 0)
    codes_index_select(idx, 'shortName', 'z')
    gid = codes_new_from_index(idx)

    values = {}
    # surface geopotential
    values['z'] = codes_get_values(gid)
    values['pv'] = codes_get_array(gid, 'pv')
    values['nlevels'] = codes_get(gid, 'NV', int) // 2 - 1
    check_max_level(idx, values)
    if keep_sample:
        values['sample'] = gid
    else:
        codes_release(gid)
    return values
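get_initial_values expects a codes index built elsewhere. A sketch of creating one over the three keys the function selects on (the file name is hypothetical):

from eccodes import codes_index_new_from_file, codes_index_release

idx = codes_index_new_from_file("model_levels.grib", ["shortName", "level", "step"])
values = get_initial_values(idx)
print(values["nlevels"])
codes_index_release(idx)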
Example #17
    def get_num_elements(self):
        #  #[
        """
        request the number of elements (descriptors) in the current subset
        """
        if (self.msg_loaded == -1):
            raise NoMsgLoadedError

        fieldnames = eccodes.codes_get_array(self.bufr_id, 'expandedNames')

        # other possibilities are:
        # 'expandedAbbreviations'
        # 'expandedNames'
        # 'expandedUnits'
        # 'expandedOriginalScales'
        # 'expandedOriginalReferences'
        # 'expandedOriginalWidths'
        
        return len(fieldnames)
Example #18
 def extract_msg_date_extremes(self, bufr, date_min=None, date_max=None):
     """Extract the minimum and maximum dates from a single bufr message."""
     ec.codes_set(bufr, 'unpack', 1)
     size = ec.codes_get(bufr, 'numberOfSubsets')
     years = np.resize(ec.codes_get_array(bufr, 'year'), size)
     months = np.resize(ec.codes_get_array(bufr, 'month'), size)
     days = np.resize(ec.codes_get_array(bufr, 'day'), size)
     hours = np.resize(ec.codes_get_array(bufr, 'hour'), size)
     minutes = np.resize(ec.codes_get_array(bufr, 'minute'), size)
     seconds = np.resize(ec.codes_get_array(bufr, 'second'), size)
     for year, month, day, hour, minute, second in zip(years, months, days, hours, minutes, seconds):
         time_stamp = datetime(year, month, day, hour, minute, second)
         date_min = time_stamp if not date_min else min(date_min, time_stamp)
         date_max = time_stamp if not date_max else max(date_max, time_stamp)
     return date_min, date_max
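A sketch of folding extract_msg_date_extremes over a whole file, threading the running extremes through each message (reader stands for an instance of the class defining the method above):

import eccodes as ec

def file_date_extremes(reader, filename):
    date_min, date_max = None, None
    with open(filename, "rb") as fh:
        while True:
            bufr = ec.codes_bufr_new_from_file(fh)
            if bufr is None:
                break
            date_min, date_max = reader.extract_msg_date_extremes(bufr, date_min, date_max)
            ec.codes_release(bufr)
    return date_min, date_max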
Example #19
    def get_values(self, descr_nr, autoget_cval=False):
        #  #[
        """
        request an array of values containing the values
        for a given descriptor number for all subsets
        NOTE: this may not work for templates using delayed replication.
        """
        if (self.msg_loaded == -1):
            raise NoMsgLoadedError

        list_of_names = self._get_abbr_names()
        keyname = list_of_names[descr_nr]
        print('keyname: ', keyname)
        s = eccodes.codes_get_size(self.bufr_id, keyname)
        t = eccodes.codes_get_native_type(self.bufr_id, keyname)
        print('key:', keyname, 'size = ', s, 'type = ', t)

        if s == 1:  # or t == str:
            # values = eccodes.codes_get_string(bufr_id, keyname)
            values = [eccodes.codes_get(self.bufr_id, keyname)]
        else:
            values = eccodes.codes_get_array(self.bufr_id, keyname)

        return values
Example #20
gidICEMR = np.flipud([
    i + 1 for i in range(len(varNames))
    if (varParCat[i] == 1 and varParNum[i] == 23 and levels[i] in levsIncl)
])  #Ice mixing ratio
gidICEMR = gidICEMR[::-1]
gidSNOWMR = np.flipud([
    i + 1 for i in range(len(varNames))
    if (varParCat[i] == 1 and varParNum[i] == 25 and levels[i] in levsIncl)
])  #Snow mixing ratio
gidSNOWMR = gidSNOWMR[::-1]
gidGRPMR = np.flipud([
    i + 1 for i in range(len(varNames))
    if (varParCat[i] == 1 and varParNum[i] == 32 and levels[i] in levsIncl)
])  #Graupel mixing ratio
gidGRPMR = gidGRPMR[::-1]

lats = eccodes.codes_get_array(gidPRMSL, 'distinctLatitudes')
lons = eccodes.codes_get_array(gidPRMSL, 'distinctLongitudes')
Ni = eccodes.codes_get(gidPRMSL, 'Ni')
Nj = eccodes.codes_get(gidPRMSL, 'Nj')

for i in range(len(lats) - 1, -1, -1):
    if lats[i] >= latMinCP:
        iLatMinGRIB = i + 1
        print("i", i, lats[i], lats[i + 1])
        break
for i in range(len(lats) - 1, -1, -1):
    if lats[i] > latMaxCP:
        iLatMaxGRIB = i
        break
for i in range(len(lons) - 1):
    if lons[i + 1] >= lonMinCP:
Example #21
def plot_contourmap(path, date, fcst_hours, variable, stat_processing_methods):

    ##### generate subpath and filename #####

    subpath = 'run_{}{:02}{:02}{:02}/{}/'.format(\
                    date['year'], date['month'], date['day'], date['hour'], variable)

    filename1 = 'icon-eu-eps_europe_icosahedral_single-level_{}{:02}{:02}{:02}_{:03}_{}.grib2'.format(\
                    date['year'], date['month'], date['day'], date['hour'], fcst_hours[0], variable)

    filename2 = 'icon-eu-eps_europe_icosahedral_single-level_{}{:02}{:02}{:02}_{:03}_{}.grib2'.format(\
                    date['year'], date['month'], date['day'], date['hour'], fcst_hours[1], variable)

    ########################################################################
    ###  read data                                                       ###
    ########################################################################

    ##### create empty numpy arrays #####
    ##### 2 fcst_hours, 40 members, 75948 eu gridpoints #####

    data_raw = np.empty((2, 40, 75948))
    data_members = np.empty((40, 75948))

    ##### every time in loop open next grib msg from grib file #####
    ##### grib messages in dwd file are sorted by increasing member number #####

    with open(path['base'] + path['data'] + subpath + filename1, 'rb') as file:
        for member in range(1, 41):
            print('read data from member {}'.format(member))
            grib_msg_id = eccodes.codes_grib_new_from_file(file)
            data_raw[0, member - 1, :] = eccodes.codes_get_array(
                grib_msg_id, 'values')
            eccodes.codes_release(grib_msg_id)
    with open(path['base'] + path['data'] + subpath + filename2, 'rb') as file:
        for member in range(1, 41):
            print('read data from member {}'.format(member))
            grib_msg_id = eccodes.codes_grib_new_from_file(file)
            data_raw[1, member - 1, :] = eccodes.codes_get_array(
                grib_msg_id, 'values')
            eccodes.codes_release(grib_msg_id)

    ##### take the difference of the two accumulated total precipitation arrays #####

    data_members = data_raw[1, :, :] - data_raw[0, :, :]
    del data_raw

    ##### open icon-eps grid file #####

    icongrid_file = nc.Dataset(
        path['base'] + path['grid'] + 'icon_grid_0028_R02B07_N02.nc', 'r')
    vlat = icongrid_file.variables['clat_vertices'][:].data * 180. / np.pi
    vlon = icongrid_file.variables['clon_vertices'][:].data * 180. / np.pi
    clat = icongrid_file.variables['clat'][:].data * 180. / np.pi
    clon = icongrid_file.variables['clon'][:].data * 180. / np.pi
    icongrid_file.close()

    for stat_processing in stat_processing_methods:

        ########################################################################
        ###  statistically process data                                      ###
        ########################################################################

        if stat_processing == 'mean':
            data_processed = data_members.mean(axis=0)
        elif stat_processing == 'max':
            data_processed = data_members.max(axis=0)
        elif stat_processing == 'min':
            data_processed = data_members.min(axis=0)
        elif stat_processing == '90p':
            data_processed = np.percentile(data_members, 90, axis=0)
        elif stat_processing == '75p':
            data_processed = np.percentile(data_members, 75, axis=0)
        elif stat_processing == '50p':
            data_processed = np.percentile(data_members, 50, axis=0)
        elif stat_processing == '25p':
            data_processed = np.percentile(data_members, 25, axis=0)
        elif stat_processing == '10p':
            data_processed = np.percentile(data_members, 10, axis=0)

        #print('shape of data_members: {}'.format(np.shape(data_members)))
        #print('shape of data_processed: {}'.format(np.shape(data_processed)))

        ########################################################################
        ###  plot data on world map                                          ###
        ########################################################################

        ##### set domain from center point and radius #####

        center_point = dict(lat=48.5, lon=9.0)
        radius = 1800  # domain radius in km around center_point

        domain = dict(
            lat_min=center_point['lat'] - radius / 111.2,
            lat_max=center_point['lat'] + radius / 111.2,
            lon_min=center_point['lon'] - radius /
            (111.2 * np.cos(center_point['lat'] * np.pi / 180)),
            lon_max=center_point['lon'] + radius /
            (111.2 * np.cos(center_point['lat'] * np.pi / 180)),
        )

        ##### or set domain manually in deg N/E #####
        '''domain = dict(
                        lat_min = 0.0,
                        lat_max = 20.0,
                        lon_min = 0.0,
                        lon_max = 20.0,
                        )'''

        ##### set image size (should be square) #####
        ##### the plotting area in pyngl cannot exceed a square region even when plotting on rectangular images #####
        ##### to obtain rectangular plots one has to crop manually afterwards, e.g. with the pillow package #####

        x_resolution = 800
        y_resolution = 800
        wks_res = Ngl.Resources()
        wks_res.wkWidth = x_resolution
        wks_res.wkHeight = y_resolution

        plot_name = 'contourplot_icon-eu-eps_tot_prec_{:02d}-{:02d}h_{}'.format(\
                        fcst_hours[0], fcst_hours[1], stat_processing)
        wks_type = 'png'
        wks = Ngl.open_wks(wks_type, path['base'] + path['plots'] + plot_name,
                           wks_res)
        resources = Ngl.Resources()  # create resources object containing all the plot settings

        resources.mpProjection = 'Hammer'  # projection type
        resources.mpCenterLonF = (domain['lon_max'] + domain['lon_min']
                                  ) / 2  # projection center point
        resources.mpCenterLatF = (domain['lat_max'] + domain['lat_min']) / 2

        resources.mpLimitMode = 'latlon'
        resources.mpMinLonF = domain['lon_min']
        resources.mpMaxLonF = domain['lon_max']
        resources.mpMinLatF = domain['lat_min']
        resources.mpMaxLatF = domain['lat_max']

        ##### set plot area #####

        resources.nglMaximize = False
        resources.vpXF = 0.05
        resources.vpYF = 0.9
        resources.vpWidthF = 0.7
        resources.vpHeightF = 0.7

        ##### set all map plot settings #####

        resources.mpFillOn = True  # turn on filled map areas
        resources.mpFillColors = [
            'pink', 'blue', 'white', 'white'
        ]  # set colors for [FillValue, Ocean, Land , InlandWater]

        resources.mpDataBaseVersion = 'MediumRes'  # quality of national borders
        resources.mpDataSetName = 'Earth..4'
        resources.mpOutlineBoundarySets = 'national'

        #resources.mpDataBaseVersion         = 'HighRes'
        #resources.mpDataResolution          = 'Fine'
        resources.mpGeophysicalLineThicknessF = 7.0 * x_resolution / 1000  # keep borders thickness resolution-independent
        resources.mpNationalLineThicknessF = 7.0 * x_resolution / 1000
        #resources.mpGridAndLimbDrawOrder        = 'postdraw'

        resources.mpGridAndLimbOn = False  # turn off geographic coordinates grid
        #resources.mpLimbLineColor               = 'black'
        #resources.mpLimbLineThicknessF          = 10
        #resources.mpGridLineColor               = 'black'
        #resources.mpGridLineThicknessF          = 1.0
        #resources.mpGridSpacingF                = 1

        resources.mpPerimOn = True  # turn on perimeter around plot
        resources.mpPerimLineColor = 'black'
        resources.mpPerimLineThicknessF = 8.0 * x_resolution / 1000  # keep perimeter thickness resolution-independent

        resources.tmXBOn = False  # turn off location ticks around plot
        resources.tmXTOn = False
        resources.tmYLOn = False
        resources.tmYROn = False

        resources.sfDataArray = data_processed  # data input file to plot
        resources.sfXArray = clon  # array with cell center locations
        resources.sfYArray = clat
        resources.sfXCellBounds = vlon  # array with cell vertices locations
        resources.sfYCellBounds = vlat
        resources.sfMissingValueV = 9999  # in case you want to mask values

        resources.cnFillOn = True
        resources.cnFillMode = 'CellFill'
        #resources.cnCellFillEdgeColor   = 'black'      # uncomment this for plotting the cell edges

        resources.cnMissingValFillColor = 'black'
        resources.cnFillPalette = 'WhiteBlueGreenYellowRed'  # color palette
        resources.cnLevelSelectionMode = 'ManualLevels'

        minlevel = 0.0  # min level of colorbar
        maxlevel = 50.0  # max level of colorbar
        numberoflevels = 250  # number of levels of colorbar, max. 250 with this color palette
        resources.cnMinLevelValF = minlevel
        resources.cnMaxLevelValF = maxlevel
        resources.cnLevelSpacingF = (maxlevel - minlevel) / numberoflevels

        resources.cnLinesOn = False  # turn off contour lines
        resources.cnLineLabelsOn = False  # turn off contour labels

        ##### set resources for a nice colorbar #####

        resources.lbLabelBarOn = True
        resources.lbAutoManage = False
        resources.lbOrientation = 'vertical'
        resources.lbLabelOffsetF = 0.05
        #resources.lbBoxMinorExtentF     = 0.2

        resources.lbLabelStride = 25  # print a tick every 25 levels
        resources.lbLabelFontHeightF = 0.016
        resources.lbBoxSeparatorLinesOn = False
        resources.lbBoxLineThicknessF = 4.0
        #resources.lbBoxEndCapStyle     = 'TriangleBothEnds'
        resources.lbLabelAlignment = 'BoxCenters'

        resources.lbTitleString = 'mm'
        resources.lbTitleFontHeightF = 0.016
        resources.lbTitlePosition = 'Right'
        resources.lbTitleDirection = 'Across'
        resources.lbTitleAngleF = 90.0
        resources.lbTitleExtentF = 0.1
        resources.lbTitleOffsetF = -0.15

        resources.nglFrame = False  # hold the frame; the title text below is drawn on the same plot
        Ngl.contour_map(wks, data_processed, resources)  # draw the actual plot

        ##### plot title text #####

        text = '{:02d}-{:02d}h Total Precipitation {}, ICON-EPS run {:02}.{:02}.{} {:02}Z'.format(\
                    fcst_hours[0], fcst_hours[1], stat_processing,\
                    date['day'], date['month'], date['year'], date['hour'])
        x = 0.5
        y = 0.95

        text_res_1 = Ngl.Resources()
        text_res_1.txFontHeightF = 0.018
        text_res_1.txJust = 'TopCenter'

        Ngl.text_ndc(wks, text, x, y, text_res_1)

        Ngl.frame(wks)  # advance frame
        # destroy workspace to free memory, relevant when plotting many figures in one script
        Ngl.destroy(wks)

        print('plotted "{}.png"'.format(plot_name))

    return
Example #22
def get_ecc_msg(gid, namespace=None, skipkeys=None):
    """Read data from one particular ecc message

    Parameters
    ----------
    gid : ecc message id
    namespace : string
        namespace to be retrieved, defaults to None (means all)
        'ls', 'parameter', 'time', 'geography', 'vertical', 'statistics', 'mars'
    skipkeys  : list of strings
        keys to be skipped, defaults to None
        possible keys: 'computed', 'coded', 'edition', 'duplicates', 'read_only', 'function'
        

    Returns
    -------
    data : dictionary of ecc message contents 
    """
    
    # get key iterator
    iterid = ecc.codes_keys_iterator_new(gid, namespace)

    # Different types of keys can be skipped
    if skipkeys:
        if 'computed' in skipkeys:
            ecc.codes_skip_computed(iterid)
        if 'coded' in skipkeys:
            ecc.codes_skip_coded(iterid)
        if 'edition' in skipkeys:
            ecc.codes_skip_edition_specific(iterid)
        if 'duplicates' in skipkeys:
            ecc.codes_skip_duplicates(iterid)
        if 'read_only' in skipkeys:
            ecc.codes_skip_read_only(iterid)
        if 'function' in skipkeys:    
            ecc.codes_skip_function(iterid)
    
    data = OrderedDict()

    # iterate over message keys
    while ecc.codes_keys_iterator_next(iterid):

        keyname = ecc.codes_keys_iterator_get_name(iterid)

        # try to get key values,
        # use get_array for sizes > 1 and get for sizes == 1
        if ecc.codes_get_size(gid, keyname) > 1:
            if ecc.codes_get_native_type(gid, keyname) is not str:
                keyval = ecc.codes_get_array(gid, keyname, None)
            else:
                keyval = ecc.codes_get(gid, keyname, None)
        else:
            # Todo: fix reading mybits
            if keyname not in ['mybits']:
                keyval = ecc.codes_get(gid, keyname, None)
            else:
                keyval = 'err'

        # add keyname-keyvalue-pair to output dictionary
        data[keyname] = keyval

    # release iterator
    ecc.codes_keys_iterator_delete(iterid)

    return data
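A usage sketch for get_ecc_msg, assuming a GRIB file named sample.grib; namespace and skipkeys take the values documented in the docstring above:

import eccodes as ecc

with open("sample.grib", "rb") as f:
    gid = ecc.codes_grib_new_from_file(f)
    meta = get_ecc_msg(gid, namespace="ls", skipkeys=["computed", "read_only"])
    ecc.codes_release(gid)

for name, value in meta.items():
    print(name, value)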
Example #23
def bufr_decode(f,
                fn,
                archive,
                args,
                fakeTimes=True,
                fakeDisplacement=True,
                logFixup=True):
    ibufr = codes_bufr_new_from_file(f)
    if not ibufr:
        raise BufrUnreadableError("empty file", fn, archive)
    codes_set(ibufr, "unpack", 1)

    missingHdrKeys = 0
    header = {}
    try:
        k = "extendedDelayedDescriptorReplicationFactor"
        num_samples = codes_get_array(ibufr, k)[0]
    except Exception as e:
        codes_release(ibufr)
        raise MissingKeyError(k,
                              message=f"can't determine number of samples: {e}")

    # BAIL HERE if no num_samples

    ivals = [
        "typicalYear",
        "typicalMonth",
        "typicalDay",
        "typicalHour",
        "typicalMinute",
        "typicalSecond",
        "blockNumber",
        "stationNumber",
        "radiosondeType",
        "height",
        "year",
        "month",
        "day",
        "hour",
        "minute",
        "second",
        "correctionAlgorithmsForHumidityMeasurements",
        "pressureSensorType",
        "temperatureSensorType",
        "humiditySensorType",
        "geopotentialHeightCalculation",
        "trackingTechniqueOrStatusOfSystem",
        "measuringEquipmentType",
    ]
    fvals = [
        "radiosondeOperatingFrequency",
        "latitude",
        "longitude",
        "heightOfStationGroundAboveMeanSeaLevel",
        "heightOfBarometerAboveMeanSeaLevel",
    ]
    svals = [
        "radiosondeSerialNumber",
        "typicalDate",
        "typicalTime",
        "text",
        "softwareVersionNumber",
    ]

    for k in ivals + fvals + svals:
        try:
            value = codes_get(ibufr, k)
            if k in ivals:
                if value != CODES_MISSING_LONG:
                    header[k] = value
            elif k in fvals:
                if value != CODES_MISSING_DOUBLE:
                    header[k] = value
            elif k in svals:
                header[k] = value
            else:
                pass
        except Exception as e:
            logging.debug(f"missing header key={k} e={e}")
            missingHdrKeys += 1

    # special-case warts we do not really care about
    warts = ["shipOrMobileLandStationIdentifier"]

    for k in warts:
        try:
            header[k] = codes_get(ibufr, k)
        except Exception:
            missingHdrKeys += 1

    fkeys = [  # 'extendedVerticalSoundingSignificance',
        "pressure",
        "nonCoordinateGeopotentialHeight",
        "latitudeDisplacement",
        "longitudeDisplacement",
        "airTemperature",
        "dewpointTemperature",
        "windDirection",
        "windSpeed",
    ]

    samples = []
    invalidSamples = 0
    missingValues = 0
    fakeTimeperiod = 0
    fixups = []  # report once only

    for i in range(1, num_samples + 1):
        sample = {}

        k = "timePeriod"
        timePeriod = codes_get(ibufr, f"#{i}#{k}")
        if timePeriod == CODES_MISSING_LONG:
            invalidSamples += 1
            if not fakeTimes:
                continue
            else:
                timePeriod = fakeTimeperiod
                fakeTimeperiod += FAKE_TIME_STEPS
                if k not in fixups:
                    logging.debug(
                        f"FIXUP timePeriod fakeTimes:{fakeTimes} fakeTimeperiod={fakeTimeperiod}"
                    )
                    fixups.append(k)

        sample[k] = timePeriod
        replaceable = ["latitudeDisplacement", "longitudeDisplacement"]
        sampleOK = True
        for k in fkeys:
            name = f"#{i}#{k}"
            try:
                value = codes_get(ibufr, name)
                if value != CODES_MISSING_DOUBLE:
                    sample[k] = value
                else:
                    if fakeDisplacement and k in replaceable:
                        if k not in fixups:
                            logging.debug(f"--FIXUP  key {k}")
                            fixups.append(k)
                        sample[k] = 0
                    else:
                        # logging.warning(f"--MISSING {i} key {k} ")
                        sampleOK = False
                        missingValues += 1
            except Exception as e:
                sampleOK = False
                logging.debug(f"sample={i} key={k} e={e}, skipping")
                missingValues += 1

        if sampleOK:
            samples.append(sample)

    logging.debug((f"samples used={len(samples)}, invalid samples="
                   f"{invalidSamples}, skipped header keys={missingHdrKeys},"
                   f" missing values={missingValues}"))

    codes_release(ibufr)
    return header, samples
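bufr_decode reads per-sample values with eccodes' rank syntax, where '#i#key' selects the i-th occurrence of a repeated key. A minimal illustration (the file name is hypothetical):

import eccodes

with open("sonde.bufr", "rb") as f:
    ibufr = eccodes.codes_bufr_new_from_file(f)
    eccodes.codes_set(ibufr, "unpack", 1)
    # '#2#airTemperature' selects the second occurrence of the repeated key
    t2 = eccodes.codes_get(ibufr, "#2#airTemperature")
    eccodes.codes_release(ibufr)
print(t2)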
Example #24
def main():

    path = dict(
        base='/lsdfos/kit/imk-tro/projects/MOD/Gruppe_Knippertz/nw5893/',
        data='forecast_archive/pamore/heatwave_25.07.19/',
        grid='forecast_archive/icon-eu-eps/grid/',
        plots='plots/heatwave_25.07.19/point_plots/')

    run = dict(year=2019, month=7, day=21, hour=0)

    hours = [
        6, 12, 18, 100, 106, 112, 118, 200, 206, 212, 218, 300, 306, 312, 318,
        400, 406, 412, 418, 500
    ]

    data_tmax_6h = np.empty((len(hours), 40, 75948))

    filenames_all = []
    for hour in hours:
        filenames_all.append([
            'iefff0{:03d}0000.m0{:02d}'.format(hour, member)
            for member in range(1, 41)
        ])
    path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/'.format(
        run['year'], run['month'], run['day'], run['hour'])

    with ExitStack() as stack:
        files_all = [[stack.enter_context(open(path['base'] + path['data'] + path['data_subfolder'] + filename,'rb'))\
          for filename in filenames_of_one_hour] for filenames_of_one_hour in filenames_all]

        for i, files_of_one_hour in enumerate(files_all):
            for j, file in enumerate(files_of_one_hour):
                grib_id = eccodes.codes_grib_new_from_file(file)
                data_tmax_6h[i, j, :] = eccodes.codes_get_array(
                    grib_id, 'values')
                eccodes.codes_release(grib_id)
    del files_all, files_of_one_hour, file

    data_tmax_6h -= 273.15
    data_tmax_24h = data_tmax_6h.reshape(5, 4, 40, 75948).max(axis=1)

    points = []
    points.append(
        dict(lat=49.014,
             lon=8.350,
             name='Karlsruhe',
             measurements=[30.0, 30.0, 30.0, 30.0, 30.0]))
    points.append(
        dict(lat=48.860,
             lon=2.350,
             name='Paris',
             measurements=[30.0, 30.0, 30.0, 30.0, 30.0]))
    points.append(
        dict(lat=50.937,
             lon=6.954,
             name='Koeln',
             measurements=[30.0, 30.0, 30.0, 30.0, 30.0]))
    points.append(
        dict(lat=47.198,
             lon=-1.534,
             name='Nantes',
             measurements=[30.0, 30.0, 30.0, 30.0, 30.0]))
    points.append(
        dict(lat=52.519,
             lon=13.407,
             name='Berlin',
             measurements=[30.0, 30.0, 30.0, 30.0, 30.0]))
    points.append(
        dict(lat=48.240,
             lon=11.570,
             name='Munchen',
             measurements=[30.0, 30.0, 30.0, 30.0, 30.0]))
    points.append(
        dict(lat=52.489,
             lon=-3.462,
             name='Wales',
             measurements=[30.0, 30.0, 30.0, 30.0, 30.0]))

    for point in points:
        index_nearest = get_point_index(path, point, 'icon-eu-eps')
        ens_data = data_tmax_24h[:, :, index_nearest]

        print(point['name'])
        #print(ens_data)

        plot_tmax_uncertainty_shades(path, run, point, ens_data)

    return
Example #25
def read_grid_coordinates(model, grid):
    path = dict(base='/')
    path['grid'] = 'data/model_data/{}/grid/'.format(model)

    if model == 'icon-eu-eps':
        if grid == 'icosahedral':
            filename = 'icon_grid_0028_R02B07_N02.nc'
        elif grid == 'latlon_0.2':
            filename = 'icon-eu-eps_latlon_0.2_grid_coordinates.nc'
    elif model == 'icon-global-eps':
        filename = 'icon_grid_0024_R02B06_G.nc'
    elif model == 'icon-eu-det':
        filename_clat = 'icon-eu_europe_regular-lat-lon_time-invariant_2019040800_RLAT.grib2'
        filename_clon = 'icon-eu_europe_regular-lat-lon_time-invariant_2019040800_RLON.grib2'
    elif model == 'icon-global-det':
        if grid == 'icosahedral':
            filename = 'icon_grid_0026_R03B07_G.nc'
        elif grid == 'latlon_0.25':
            filename = 'icon-global-det_latlon_0.25_grid_coordinates.nc'
        elif grid == 'latlon_0.1':
            filename = 'icon-global-det_latlon_0.1_grid_coordinates.nc'

    if grid == 'icosahedral':
        with xr.open_dataset(path['base'] + path['grid'] + filename) as ds:
            clat = ds['clat'].values * 180 / np.pi
            clon = ds['clon'].values * 180 / np.pi
            vlat = ds['clat_vertices'].values * 180 / np.pi
            vlon = ds['clon_vertices'].values * 180 / np.pi
        return clat, clon, vlat, vlon

    elif grid == 'latlon_0.0625':
        with open(path['base'] + path['grid'] + filename_clat, 'rb') as file:
            grib_id = eccodes.codes_grib_new_from_file(file)
            clat = eccodes.codes_get_array(grib_id, 'values')
            eccodes.codes_release(grib_id)
        with open(path['base'] + path['grid'] + filename_clon, 'rb') as file:
            grib_id = eccodes.codes_grib_new_from_file(file)
            clon = eccodes.codes_get_array(grib_id, 'values')
            eccodes.codes_release(grib_id)
        return clat.reshape((657, 1097)), clon.reshape((657, 1097))

    elif grid == 'latlon_0.2':
        with xr.open_dataset(path['base'] + path['grid'] + filename) as ds:
            clat = ds['lat'].values
            clon = ds['lon'].values
        return clat, clon

    elif grid == 'latlon_0.25':
        with xr.open_dataset(path['base'] + path['grid'] + filename) as ds:
            clat = ds['lat'].values
            clon = ds['lon'].values
        clon = np.where(clon > 180, clon - 360, clon)
        clon_new = np.empty_like(clon)
        clon_new[719:] = clon[:721]
        clon_new[:719] = clon[721:]
        return clat, clon_new

    elif grid == 'latlon_0.1':
        with xr.open_dataset(path['base'] + path['grid'] + filename) as ds:
            clat = ds['lat'].values
            clon = ds['lon'].values
        clon = np.where(clon > 180, clon - 360, clon)
        clon_new = np.empty_like(clon)
        clon_new[1799:] = clon[:1801]
        clon_new[:1799] = clon[1801:]
        return clat, clon_new
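The slicing in the 0.25 and 0.1 degree branches re-orders a 0..360 degree longitude axis into -180..180. Assuming a 1440-point 0.25 degree grid, np.roll expresses the same shift:

import numpy as np

clon = np.arange(0, 360, 0.25)                 # 1440 points: 0.0 .. 359.75
clon = np.where(clon > 180, clon - 360, clon)  # map values to -180 .. 180
clon_new = np.roll(clon, 719)                  # same reordering as the slicing above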
Example #26
def get_point_index(model, point):

    # if known named point get grid point location #

    if 'lat' not in point:
        #print('pointname is known: {}'.format(point['name']))
        point = which_grid_point(point['name'], model)

    path = dict(base='/')
    path['subdir'] = 'data/model_data/{}/grid/'.format(model)

    if model == 'icon-eu-eps':
        filename_clat = 'icon-eu-eps_europe_icosahedral_time-invariant_2018121000_clat.grib2'
        filename_clon = 'icon-eu-eps_europe_icosahedral_time-invariant_2018121000_clon.grib2'
    elif model == 'icon-global-eps':
        filename_clat = 'icon-eps_global_icosahedral_time-invariant_2019010800_clat.grib2'
        filename_clon = 'icon-eps_global_icosahedral_time-invariant_2019010800_clon.grib2'
    elif model == 'icon-eu-det':
        filename_clat = 'icon-eu_europe_regular-lat-lon_time-invariant_2019040800_RLAT.grib2'
        filename_clon = 'icon-eu_europe_regular-lat-lon_time-invariant_2019040800_RLON.grib2'
    elif model == 'icon-global-det':
        filename_clat = 'icon_global_icosahedral_time-invariant_2020020700_CLAT.grib2'
        filename_clon = 'icon_global_icosahedral_time-invariant_2020020700_CLON.grib2'

    # get clat and clon 1D arrays #

    with open(path['base'] + path['subdir'] + filename_clat, 'rb') as file:
        grib_id = eccodes.codes_grib_new_from_file(file)
        clat = eccodes.codes_get_array(grib_id, 'values')
        eccodes.codes_release(grib_id)
    with open(path['base'] + path['subdir'] + filename_clon, 'rb') as file:
        grib_id = eccodes.codes_grib_new_from_file(file)
        clon = eccodes.codes_get_array(grib_id, 'values')
        eccodes.codes_release(grib_id)

    # read out index of nearest icosahedral point #

    filter_distance = get_latlon_filter_distance(model)
    lat_near = list(np.where(abs(clat - point['lat']) < filter_distance)[0])
    lon_near = list(np.where(abs(clon - point['lon']) < filter_distance)[0])
    id_near = list(set(lat_near).intersection(lon_near))
    id_near.sort()
    distances = np.sqrt(np.square((clat[id_near] - point['lat']) * 111.2)
                        + np.square((clon[id_near] - point['lon']) * 111.2
                                    * np.cos(point['lat'] * np.pi / 180)))
    index_nearest = id_near[np.argmin(distances)]

    if model == 'icon-eu-det':
        # this model data is on regular lat-lon-grid #
        lat_index = int((clat[index_nearest] - 29.5) / 0.0625)
        lon_index = int((clon[index_nearest] + 23.5) / 0.0625)
        point_index = [lat_index, lon_index]
    else:
        # all other model data is on icosahedral grid with single index #
        point_index = [index_nearest]

    return point_index
Example #27
def read_grib_array(filename, short_name, time_since_init, level):
    gid = scan_for_gid(filename, short_name, time_since_init, level)
    return_array = ec.codes_get_array(gid, "values")
    ec.codes_release(gid)
    return return_array
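read_grib_array depends on a scan_for_gid helper not shown in the snippet. A hypothetical reconstruction, assuming it returns the handle of the first message matching the filters (the caller releases it):

import eccodes as ec

def scan_for_gid(filename, short_name, time_since_init, level):
    # hypothetical helper: find the first matching message and hand back its gid
    with open(filename, "rb") as f:
        while True:
            gid = ec.codes_grib_new_from_file(f)
            if gid is None:
                raise ValueError("no matching GRIB message found")
            if (ec.codes_get(gid, "shortName") == short_name
                    and ec.codes_get(gid, "forecastTime") == time_since_init
                    and ec.codes_get(gid, "level") == level):
                return gid
            ec.codes_release(gid)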
Example #28
    def select_messages(self, **kwargs):
        self._selected_grbs = self._get_gids(**kwargs)
        self._log(f'Selected {len(self._selected_grbs)} grib messages')

        if len(self._selected_grbs) > 0:
            self._gid_main_res = self._selected_grbs[0]
            grid = GribGridDetails(self._selected_grbs[0])
            # some cumulated messages come with the message at step=0 as instant, to permit aggregation
            # cumulated rainfall rates could have the step zero instant message as kg/m^2, instead of kg/(m^2*s)
            if len(self._selected_grbs) > 1:
                unit = codes_get(self._selected_grbs[1], 'units')
                type_of_step = codes_get(self._selected_grbs[1], 'stepType')
            else:
                type_of_step = codes_get(self._selected_grbs[0], 'stepType')
                unit = codes_get(self._selected_grbs[0], 'units')
            type_of_level = codes_get(self._selected_grbs[0], 'levelType')

            missing_value = codes_get(self._selected_grbs[0], 'missingValue')
            data_date = codes_get(self._selected_grbs[0], 'dataDate')
            all_values = {}
            all_values_second_res = {}
            grid2 = None
            input_step = self._step_grib
            for g in self._selected_grbs:
                start_step = codes_get(g, 'startStep')
                end_step = codes_get(g, 'endStep')
                points_meridian = codes_get(g, 'Nj')
                level = codes_get(g, 'level')
                if f'{start_step}-{end_step}' == self._change_step_at:
                    # second time resolution
                    input_step = self._step_grib2

                step_key = Step(start_step, end_step, points_meridian,
                                input_step, level)

                if (points_meridian != grid.num_points_along_meridian
                        and grid.get_2nd_resolution() is None):
                    # found second resolution messages
                    grid2 = GribGridDetails(g)
                    self._gid_ext_res = g
                values = codes_get_double_array(g, 'values')

                # Handling missing grib values.
                # If bitmap is present, array will be a masked_array
                # and array.mask will be used later
                # in interpolation and manipulation
                bitmap_present = codes_get(g, 'bitmapPresent')
                if bitmap_present:
                    # Get the bitmap array which contains 0s and 1s
                    bitmap = codes_get_array(g, 'bitmap', int)
                    values = ma.masked_where(bitmap == 0, values, copy=False)

                if not grid2:
                    all_values[step_key] = values
                elif points_meridian != grid.num_points_along_meridian:
                    all_values_second_res[step_key] = values

            if grid2:
                key_2nd_spatial_res = min(all_values_second_res.keys())
                grid.set_2nd_resolution(grid2, key_2nd_spatial_res)
            return Messages(all_values,
                            missing_value,
                            unit,
                            type_of_level,
                            type_of_step,
                            grid,
                            all_values_second_res,
                            data_date=data_date)
        # no messages found
        else:
            raise ApplicationException.get_exc(NO_MESSAGES,
                                               details=f'using {kwargs}')
Example #29
def read_grib_array(filename, short_name):
    gid = scan_for_gid(filename, short_name)
    return_array = ec.codes_get_array(gid, "values")
    ec.codes_release(gid)
    return return_array
Example #30
    def get_units(self, subset=1):
        #  #[ request unit of each descriptor for the given subset
        if (self.msg_loaded == -1):
            raise NoMsgLoadedError

        return eccodes.codes_get_array(self.bufr_id, 'expandedUnits')
Example #31
def read_grib_property(filename, short_name, key):
    gid = scan_for_gid(filename, short_name)
    value = ec.codes_get_array(gid, key)
    ec.codes_release(gid)
    return value[0]
Example #32
def main():

    path = dict(
        base='/lsdfos/kit/imk-tro/projects/MOD/Gruppe_Knippertz/nw5893/',
        data='forecast_archive/pamore/heatwave_25.07.19/',
        grid='forecast_archive/icon-eu-eps/grid/',
        plots='plots/heatwave_25.07.19/',
        colorpalette='additional_data/colorpalettes/')

    for lead_days in [4]:  #list(range(5)):
        run = dict(year=2019, month=7, day=25 - lead_days, hour=0)
        hours = [6, 12, 18, 100]
        for i in range(len(hours)):
            hours[i] += lead_days * 100

        data_tmax_6h = np.empty((4, 40, 75948))

        filenames_all = []
        for hour in hours:
            filenames_all.append([
                'iefff0{:03d}0000.m0{:02d}'.format(hour, member)
                for member in range(1, 41)
            ])
        path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/'.format(
            run['year'], run['month'], run['day'], run['hour'])

        with ExitStack() as stack:
            files_all = [[stack.enter_context(open(path['base'] + path['data'] + path['data_subfolder'] + filename,'rb'))\
              for filename in filenames_of_one_hour] for filenames_of_one_hour in filenames_all]

            for i, files_of_one_hour in enumerate(files_all):
                for j, file in enumerate(files_of_one_hour):
                    grib_id = eccodes.codes_grib_new_from_file(file)
                    data_tmax_6h[i, j, :] = eccodes.codes_get_array(
                        grib_id, 'values')
                    eccodes.codes_release(grib_id)
        del files_all, files_of_one_hour, file

        data_tmax_6h -= 273.15
        data_tmax_24h = data_tmax_6h.max(axis=0)

        ##### settings for plotting #####

        #point = dict(lat = 49.01, lon =  8.40, name = 'Karlsruhe')
        #point = dict(lat = 50.82, lon =  8.92, name = 'Kirchhain')
        #domain = dict(method = 'deltalatlon', radius =    0, deltalat = 700, deltalon = 760,\
        #              lat = 48.7, lon =  5.4, name = 'france-germany')
        domain = dict(method = 'deltalatlon', radius =    0, deltalat = 850, deltalon = 680,\
                      lat = 48.4, lon =  5.0, name = 'france-germany')

        #stat_processing = dict(method = 'max')
        #stat_processing = dict(method = 'min')
        #stat_processing = dict(method = 'median')
        #stat_processing = dict(method = 'spread')
        #stat_processing = dict(method = 'member_extract', member = 1)

        thresholds = []
        #thresholds.append(30.0)
        thresholds.append(36.0)
        #thresholds.append(38.0)
        #thresholds.append(40.0)

        ##### call plotting function #####

        #for stat_processing['member'] in range(1,41):
        #plot_statistical_value_around_point(path, run, data_tmax_24h, point, stat_processing)

        for threshold in thresholds:
            plot_prob_of_exceedance_around_point(path, run, data_tmax_24h,
                                                 domain, threshold)

    return