Example 1
def generate_periods(cdo, index, out_dir, nc_in=None, nc_in2=None):
    '''
    Generate a map of the index, for the required time periods, starting from matrix
    '''

    from indices_misc import logger, clean
    from indices_misc import period_range_array, period_name_array
    from useful_functions import get_subfiles
    import pathlib

    for season in index['seasons']:
        for year_range, name in zip(period_range_array, period_name_array):
            cdo_year_command = "-selyear," + year_range
            for file, file_path in get_subfiles(out_dir):
                if file.startswith("._"):
                    continue  # skip macOS metadata files ('._' prefix)
                elif file.endswith(
                        index['name'] + ".nc"
                ):  # if file ends with index.nc, it is the pure index file, with matrix values (temp and field)
                    pathlib.Path(out_dir + "/" + season + "/").mkdir(
                        parents=True, exist_ok=True)
                    nc_out = out_dir + "/" + season + "/" + pathlib.Path(
                        file_path).stem + "_" + name + ".nc"
                    logger.debug(clean((nc_out)))
                    logger.debug(clean(("")))
                    cdo.timmean(input="-setreftime,1850-01-01,00:00:00 " +
                                cdo_year_command + " " + file_path,
                                output=nc_out,
                                options='-f nc',
                                force=False,
                                returnCdf=False)
Example 2
def delete_days(cdo, index, out_dir, nc_in, nc_in2=None):
    '''
    Delete wrongly generated days in rx5day index monthly
    '''
    from indices_misc import logger, clean
    from useful_functions import get_subfiles
    import pathlib
    from os import rename

    for file, file_path in get_subfiles(out_dir):
        if file.startswith("._"):
            continue  # skip macOS metadata files ('._' prefix)
        elif file.endswith(
                index['name'] + ".nc"
        ):  # if file ends with index.nc, it is the pure index file

            nc_out_days = out_dir + "/" + pathlib.Path(
                file_path).stem + "_withDays.nc"
            rename(file_path, nc_out_days)

            logger.debug(clean((file_path)))
            cdo.delete("day=17",
                       input="-delete,month=6,hour=12 " + nc_out_days,
                       output=file_path,
                       options='-f nc',
                       force=False,
                       returnCdf=False)
Example 3
def normal_index(cdo, index, out_dir, nc_in, nc_in2=None):
    '''
    Calculate "normal" indices, that generate a matrix out, without needing any additional input parameters
    '''
    from indices_misc import logger, clean
    import pathlib

    index_cdo_function = getattr(
        cdo, index['cdo_fun']
    )  # get the cdo function corresponding to the input index
    nc_out_normal = (out_dir + '/' + pathlib.Path(nc_in).stem + "_" +
                     index['name'] + ".nc")

    add_params = ''
    add_command = ''
    if 'add_params' in index:
        add_params = index['add_params']
    if 'add_command' in index:
        add_command = index['add_command']

    logger.debug(clean((nc_out_normal)))
    index_cdo_function(add_params,
                       input=add_command + nc_in,
                       output=nc_out_normal,
                       options='-f nc',
                       force=False,
                       returnCdf=False)
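
# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes the python-cdo bindings ("pip install cdo") and a hypothetical index
# entry; the dict keys ('name', 'cdo_fun') are the ones normal_index() reads
# above, but the operator, paths and file names below are made up.
if __name__ == '__main__':
    from cdo import Cdo

    cdo = Cdo()
    fd_index = {'name': 'fd', 'cdo_fun': 'etccdi_fd'}  # hypothetical frost-days entry
    normal_index(cdo, fd_index, out_dir='/tmp/indices',
                 nc_in='/tmp/tasmin_MODEL-X_rcp45.nc')  # hypothetical input file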
Example 4
def generate_ts(cdo, index, out_dir, nc_in=None, nc_in2=None):
    '''
    Generate timeseries of the index, for the whole area, starting from matrix
    '''

    from indices_misc import logger, clean
    from useful_functions import get_subfiles
    import pathlib

    for file, file_path in get_subfiles(out_dir):
        if file.startswith("._"):
            continue  # skip macOS metadata files ('._' prefix)
        elif file.endswith(
                index['name'] + ".nc"
        ):  # if file ends with index.nc, it is the pure index file, with matrix values (temp and field)
            nc_out = out_dir + "/" + pathlib.Path(file_path).stem + "_ts.nc"
            logger.debug(clean((nc_out)))
            logger.debug(clean(("")))
            cdo.fldmean(input="-setreftime,1850-01-01,00:00:00 " + file_path,
                        output=nc_out,
                        options='-f nc',
                        force=False,
                        returnCdf=False)
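
# For reference (not in the original source): the chained call above is what the
# python-cdo bindings turn into a single CDO command line of the form
#
#   cdo -f nc fldmean -setreftime,1850-01-01,00:00:00 <model>_<index>.nc <model>_<index>_ts.nc
#
# where the file names are placeholders.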
Example 5
def loop_models():
    # loop the required folders (for indices that require two parameters)
    for experiment in experiment_set:
        # loop of all models inside the experiment folder
        for model, model_path in get_subdirs(experiment):
            param_path_list = {}
            if 'ignore' in index_in and [model, args.rcp] in index_in['ignore']:  # this model/rcp should not be used for this index
                debug(model+' Ignored ')
                continue
            # loop of all parameters inside each model
            for param, param_path in get_subdirs(model_path):
                # if the param is the required one for the index
                if param in index_in['param']:
                    for region, region_path in get_subdirs(param_path):
                        if 'ignore' in index_in and region in index_in['ignore']:
                            continue

                        if region not in param_path_list.keys():
                            param_path_list[region] = [''] * (len(index_in['param']) + 1)  # 1 more for the out_dir
                        # loop all files inside the param path
                        for file, file_path in get_subfiles(region_path):

                            if file.startswith("._"):
                                continue  # skip macOS metadata files ('._' prefix)

                            elif file.endswith(".nc"):  # check if file is .nc
                                output_dir = None
                                if "rcp45" in experiment:
                                    output_dir = indices_output_dir + '/' + index_in['name'] + '/' + region + '/rcp45/models/' + model
                                elif "rcp85" in experiment:
                                    output_dir = indices_output_dir + '/' + index_in['name'] + '/' + region + '/rcp85/models/' + model

                                pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)

                                if len(index_in['param']) == 1:
                                    for function in index_in['loop_functions']:
                                        function(index=index_in, cdo=cdo, out_dir=output_dir, nc_in=file_path)
                                else:  # index with multiple param
                                    param_path_list[region][-1] = output_dir
                                    param_path_list[region][index_in['param'].index(param)] = file_path
            if len(index_in['param']) > 1:
                for region, paths in param_path_list.items():
                    for function in index_in['loop_functions']:
                        if paths[-1] != '' and paths[0] != '' and paths[1] != '':
                            function(index=index_in, cdo=cdo, out_dir=paths[-1], nc_in=paths[0], nc_in2=paths[1])
                        else:
                            debug(clean("Missign param for out_dir: " + paths[-1]))
Example 6
def gsl_index(cdo, index, out_dir, nc_in, nc_in2=None):
    '''
    For gsl index, which requires some special calculation
    '''
    from indices_misc import debug, clean
    import pathlib

    tasmin = nc_in
    tasmax = nc_in2

    nc_out_gsl = (out_dir + '/' + pathlib.Path(nc_in).stem + "_" +
                  index['name'] + ".nc")

    debug(clean(nc_out_gsl))

    # For some reason, the etccdi operator fails with:  etccdi_gsl (Abort): Operator not callable by this name! Name is: etccdi_gsl
    # cdo.etccdi_gsl(input="-divc,2 -add " + tasmin + " " + tasmax + " -gtc,1 " + tasmax, output=nc_out_gsl, force=False, returnCdf=False)

    # Use eca_gsl instead. It needs a land mask as its second input; since the regions we use are already on land, just produce a field of all 1s.
    cdo.eca_gsl(input="-divc,2 -add " + tasmin + " " + tasmax +
                " -addc,1 -mulc,0 -setmisstoc,1 -seltimestep,1 " + tasmax,
                output=nc_out_gsl,
                force=False,
                returnCdf=False)
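
    # For reference, the call above expands to a single CDO pipeline of the form
    # (file names are placeholders):
    #
    #   cdo eca_gsl -divc,2 -add tasmin.nc tasmax.nc \
    #               -addc,1 -mulc,0 -setmisstoc,1 -seltimestep,1 tasmax.nc out.nc
    #
    # i.e. the first input is the daily mean temperature (tasmin + tasmax)/2 and the
    # second is an all-ones field built from the first timestep of tasmax, standing
    # in for the land mask that eca_gsl expects.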
Example 7
def manual_index(cdo, index, out_dir, nc_in, nc_in2=None):
    '''
    Calculate indices manually
    '''

    from indices_misc import logger, clean
    import pathlib

    nc_out = (out_dir + '/' + pathlib.Path(nc_in).stem + "_" + index['name'] +
              ".nc")  # name the output after the first input file

    if nc_in2 is not None:  # index with two parameters: pass both input files to cdo
        nc_in += " " + nc_in2

    logger.debug(clean((nc_out)))
    cdo.chname(
        index['param'][0] + "," + index['cdo_name'],  # change parameter name
        input="-setattribute," + index['param'][0] + "@long_name=" +
        index['long_name']  # change parameter long name
        + index['cdo_fun'] + nc_in,  # the actual command
        output=nc_out,
        options='-f nc',
        force=False,
        returnCdf=False)
Example 8
def graph_ts():
    '''
    Collect the timeseries files of the index for every region and plot them (ensemble percentiles and all single models)
    '''

    import numpy as np
    from netCDF4 import Dataset

    for index_l, index_path in get_subdirs(indices_output_dir):
        if index_l != index_in['name']:
            continue

        # first find the minimum and maximum for the index
        max_list_models = []
        min_list_models = []

        max_list = {}
        min_list = {}
        avg6190 = {}
        models_plot = {}
        for region, region_path in get_subdirs(index_path):
            if 'ignore' in index_in and region in index_in['ignore']:
                continue
            avg6190[region] = None
            models_plot[region] = []
            max_list[region] = []
            min_list[region] = []
            for rcp, rcp_path in get_subdirs(region_path):
                for file, file_path in get_subfiles(rcp_path):
                    if file.startswith("._"):
                        continue
                    elif file.endswith("ts.nc"):
                        fh = Dataset(file_path, 'r')
                        param = fh.variables[index_in['cdo_name']][:, 0, 0]
                        max_list[region].append(np.amax(param))
                        min_list[region].append(np.amin(param))
                        fh.close()
                    elif file.endswith("Avg6190.nc"):
                        fh = Dataset(file_path, 'r')
                        avg6190[region] = fh.variables[index_in['cdo_name']][0, 0, 0]
                        fh.close()

                # all models
                for model, model_path in get_subdirs(rcp_path+"/models/"):
                    for file, file_path in get_subfiles(model_path):
                        if file.startswith("._"):
                            continue

                        elif (file.endswith("ts_anomal.nc") and index_in['do_anom']) or (file.endswith('ts.nc') and not index_in['do_anom']):
                            fh = Dataset(file_path, 'r')
                            param = fh.variables[index_in['cdo_name']][:, 0, 0]
                            max_list_models.append(np.amax(param))
                            min_list_models.append(np.amin(param))
                            fh.close()
                            models_plot[region].append(file_path)

        # plot
        for region, region_path in get_subdirs(index_path):
            if 'ignore' in index_in and region in index_in['ignore']:
                continue
            files_to_plot = []
            for rcp, rcp_path in get_subdirs(region_path):
                for file, file_path in get_subfiles(rcp_path):
                    if file.startswith("._"):
                        continue
                    elif file.endswith("ts.nc"):
                        files_to_plot.append(file_path)
                        debug(clean((file_path)))
            plot_time_series(index_in, files_to_plot, models_plot_array=models_plot[region], region=region,
                             png_name_in=region_path+"/"+index_in['name']+'_'+region+'_Models_ts.png',
                             png_name_latex=indices_graphs_out_dir+"/"+index_l+"/"+region+"/"+index_in['name']+'_'+region+'_Models_ts.png',
                             min=min(min_list[region]), max=max(max_list[region]), avg6190=avg6190[region])
            plot_time_series(index_in, files_to_plot, region=region,
                             png_name_in=region_path+"/"+index_in['name']+'_'+region+'_ts.png', min=min(min_list[region]), max=max(max_list[region]), avg6190=avg6190[region])
Example 9
parser = argparse.ArgumentParser()
parser.add_argument("--loop", help="Loop all the models and calculates the index", action="store_true")
parser.add_argument("--merge", help="Merge the index from all the models")
parser.add_argument("--graph", help="Graph the merged index")
parser.add_argument("--index", help="Index to calculate")
parser.add_argument("--rcp", help="rcp experiment: posisble values")

args = parser.parse_args()
if len(sys.argv) == 1:
    parser.print_help()
    sys.exit()

args.index = args.index.lower()

if args.index not in indices:
    debug(clean(("Index not supported. Here is a list of the supported indeces: ")))
    debug(list(indices.keys()))
    sys.exit()
index_in = indices[args.index]
debug(clean((index_in['name'] + ": " + index_in['description'])))
debug(clean(("Required Parameter(s): " + str(index_in['param']))))

if 'cdo_fun' in index_in:
    debug(clean((index_in['cdo_fun'])))

if args.rcp not in rcp_paths:
    debug(clean(("RCP not supported. Here is a list of the supported RCPs:")))
    debug(clean((list(rcp_paths.keys()))))
    sys.exit()

experiment_set = set([])
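
# Illustrative invocation (the script name is an assumption; the index/rcp values
# must match keys of the `indices` and `rcp_paths` dicts defined elsewhere):
#
#   python calc_indices.py --loop --index fd --rcp rcp45
#
# --index is lower-cased and checked against `indices`, --rcp against `rcp_paths`,
# before any processing starts.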
Example 10
def plot_basemap_regions(index, nc_in, png_name_in, region_in, title, min, max, png_name_latex=None, poly_in=False):
    '''
    Plot a map of the index over the given region on a Basemap projection and save it as a png
    '''
    from netCDF4 import Dataset
    import numpy as np

    import matplotlib.pyplot as plt
    from mpl_toolkits.basemap import Basemap
    import matplotlib.colors as mcolors
    from matplotlib import ticker

    from indices_misc import debug, clean, boxDict, figsize, paral, merid

    box = boxDict[region_in]  # long1, long2, lat1, lat2

    fh = Dataset(nc_in, 'r')

    lons = fh.variables['lon'][:]
    lats = fh.variables['lat'][:]
    param = fh.variables[index['cdo_name']][0:, :, :]

    fh.close()

    margin = 2
    lon_center = box[0]+(box[1]-box[0])/2
    lat_center = box[2]+(box[3]-box[2])/2

    # fig, (ax, cax) = plt.subplots(nrows=2, figsize=figsize[region_in], gridspec_kw={"height_ratios": [1, 0.1]})
    fig, ax = plt.subplots(nrows=1, figsize=figsize[region_in])

    ax.set_title(title, fontweight='bold', fontsize=10)
    m = Basemap(ax=ax, projection='cass', resolution='l',
                llcrnrlon=box[0]-margin, urcrnrlon=box[1]+margin,
                llcrnrlat=box[2]-margin, urcrnrlat=box[3]+margin,
                lon_0=lon_center, lat_0=lat_center)

    lons_dim = len(lons.shape)
    if 2 == lons_dim:
        lon = lons
        lat = lats
    elif 1 == lons_dim:
        lon, lat = np.meshgrid(lons, lats)
    else:
        print("Error in lon lat array dimension: %d" % lons_dim)

    xi, yi = m(lon, lat)

    # ------ COLORBAR -------
    # 1 - Define some nice colors for temp / prec representation
    colors_red = plt.cm.Spectral(np.linspace(0, 0.5, 256))        # nice yellow to red
    colors_white_red = plt.cm.afmhot(np.linspace(0.8, 1, 30))     # nice white to yellow
    colors_blue = plt.cm.seismic(np.linspace(0, 0.5, 286)[::-1])  # nice white to blue

    all_colors_rb = np.vstack((colors_red, colors_white_red, colors_blue))  # put them together (at this point it is symmetric)
    all_colors_br = all_colors_rb[::-1]  # The inverted version for blue to red

    # Choose if you want blue to red or red to blue based on the index
    if index['colorbar'] in ['temp_pos', 'prec_neg']:
        all_colors = all_colors_br
    elif index['colorbar'] in ['temp_neg', 'prec_pos']:
        all_colors = all_colors_rb

    hline = index['hline']  # just to avoid typing
    # the TwoSlopeNorm requires that the parameters are ordered: min center max
    if not (min < hline and max > hline):
        if (min >= hline):
            min = hline - 1
        else:
            max = hline+1

    # now that we know the parameters are ordered, we can make the color bar non-symmetric, according to the min/max ratio
    center = min+(max-min)/2

    if abs(max-hline) > abs(hline-min):  # if more pos than neg:
        start = int(round(286*(1-abs(hline-min)/abs(max-hline))))  # calculate the amount of neg we can have, considering 286 is the max
        all_colors_short = all_colors[start:]
    else:  # if more neg than pos
        end = int(round(286*(abs(max-hline)/abs(hline-min))))  # calculate the amount of pos we can have
        all_colors_short = all_colors[:286+end]

    # Finally set the colormap
    two_slope_norm = mcolors.TwoSlopeNorm(vmin=min, vmax=max, vcenter=center)
    color_map = mcolors.LinearSegmentedColormap.from_list('custom_map', all_colors_short)

    # plot
    cs = m.pcolor(xi, yi, np.squeeze(param), alpha=0.7, cmap=color_map, norm=two_slope_norm)
    # Add Colorbar to figure
    cbar = m.colorbar(cs, location='bottom', extend='both', pad='7%')

    if 'rel' in nc_in:
        cbar.set_label('Relative Change [%]', fontsize=9)
    else:
        cbar.set_label(index['units'], fontsize=9)

    cbar.ax.set_xticklabels(cbar.ax.get_xticklabels(), rotation=45, fontsize=8)
    tick_locator = ticker.MaxNLocator(nbins=10)
    cbar.locator = tick_locator
    cbar.update_ticks()

    # Add Grid Lines
    m.drawparallels(paral[region_in], labels=[1, 0, 0, 0], fontsize=10)
    m.drawmeridians(merid[region_in], labels=[0, 0, 0, 1], fontsize=10)

    # Add Coastlines, and Country Boundaries and topo map
    m.drawcoastlines()
    m.drawcountries(color='k')
    m.shadedrelief(alpha=1)

    plt.savefig(png_name_in, dpi=200)
    debug(clean((png_name_in)))
    if png_name_latex is not None:
        plt.savefig(png_name_latex, dpi=200)
        debug(png_name_latex)
    # plt.show()
    plt.close()
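
# Minimal stand-alone demonstration (made-up numbers, not part of the original
# module) of matplotlib's TwoSlopeNorm used above: each side of vcenter is mapped
# linearly, so vmin -> 0.0, vcenter -> 0.5 and vmax -> 1.0, which is why the
# function makes sure the three values are in ascending order before building it.
import matplotlib.colors as mcolors

demo_norm = mcolors.TwoSlopeNorm(vmin=-1.0, vcenter=1.0, vmax=5.0)
print(demo_norm([-1.0, 1.0, 5.0]))  # -> [0.0, 0.5, 1.0]
print(demo_norm(3.0))               # -> 0.75 (halfway between vcenter and vmax)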
Example 11
def duration_percentile_index(cdo, index, out_dir, nc_in, nc_in2=None):
    '''
    Calculate histogram indices.
    These indices are calculated in three steps:
    1 - Calculate the ydrunmin and ydrunmax
    2 - Calculate the required percentile
    3 - Calculate the index
    '''
    from indices_misc import debug, clean
    import pathlib

    index_cdo_function = getattr(
        cdo, index['cdo_fun']
    )  # get the cdo function corresponding to the input index

    if index_cdo_function is None:
        debug(clean(("Error, not an index from etccdi")))
        return -1

    nc_out_runmin = (out_dir + '/' + pathlib.Path(nc_in).stem + "_ydrunmin.nc")
    nc_out_runmax = (out_dir + '/' + pathlib.Path(nc_in).stem + "_ydrunmax.nc")
    nc_out_th = (out_dir + '/' + pathlib.Path(nc_in).stem + "_thr" +
                 index['percentile'] + ".nc")
    nc_out_index = (out_dir + '/' + pathlib.Path(nc_in).stem + "_" +
                    index['name'] + ".nc")

    windowDays = "5"
    selyear = "-selyear,1961/1990 "
    readMethod = "circular"
    percentileMethod = "rtype8"

    # Generate runmin and runmax in the reference period (if not already generated)
    cdo.ydrunmin(windowDays,
                 input=selyear + nc_in,
                 output=nc_out_runmin,
                 options='-f nc',
                 force=False,
                 returnCdf=False)
    cdo.ydrunmax(windowDays,
                 input=selyear + nc_in,
                 output=nc_out_runmax,
                 options='-f nc',
                 force=False,
                 returnCdf=False)

    # calculate index using eca function => do it for each year and then merge
    if 'eca' in index['cdo_fun']:
        # calculate required percentile in ref period
        cdo.ydrunpctl(
            index['percentile'],
            windowDays,  # "rm="+readMethod, "pm="+percentileMethod,  # The eca version does not need these input arguments
            input=selyear + nc_in + " " + nc_out_runmin + " " + nc_out_runmax,
            output=nc_out_th,
            options='-f nc',
            force=False,
            returnCdf=False)
        selyear_index(cdo, index, out_dir, nc_in, nc_out_th)

    else:  # directly with etccdi, but it gives unexpected results (bug)
        # calculate required percentile in ref period
        cdo.ydrunpctl(index['percentile'],
                      windowDays,
                      "rm=" + readMethod,
                      "pm=" + percentileMethod,
                      input=selyear + nc_in + " " + nc_out_runmin + " " +
                      nc_out_runmax,
                      output=nc_out_th,
                      options='-f nc',
                      force=False,
                      returnCdf=False)
        index_cdo_function(6,
                           "freq=year",
                           input=nc_in + " " + nc_out_th,
                           output=nc_out_index,
                           options='-f nc',
                           force=False,
                           returnCdf=False)

    debug(clean((nc_out_index)))
    print("")
Example 12
def percentile_index(cdo, index, out_dir, nc_in, nc_in2=None):
    '''
    Calculate percentile indices.
    For temperature indices that require bootstrapping, first calculate ydrunmin and ydrunmax (if not already generated)
    For precipitation indices just use timmin and timmax
    '''
    from indices_misc import logger, clean
    import pathlib

    index_cdo_function = getattr(
        cdo, index['cdo_fun']
    )  # get the cdo function corresponding to the input index

    if index_cdo_function is None:
        logger.debug(clean(("Error, not an index from etccdi")))
        return -1

    nc_out_runmin = (out_dir + '/' + pathlib.Path(nc_in).stem + "_runmin.nc")
    nc_out_runmax = (out_dir + '/' + pathlib.Path(nc_in).stem + "_runmax.nc")
    nc_out_percentile = (out_dir + '/' + pathlib.Path(nc_in).stem + "_" +
                         index['name'] + ".nc")

    windowDays = ""
    bootstrapping = "1961,1990"

    if index['isTemp']:
        windowDays = "5,"  # Number of timestamps required for Temp percentiles
        # Generate runmin and runmax (if not already generated)
        cdo.ydrunmin(windowDays,
                     input=nc_in,
                     output=nc_out_runmin,
                     options='-f nc',
                     force=False,
                     returnCdf=False)
        cdo.ydrunmax(windowDays,
                     input=nc_in,
                     output=nc_out_runmax,
                     options='-f nc',
                     force=False,
                     returnCdf=False)
    else:
        # Generate runmin and runmax (if not already generated)
        # timmin and timmax compute the minimum and maximum over time (one value per grid point)
        # setrtomiss sets values in a given range to the missing value. Only wet days matter (PR > 1 mm),
        # so precipitation in the range -50,1 is set to missing (i.e. ignored)
        cdo.timmin(input="-setrtomiss,-50,1 " + nc_in,
                   output=nc_out_runmin,
                   options='-f nc',
                   force=False,
                   returnCdf=False)
        cdo.timmax(input="-setrtomiss,-50,1 " + nc_in,
                   output=nc_out_runmax,
                   options='-f nc',
                   force=False,
                   returnCdf=False)

    logger.debug(clean((nc_out_runmin)))
    logger.debug(clean((nc_out_runmax)))
    logger.debug(clean((nc_out_percentile)))
    index_cdo_function(windowDays + bootstrapping,
                       input=nc_in + " " + nc_out_runmin + " " + nc_out_runmax,
                       output=nc_out_percentile,
                       options='-f nc',
                       force=False,
                       returnCdf=False)
    logger.debug(clean(("")))
Example 13
def direct_periods_index(cdo, index, out_dir, nc_in, nc_in2=None):
    '''
    For indices that give a single value for the time period, like sdii
    Generate given index for the following 4 periods:
    Past        : 1861 1890
    Reference   : 1961 1990
    Present     : 1991 2020
    Future      : 2061 2090

    The index is calculated for seasons ANN DJF and JJA
    '''
    import pathlib
    from indices_misc import logger, clean
    from indices_misc import period_range_array, period_name_array
    from os import path

    index_cdo_function = getattr(
        cdo, index['cdo_fun']
    )  # get the cdo function corresponding to the input index

    if index_cdo_function is None:
        logger.debug(clean(("Error, not an index from etccdi")))
        return -1
    print(nc_in2)
    if nc_in2 is None:
        nc_in2 = ''

    if 'add_fun' in index:
        index_add_function = getattr(
            cdo, index['add_fun']
        )  # get the additional cdo function specified for this index

        if index_add_function is None:
            logger.debug(clean(("Error, not an function from cdo")))
            return -1

    add_params = ''
    if 'add_params' in index:
        add_params = index['add_params']

    for season in index['seasons']:
        pathlib.Path(out_dir + '/' + season).mkdir(parents=True, exist_ok=True)
        cdo_season_command = "-select,season=" + season
        for year_range, name in zip(period_range_array, period_name_array):
            cdo_year_command = "-selyear," + year_range
            nc_out = out_dir + "/" + season + "/" + pathlib.Path(
                nc_in).stem + "_" + index['name'] + "_" + name + ".nc"
            if path.exists(nc_out):  # False:
                logger.debug(clean(("%s already exists", nc_out)))
                return

            logger.debug(clean((nc_out)))
            if 'add_fun' not in index:
                index_cdo_function(add_params,
                                   input=cdo_year_command + " " +
                                   cdo_season_command + " " + nc_in + " " +
                                   nc_in2,
                                   output=nc_out,
                                   options='-f nc',
                                   force=False,
                                   returnCdf=False)
            else:
                cdo_function_command = '-' + index['cdo_fun'] + ',' + add_params
                index_add_function(
                    index['add_fun_params'],
                    input=cdo_function_command + ' ' + cdo_year_command + " " +
                    cdo_season_command + " " + nc_in + " " + nc_in2,
                    output=nc_out,
                    options='-f nc',
                    force=False,
                    returnCdf=False)
Example 14
def plot_bar(index, regions, glob45, glob85, png_name_in, rel=False):
    '''
    Plot a bar chart of the index for the given regions (and optional global values) under RCP4.5 and RCP8.5,
    with the 25th-75th percentile range as error bars
    '''

    import matplotlib.pyplot as plt
    from math import copysign
    from indices_misc import debug, clean

    start = 0
    end = 4
    barSep = 0.55
    barWidth = 0.3
    startpos = [1.5, 2.5+(len(regions)+1)*barSep]  # position for RCP45 and RCP85 in a 0 to 3 line
    color = {'Global': 'lightgray', 'Alpine': 'papayawhip', 'Andes': 'paleturquoise'}

    texts = []
    pos = list(startpos)

    plt.figure()

    if glob45 is not None:
        bars = [glob45-index['hline'], glob85-index['hline']]

        plt.bar(pos, bars, width=barWidth, bottom=index['hline'], color=color['Global'], edgecolor='gray', capsize=2, label='Global')

        texts.append(plt.text(pos[0], glob45, str(round(float(glob45), 1)), zorder=4, ha='center', va='center', fontsize=9,
                     bbox={'facecolor': 'white', 'edgecolor': 'none', 'pad': -2, 'alpha': 0.5}))

        texts.append(plt.text(pos[1], glob85, str(round(float(glob85), 1)), zorder=4, ha='center', va='center', fontsize=9,
                     bbox={'facecolor': 'white', 'edgecolor': 'none', 'pad': -2, 'alpha': 0.5}))

        pos[0] += barSep+0.05
        pos[1] += barSep+0.05
        end += barSep

    for name, values in regions.items():
        bars = [values['rcp45']-index['hline'], values['rcp85']-index['hline']]
        errors = [[values['rcp45']-values['rcp45_25'], values['rcp85']-values['rcp85_25']],
                  [values['rcp45_75']-values['rcp45'], values['rcp85_75']-values['rcp85']]]

        plt.bar(pos, bars, width=barWidth, bottom=index['hline'], color=color[name], edgecolor='gray', yerr=errors, label=name,
                error_kw=dict(ecolor='gray', lw=1, capsize=3, capthick=1, alpha=0.5))

        texts.append(plt.text(pos[0], values['rcp45'], str(round(float(values['rcp45']), 1)), zorder=4, ha='center', va='center', fontsize=9,
                     bbox={'facecolor': 'white', 'edgecolor': 'none', 'pad': -2, 'alpha': 0.5}))
        texts.append(plt.text(pos[1], values['rcp85'], str(round(float(values['rcp85']), 1)), zorder=4, ha='center', va='center', fontsize=9,
                     bbox={'facecolor': 'white', 'edgecolor': 'none', 'pad': -2, 'alpha': 0.5}))

        pos[0] += barSep
        pos[1] += barSep
        end += barSep*2

    ax = plt.gca()

    correct = (ax.get_ylim()[1]-ax.get_ylim()[0])/25
    for text in texts:
        xtext, ytext = text.get_position()
        ytext -= index['hline']
        correct = copysign(correct, ytext)
        ytext = ytext+correct
        ytext += index['hline']
        text.set_position([xtext, ytext])
        if ytext < index['hline']:
            # ax.tick_params(axis="x", pad=-15)
            plt.legend(loc='lower left', fontsize=8)
            plt.title(index['short_desc'], fontweight='bold', pad=25)
        else:
            plt.legend(loc='upper left', fontsize=8)
            plt.title(index['short_desc'], fontweight='bold', pad=10)
    # ticks = [(pos[0]-startpos[0])/2.0 + startpos[0]-barSep/2, (pos[1]-startpos[1])/2.0 + startpos[1]-barSep/2]
    tx45 = plt.text((pos[0]-startpos[0])/2.0 + startpos[0]-barSep/2, ax.get_ylim()[0]-abs(correct), 'RCP4.5', ha='center', va='top', fontweight='bold')
    tx85 = plt.text((pos[1]-startpos[1])/2.0 + startpos[1]-barSep/2, ax.get_ylim()[0]-abs(correct), 'RCP8.5', ha='center', va='top', fontweight='bold')

    if rel:
        plt.ylabel('Relative Change [%]')
    else:
        plt.ylabel(index['units'])

    # spine left and bottom at zero. The x does not need ticks
    ax.spines['left'].set_position('zero')
    ax.spines['right'].set_color('none')
    ax.yaxis.tick_left()
    ax.spines['bottom'].set_position(['data', index['hline']])
    ax.spines['top'].set_color('none')
    ax.xaxis.set_ticks_position('none')

    # kwargs = dict(fontweight='bold')
    # plt.xticks(ticks, ['RCP4.5', 'RCP8.5'], **kwargs)
    plt.xticks([])
    plt.xlim(start, end)
    # plt.show()
    debug(clean(png_name_in))
    plt.savefig(png_name_in, dpi=150, bbox_inches="tight")

    if 'limit_barplot' in index:
        plt.ylim(index['limit_barplot'])
        correct = (ax.get_ylim()[1]-ax.get_ylim()[0])/25
        tx45.set_position([(pos[0]-startpos[0])/2.0 + startpos[0]-barSep/2, ax.get_ylim()[0]-abs(correct)])
        tx85.set_position([(pos[1]-startpos[1])/2.0 + startpos[1]-barSep/2, ax.get_ylim()[0]-abs(correct)])
        plt.legend(loc=index['legendb'], fontsize=8)
        plt.title(index['short_desc'], fontweight='bold', pad=index['titlepad'])
        plt.savefig(png_name_in.replace('.png', '_lim.png'), dpi=150, bbox_inches="tight")
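
# Illustrative sketch (an assumption about the caller, not original code): plot_bar()
# expects `regions` to map a region name (one of the keys of `color` above) to the
# ensemble median and 25th/75th percentiles for both RCPs -- exactly the keys read
# in the loop above. The numbers below are invented.
example_regions = {
    'Alpine': {'rcp45': 2.1, 'rcp45_25': 1.4, 'rcp45_75': 2.9,
               'rcp85': 3.8, 'rcp85_25': 2.7, 'rcp85_75': 4.6},
}
# plot_bar(index_in, example_regions, glob45=1.8, glob85=3.2,
#          png_name_in='/tmp/fd_bar.png')  # hypothetical index entry and output path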
Example 15
def merge_periods(cdo, rcp_path, index):
    '''
    Regrid each model's period maps to a common grid and compute the ensemble median per season and period;
    if the index requires anomalies, also compute the absolute and relative differences to the reference period
    '''

    from indices_misc import logger, clean
    from indices_misc import period_name_array
    import pathlib

    for season in index['seasons']:
        pathlib.Path(rcp_path + '/' + season).mkdir(parents=True,
                                                    exist_ok=True)
        nc_ensmean_reference = ''
        for period in period_name_array:

            index_file_pathlib_list = pathlib.Path(rcp_path +
                                                   '/models').rglob('*' +
                                                                    season +
                                                                    '*/*' +
                                                                    period +
                                                                    '*.nc')
            index_all_models = ''
            for index_file_pathlib in index_file_pathlib_list:
                index_file = str(index_file_pathlib)
                if index_file_pathlib.name.startswith("._") or ("regrid" in index_file):
                    continue  # jump to next file
                index_file_regrid = index_file.replace('.nc', '_regrid.nc')
                target_grid_file = rcp_path + '/target_grid.nc'

                cdo.remapbil(target_grid_file,
                             input=index_file,
                             output=index_file_regrid,
                             force=False)  # regrid to the target grid

                index_all_models += index_file_regrid + ' '

            if index_all_models == '':
                logger.debug(
                    clean(("No files found for Season %s and period %s" %
                           (season, period))))
            else:
                nc_ensmean_out = rcp_path + '/' + season + '/' + index[
                    'name'] + "_" + period + '.nc'
                logger.debug(clean((nc_ensmean_out)))
                cdo.enspctl("50",
                            input=index_all_models,
                            output=nc_ensmean_out,
                            options='-f nc',
                            force=False,
                            returnCdf=False)  # median = 50th percentile

                if index['do_anom']:
                    if "reference" in period:
                        nc_ensmean_reference = nc_ensmean_out

                    elif nc_ensmean_reference != '':  # subtract the reference from the period. Only for non-reference periods, if the reference file has been found.
                        nc_ensmean_sub_out = rcp_path + '/' + season + '/' + index[
                            'name'] + "_" + period + '_sub.nc'
                        nc_ensmean_sub_rel_out = rcp_path + '/' + season + '/' + index[
                            'name'] + "_" + period + '_sub_rel.nc'
                        logger.debug(clean((nc_ensmean_sub_out)))
                        cdo.sub(input=nc_ensmean_out + " " +
                                nc_ensmean_reference,
                                output=nc_ensmean_sub_out,
                                options='-f nc',
                                force=False,
                                returnCdf=False)
                        logger.debug(clean((nc_ensmean_sub_rel_out)))
                        setrtomiss = ' '
                        if 'setrtomiss' in index:
                            setrtomiss = index['setrtomiss']
                        cdo.mulc(100,
                                 input="-setattribute," + index['cdo_name'] +
                                 "@units=\"Relative Change (%)\" -div " +
                                 nc_ensmean_sub_out + setrtomiss + ' -abs ' +
                                 nc_ensmean_reference,
                                 output=nc_ensmean_sub_rel_out,
                                 options='-f nc',
                                 force=True,
                                 returnCdf=False)
Example 16
def selyear_index(cdo, index, out_dir, nc_in, nc_in2=None):
    '''
    For indices that need a per-year selyear loop to generate a timeseries, like sdii
    '''
    import shutil
    import os.path
    import pathlib
    from indices_misc import logger, clean

    index_cdo_function = getattr(
        cdo, index['cdo_fun']
    )  # get the cdo function corresponding to the input index

    if index_cdo_function is None:
        logger.debug(clean(("Error, not an index from etccdi")))
        return

    if nc_in2 is None:
        nc_in2 = ''

    first_year_file = ((nc_in.split("_"))[-2].split("-")[0])[0:4]
    last_year_file = ((nc_in.split("_"))[-2].split("-")[1])[0:4]

    # To avoid calculating indices again for the historical data
    if False and "rcp85" in nc_in:
        first_year = "2005"
    else:
        first_year = first_year_file

    nc_out_array = ""

    if first_year_file != "1861" or last_year_file != "2090":
        logger.debug(
            clean(("Error in first or last year " + first_year_file + "-" +
                   last_year_file)))
        return

    nc_out_fldmean = (out_dir + '/' + pathlib.Path(nc_in).stem + "_" +
                      index['name'] + "_ts.nc").replace(
                          first_year_file, first_year)
    # Create nc files for field mean and year mean.
    if os.path.exists(nc_out_fldmean):  # False:
        logger.debug(clean(nc_out_fldmean + " already exists"))
        return

    pathlib.Path(out_dir + '/years/').mkdir(parents=True, exist_ok=True)

    add_params = ''
    if 'add_params' in index:
        add_params = index['add_params']

    # loop all years in the range and calculate the index
    for year in range(int(first_year), int(last_year_file) + 1):
        cdo_selyear_command = "-selyear," + str(year)
        nc_out = out_dir + '/years/' + pathlib.Path(
            nc_in).stem + "_" + index['name'] + str(year) + ".nc"

        logger.debug(clean((nc_out)))
        index_cdo_function(add_params,
                           input=cdo_selyear_command + " " + nc_in + " " +
                           nc_in2,
                           output=nc_out,
                           options='-f nc',
                           force=False,
                           returnCdf=False)

        # add the out nc to the array
        nc_out_array = nc_out_array + " " + nc_out

    # after calculating index for each year, merge them.
    nc_out_merge = (
        out_dir + '/' +
        pathlib.Path(nc_in).stem + "_" + index['name'] + ".nc").replace(
            first_year_file,
            first_year)  # adapt the name to the actual first year used
    cdo.mergetime(input=nc_out_array,
                  output=nc_out_merge,
                  options='-f nc',
                  force=False,
                  returnCdf=False)

    # fldmean to obtain a timeseries
    cdo.fldmean(input="-setreftime,1850-01-01,00:00:00 " + nc_out_merge,
                output=nc_out_fldmean,
                options="-f nc",
                force=False,
                returnCdf=False)

    logger.debug(clean(("")))
    logger.debug(clean((nc_out_fldmean)))
    logger.debug(clean(("")))
    try:
        shutil.rmtree(out_dir + '/years/')
    except OSError as e:
        logger.debug(clean(("Error: %s" % (e.strerror))))
Example 17
def merge_ts(cdo, rcp_path, index):  # ts = timeseries
    '''
    Merge the timeseries of all models into ensemble percentiles (25/50/75);
    if the index requires anomalies, subtract each model's 1961-1990 mean first
    '''

    from indices_misc import logger, clean
    from useful_functions import get_subdirs
    from useful_functions import get_subfiles
    from indices_graph_functions import plot_time_series

    array_all_models = ""
    file_path_list = []  # Just to plot all models
    array_all_models_avg6190 = ""

    for model, model_path in get_subdirs(rcp_path + "/models/"):
        for file, file_path in get_subfiles(model_path):
            if file.startswith("._"):
                continue

            elif file.endswith(
                    "ts.nc"
            ):  # if the file ends with ts.nc it is a time series that needs to be ensembled
                if index[
                        'do_anom']:  # if the index requires an anomaly, calculate it before merging
                    nc_avg_61_90 = file_path.replace('_ts.nc', '_avg_61_90.nc')
                    nc_anomal = file_path.replace('_ts.nc', '_ts_anomal.nc')

                    year_range = "-selyear,1961/1990"
                    avg_61_90_val = cdo.timmean(
                        input=year_range + " " + file_path,
                        output=nc_avg_61_90,
                        force=False,
                        options='-f nc',
                        returnCdf=True).variables[index['cdo_name']][0, 0, 0]

                    cdo.subc(avg_61_90_val,
                             input=file_path,
                             force=False,
                             output=nc_anomal
                             )  # subtract the time mean of the 1961-1990 period

                    array_all_models += nc_anomal + ' '
                    file_path_list.append(
                        nc_anomal)  # only to plot individually for debugging
                    array_all_models_avg6190 += nc_avg_61_90 + ' '
                else:  # no anomaly required
                    array_all_models += file_path + ' '
                    file_path_list.append(
                        file_path)  # only to plot individually for debugging

    if array_all_models == '':
        logger.debug(clean(("No files found in %s" % rcp_path)))
    else:
        if index['do_anom']:
            plot_time_series(index=index,
                             file_path_in_array=file_path_list,
                             png_name_in=rcp_path + '/' + index['name'] +
                             "_allModels_ts_anom.png")
        else:
            plot_time_series(index,
                             file_path_list,
                             png_name_in=rcp_path + '/' + index['name'] +
                             "_allModels_ts.png")

        percentil_array = ["25", "50", "75"]  # median = 50th percentile
        for percentil in percentil_array:
            nc_ensmean_out = rcp_path + '/' + index[
                'name'] + '_percent_' + percentil + '_ts.nc'

            if percentil == "mean":
                cdo.ensmean(input=array_all_models,
                            output=nc_ensmean_out,
                            options='-f nc',
                            force=False,
                            returnCdf=False)
            else:
                cdo.enspctl(percentil,
                            input=array_all_models,
                            output=nc_ensmean_out,
                            options='-f nc',
                            force=False,
                            returnCdf=False)  # Ensemble percentiles

            if index['do_anom']:
                nc_ensmean_out_medianOfAvg6190 = rcp_path + '/' + index[
                    'name'] + '_percent_50_ts_medianOfAvg6190.nc'
                cdo.ensmean(input=array_all_models_avg6190,
                            output=nc_ensmean_out_medianOfAvg6190,
                            options='-f nc',
                            force=False,
                            returnCdf=False)

                # find the anomaly (this result is not really used)
                nc_avg_61_90 = nc_ensmean_out.replace('_ts.nc',
                                                      '_avg_61_90.nc')
                nc_anomal = nc_ensmean_out.replace('_ts.nc', '_ts_anomal.nc')

                year_range = "-selyear,1961/1990"
                avg_61_90_val = cdo.timmean(
                    input=year_range + " " + nc_ensmean_out,
                    output=nc_avg_61_90,
                    options='-f nc',
                    force=False,
                    returnCdf=True).variables[index['cdo_name']][0, 0, 0]

                cdo.subc(avg_61_90_val, input=nc_ensmean_out,
                         output=nc_anomal)  # subtract the 1961-1990 average file
                logger.debug(clean((nc_anomal)))
Example 18
def plot_time_series(index, file_path_in_array, models_plot_array=None, region=None, png_name_in=None, png_name_latex=None, min=None, max=None, avg6190=None):
    '''
    Plot the timeseries of the index for the given files (ensemble median/percentiles per RCP,
    optionally with all single models in the background) and save it as a png
    '''
    import matplotlib.pyplot as plt
    import matplotlib.dates as date_plt
    import datetime as dt  # Python standard library datetime  module
    from netcdftime import utime
    from netCDF4 import Dataset  # http://code.google.com/p/netcdf4-python/

    from useful_functions import moving_average

    from bisect import bisect_left
    from indices_misc import debug, clean
    import pathlib

    show_each = False
    # if 'Alpine' in file_path_in_array[0]:
    #     return

    date_fill = None
    rcp45_p25_fill = None
    rcp45_p75_fill = None

    rcp85_p25_fill = None
    rcp85_p75_fill = None

    histo_date_fill = None
    histo_rcp45_p25_fill = None
    histo_rcp45_p75_fill = None

    histo_rcp85_p25_fill = None
    histo_rcp85_p75_fill = None

    days_2006 = 57160.5  # 2006 value in time:units = "days since 1850-1-1 00:00:00" ; time:calendar = "standard" ;'
    minutes_2006 = days_2006 * 24 * 60
    half_window = 0  # half of the window for the smoothing, in years
    half_window2 = 5  # half of the window for the smoothing, in years

    fig, ax = plt.subplots(figsize=(15, 6))

    if 'do_month' in index and index['do_month']:
        # half_window = half_window*12  # for months
        # half_window2 = half_window2*12  # for months
        half_window = 6  # 1years

    # date [x:-y], where x+y = window - 1
    window = half_window * 2
    date_start = half_window
    date_end = half_window - 1

    window2 = half_window2 * 2
    date_start2 = half_window2
    date_end2 = half_window2 - 1

    to_annotate = []

    #  plot all models, in low alpha, in the background (zorder=1)
    if models_plot_array is not None:
        for model_plot in models_plot_array:
            data_in = Dataset(model_plot, mode='r')

            time = data_in.variables['time'][:]
            param = data_in.variables[index['cdo_name']][:]

            # create time vector
            time_uni = data_in.variables['time'].units
            time_cal = data_in.variables['time'].calendar

            cdftime = utime(time_uni, calendar=time_cal)
            date = [cdftime.num2date(t) for t in time]

            if 'minutes' in time_uni:
                index_2006 = bisect_left(time, minutes_2006)
            elif 'days' in time_uni:
                index_2006 = bisect_left(time, days_2006)
            else:
                debug('wrong time units, this function only works with minutes or days units, please adapt the script for your units')
                return

            param_scaled_smoothed = moving_average(arr=param[:, 0, 0], win=window)

            if 'rcp45' in model_plot:
                plt.plot(date[date_start: index_2006],  param_scaled_smoothed[:index_2006-date_start],   'k', alpha=0.01, zorder=1)
                plt.plot(date[index_2006-1:-date_end if window > 0 else None], param_scaled_smoothed[index_2006-1-date_end-1:], 'g', alpha=0.02, zorder=1)
            elif 'rcp85' in model_plot:
                plt.plot(date[date_start: index_2006],  param_scaled_smoothed[:index_2006-date_start],   'k', alpha=0.01, zorder=1)
                plt.plot(date[index_2006-1: -date_end if window > 0 else None], param_scaled_smoothed[index_2006-1-date_end-1:], 'r', alpha=0.02, zorder=1)

    # plot median, in foreground (zorder = 4), and collect data for the shadows
    for file_path_in in file_path_in_array:

        debug(clean((file_path_in)))

        data_in = Dataset(file_path_in, mode='r')

        time = data_in.variables['time'][:]
        param = data_in.variables[index['cdo_name']][:]

        # create time vector
        time_uni = data_in.variables['time'].units
        time_cal = data_in.variables['time'].calendar

        cdftime = utime(time_uni, calendar=time_cal)
        date = [cdftime.num2date(t) for t in time]

        # ############# A plot of Maximum precipitation ##############

        if 'minutes' in time_uni:
            index_2006 = bisect_left(time, minutes_2006)
        elif 'days' in time_uni:
            index_2006 = bisect_left(time, days_2006)
        else:
            debug('wrong time units, this function only works with minutes or days units, please adapt the script for your units')
            return

        param_smoothed = moving_average(arr=param[:, 0, 0], win=window)
        param_smoothed2 = moving_average(arr=param[:, 0, 0], win=window2)

        if "25" in file_path_in and "rcp45" in file_path_in:
            rcp45_p25_fill = param_smoothed[index_2006-1-date_end-1:]
            histo_rcp45_p25_fill = param_smoothed[:index_2006-date_start]
            # plt.plot(date[index_2006-1:-date_end if window > 0 else None], param_smoothed[index_2006-1-date_end-1:], 'palegreen', zorder=4)
            # plt.plot(date[date_start: index_2006],  param_smoothed[:index_2006-date_start],  'silver', zorder=4)

        elif "75" in file_path_in and "rcp45" in file_path_in:
            rcp45_p75_fill = param_smoothed[index_2006-1-date_end-1:]
            date_fill = date[index_2006-1:-date_end if window > 0 else None]
            histo_rcp45_p75_fill = param_smoothed[:index_2006-date_start]
            histo_date_fill = date[date_start:index_2006]
            # plt.plot(date[index_2006-1:-date_end if window > 0 else None], param_smoothed[index_2006-1-date_end-1:], 'palegreen', zorder=4)
            # plt.plot(date[date_start: index_2006],  param_smoothed[:index_2006-date_start],  'silver', zorder=4)

        elif "rcp45" in file_path_in:
            plt.plot(date[index_2006-1: -date_end if window > 0 else None], param_smoothed[index_2006-1-date_end-1:], 'g', zorder=4, alpha=0.3)  # label=pathlib.Path(file_path_in).stem.split("45")[0])#.split("_histo")[0])
            plt.plot(date[index_2006-1: -date_end2 if window2 > 0 else None], param_smoothed2[index_2006-1-date_end2-1:], 'g', label="RCP45", zorder=5)
            to_annotate.append([date[index_2006-1: -date_end2 if window2 > 0 else None], param_smoothed2[index_2006-1-date_end2-1:], 'palegreen'])
            if show_each:
                plt.plot(date[date_start: index_2006],  param_smoothed[:index_2006-date_start],   'k', zorder=4, label=pathlib.Path(file_path_in).stem.split("45")[0]) #.split("_histo")[0])
            else:
                plt.plot(date[date_start: index_2006],  param_smoothed[:index_2006-date_start],   'k', zorder=4, alpha=0.15)
                plt.plot(date[date_start2: index_2006],  param_smoothed2[:index_2006-date_start2],  'k', zorder=4, label="Historical")

        if "25" in file_path_in and "rcp85" in file_path_in:
            rcp85_p25_fill = param_smoothed[index_2006-1-date_end-1:]
            histo_rcp85_p25_fill = param_smoothed[:index_2006-date_start]
            # plt.plot(date[index_2006-1:-date_end if window > 0 else None], param_smoothed[index_2006-1-date_end-1:], 'lightsalmon', zorder=4)
            # plt.plot(date[date_start: index_2006],  param_smoothed[:index_2006-date_start],  'silver', zorder=4)

        elif "75" in file_path_in and "rcp85" in file_path_in:
            rcp85_p75_fill = param_smoothed[index_2006-1-date_end-1:]
            date_fill = date[index_2006-1:-date_end if window > 0 else None]
            histo_rcp85_p75_fill = param_smoothed[:index_2006-date_start]
            histo_date_fill = date[date_start:index_2006]
            # plt.plot(date[index_2006-1:-date_end if window > 0 else None], param_smoothed[index_2006-1-date_end-1:], 'lightsalmon', zorder=4)
            # plt.plot(date[date_start: index_2006],  param_smoothed[:index_2006-date_start],  'silver', zorder=4)

        elif "rcp85" in file_path_in:
            plt.plot(date[index_2006-1:-date_end if window > 0 else None], param_smoothed[index_2006-1-date_end-1:], 'r', zorder=4, alpha=0.3)  # label=pathlib.Path(file_path_in).stem.split("45")[0])#.split("_histo")[0])
            plt.plot(date[index_2006-1: -date_end2 if window2 > 0 else None], param_smoothed2[index_2006-1-date_end2-1:], 'r', label="RCP85", zorder=5)
            to_annotate.append([date[index_2006-1: -date_end2 if window2 > 0 else None], param_smoothed2[index_2006-1-date_end2-1:], 'lightsalmon'])
            if show_each:
                plt.plot(date[date_start:index_2006],  param_smoothed[:index_2006-date_start],   'k', zorder=4, label=pathlib.Path(file_path_in).stem.split("45")[0])#.split("_histo")[0])
            else:
                plt.plot(date[date_start:index_2006],  param_smoothed[:index_2006-date_start],   'k', zorder=4, alpha=0.15)  # label=pathlib.Path(file_path_in).stem.split("45")[0])#.split("_histo")[0])
        data_in.close()

        if show_each:
            plt.legend()  # loc=(0, 0), fontsize=7, frameon=True, ncol=11,  bbox_to_anchor=(0, -0.5))  # Legend for smoothed
            plt.tight_layout(rect=[0, 0, 1, 1])
            # add horizontal line
            plt.axhline(y=index['hline'], color='k')
            # highlight 1961 to 1990 range
            plt.axvspan(dt.datetime(1961, 1, 1), dt.datetime(1990, 12, 30), color='b', alpha=0.1)
            plt.grid(b=True, linestyle='--', linewidth=1)
            plt.show()

    # Ends loop, plot shading, zorder=2 for white, zorder=3 for green and red
    if rcp45_p25_fill is not None:
        plt.fill_between(date_fill, rcp45_p25_fill, rcp45_p75_fill,
                         facecolor="g",    # The fill color
                         # color='',       # The outline color
                         alpha=0.2, zorder=3)        # Transparency of the fill
        plt.fill_between(date_fill, rcp45_p25_fill, rcp45_p75_fill,
                         facecolor="white",    # The fill color
                         # color='',       # The outline color
                         zorder=2)

    if rcp85_p25_fill is not None:
        plt.fill_between(date_fill, rcp85_p25_fill, rcp85_p75_fill,
                         facecolor="r",    # The fill color
                         # color='',       # The outline color
                         alpha=0.2, zorder=3)        # Transparency of the fill

        plt.fill_between(date_fill, rcp85_p25_fill, rcp85_p75_fill,
                         facecolor="white",    # The fill color
                         # color='',       # The outline color
                         zorder=2)        # Transparency of the fill

    if histo_rcp45_p25_fill is not None:
        plt.fill_between(histo_date_fill, histo_rcp45_p25_fill, histo_rcp45_p75_fill,
                         facecolor="silver",    # The fill color
                         # color='',       # The outline color
                         alpha=1, zorder=2)

    if histo_rcp85_p25_fill is not None:
        plt.fill_between(histo_date_fill, histo_rcp85_p25_fill, histo_rcp85_p75_fill,
                         facecolor="silver",   # The fill color
                         # color='',      # The outline color
                         alpha=1, zorder=2)       # Transparency of the fill

    # plt.tight_layout(rect=[0, 0, 1, 1])

    # add horizontal line at y=0

    plt.axhline(y=index['hline'], color='k', alpha=0.2, linestyle='--')
    # debug(clean((list(plt.yticks()[0]))))
    # plt.yticks(list(plt.yticks()[0]) + [index['hline']])
    if avg6190 is None or ('do_rel' in index and not index['do_rel']):
        ax.secondary_yaxis('right', functions=(lambda x: x, lambda x: x))
    else:
        secaxy = ax.secondary_yaxis('right', functions=(lambda x: (x*100)/abs(avg6190), lambda x: (x*abs(avg6190))/100))
        secaxy.set_ylabel('Relative change [%]', fontweight='bold')

    # if avg6190 is not None:
    #     plt.title(index['short_desc'].split('(')[1].split(')')[0]+" [1961:1990] = "+str(round(float(avg6190), 1))+"           ", loc='right', fontsize=10)

    # highlight periods
    plt.axvspan(dt.datetime(1961, 1, 1), dt.datetime(1990, 12, 30), color='b', alpha=0.05)
    plt.axvspan(dt.datetime(1861, 1, 1), dt.datetime(1890, 12, 30), color='k', alpha=0.05)
    plt.axvspan(dt.datetime(1991, 1, 1), dt.datetime(2020, 12, 30), color='k', alpha=0.05)
    plt.axvspan(dt.datetime(2061, 1, 1), dt.datetime(2090, 12, 30), color='k', alpha=0.05)
    plt.axvline(x=dt.datetime(2006, 1, 1), color='k', alpha=0.2, linestyle='--')

    plt.grid(linestyle='dotted', linewidth=1, axis='y', alpha=0.4)

    cdftime = utime(time_uni, calendar=time_cal)
    date = [cdftime.num2date(time[140])]
    dates = [dt.datetime(1861,  1,  1),
             dt.datetime(1890, 12, 30),
             dt.datetime(1961,  1,  1),
             dt.datetime(1990, 12, 30),
             dt.datetime(2006,  1,  1),
             dt.datetime(2020,  1,  1),
             dt.datetime(2061,  1,  1),
             dt.datetime(2090, 12, 30)]

    dates_plot = [date_plt.date2num(date) for date in dates]
    plt.xticks(dates_plot)
    # format the ticks
    years_fmt = date_plt.DateFormatter('%Y')
    ax.xaxis.set_major_formatter(years_fmt)
    plt.xlim(dt.datetime(1861,  1,  1), dt.datetime(2100,  1,  1))

    # plt.ticklabel_format(useOffset=True, axis='y')
    plt.title(index['short_desc'], fontweight='bold')
    if region is not None:
        plt.title("         "+region, loc='left', fontsize=10)

    leg_loc = 'upper left'
    if 'legend' in index:
        leg_loc = index['legend']
    plt.legend(loc=leg_loc, fancybox=True, facecolor='white')

    if 'do_month' in index and index['do_month']:
        plt.xlabel("Month", fontweight='bold')
    else:
        plt.xlabel("Year", fontweight='bold')

    plt.ylabel(index['units'], fontweight='bold')

    if png_name_in is None:
        plt.show()
    else:

        nice_name = index['short_desc'].split('(')[1].split(')')[0] + ' = '
        if avg6190 is not None:
            avg_name = (index['short_desc'].split('(')[1].split(')')[0]+" [1961:1990] = "+str(round(float(avg6190), 1)))
        else:
            avg_name = None
        ann_list = []
        ann_list.extend(annot_max(index, to_annotate, nice_name))
        ann_list.append(annot_avg(avg_name, index))
        plt.savefig(png_name_in.replace('.png', '_ind.png'), dpi=150, bbox_inches="tight")

        for ann in ann_list:
            if ann is not None:
                ann.remove()

        if min is not None and max is not None:
            plt.ylim(min, max)

        ann_list = []
        ann_list.extend(annot_max(index, to_annotate, nice_name))
        ann_list.append(annot_avg(avg_name, index))

        plt.savefig(png_name_in, dpi=150, bbox_inches="tight")
        debug(clean(png_name_in))
        if png_name_latex is not None:
            debug(clean(png_name_latex))
            plt.savefig(png_name_latex, dpi=150, bbox_inches="tight")

        for ann in ann_list:
            if ann is not None:
                ann.remove()

        if 'limits' in index:
            plt.ylim(index['limits'][0], index['limits'][1])

            ann_list = []
            ann_list.extend(annot_max(index, to_annotate, nice_name))
            ann_list.append(annot_avg(avg_name, index))

            plt.savefig(png_name_in.replace('.png', '_lim_'+str(index['limits'][1])+'.png'), dpi=150, bbox_inches="tight")
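
# NOTE (assumption): moving_average() used above is imported from useful_functions
# and is not part of this listing. The sketch below is reconstructed only from how
# its result is indexed above (a centred running mean over win + 1 samples that
# returns len(arr) - win values); it is not the original implementation.
import numpy as np


def moving_average(arr, win):
    """Centred running mean over win + 1 samples (identity when win <= 0)."""
    if win <= 0:
        return np.asarray(arr)
    return np.convolve(arr, np.ones(win + 1) / (win + 1), mode='valid')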