def map_percentile_single(incubes, outpath, region, anomaly=False):
    """
    Produces maps for individual scenarios of the 10th and 90th percentile
    of the model ensemble.

    :param incubes: single 2d file of a multi-model metric cube
    :param outpath: the path where the plot is saved
    :param region: the region dictionary as defined in constants
    :param anomaly: boolean, switch for anomaly calculation
    :return: plot (saved as a PNG in outpath; nothing is returned)
    """
    fname = incubes + '_2d.nc'
    fdict = atlas_utils.split_filename_path(fname)

    # Skip aggregations that are not defined for this metric.
    if fdict['aggregation'] not in cnst.METRIC_AGGS[fdict['metric']]:
        return

    ano = glob.glob(incubes + '_2d.nc')
    if len(ano) != 1:
        sys.exit('Found none or too many files, need one file')
    ano = ano[0]

    scen = fdict['scenario']
    metric = fdict['metric']
    variable = fdict['variable']
    season = fdict['season']
    bc = fdict['bc_res']

    # Anomalies of the historical period w.r.t. itself are meaningless.
    if anomaly and scen == 'historical':
        return

    # Locate the matching WFDEI (observational reference) file.
    wfdei_file = (cnst.METRIC_DATADIR + os.sep + 'WFDEI' + os.sep + metric +
                  '_' + variable + '_WFDEI_historical_' + season + '*_2d.nc')
    wfdei = glob.glob(wfdei_file)
    if len(wfdei) != 1:
        print(wfdei)
        print('No or too many wfdei files. Please check')
        pdb.set_trace()

    cube = iris.load_cube(ano)
    wcube = iris.load_cube(wfdei)

    if anomaly:
        # Load the matching historical run and compute absolute and
        # percentage anomalies against it.
        ano_hist = ano.replace(fdict['scenario'], 'historical')
        hist = iris.load_cube(ano_hist)

        data = atlas_utils.anomalies(hist, cube, percentage=False)
        data_perc = atlas_utils.anomalies(hist, cube, percentage=True)

        # Collapse the model ensemble to its 10th/90th percentiles.
        data_perc = data_perc.collapsed('model_name',
                                        iris.analysis.PERCENTILE,
                                        percent=[10, 90])
        data = data.collapsed('model_name', iris.analysis.PERCENTILE,
                              percent=[10, 90])

        # Range straddles zero -> diverging colour scale and symmetric
        # anomaly levels; otherwise a sequential scale.
        if np.nanmax(data.data) - np.nanmin(data.data) > np.nanmax(data.data):
            levels = (atlas_utils.datalevels_ano(
                          np.append(data[0].data, data[1].data)),
                      atlas_utils.datalevels_ano(
                          np.append(data[0].data, data[1].data)))
            cmap = 'RdBu_r' if 'tas' in variable else 'RdBu'
        else:
            levels = (atlas_utils.datalevels(
                          np.append(data[0].data, data[1].data)),
                      atlas_utils.datalevels(
                          np.append(data[0].data, data[1].data)))
            cmap = 'Reds' if 'tas' in variable else 'Blues'

        if np.nanmax(data_perc.data) - np.nanmin(data_perc.data) > np.nanmax(
                data_perc.data):
            plevels = (atlas_utils.datalevels_ano(
                           np.append(data_perc[0].data, data_perc[1].data)),
                       atlas_utils.datalevels_ano(
                           np.append(data_perc[0].data, data_perc[1].data)))
        else:
            plevels = (atlas_utils.datalevels(
                           np.append(data_perc[0].data, data_perc[1].data)),
                       atlas_utils.datalevels(
                           np.append(data_perc[0].data, data_perc[1].data)))

        plot_dic1 = {
            'data': data,
            'ftag': scen + 'Anomaly',
            'cblabel': 'anomaly',
            'levels': levels,
            'cmap': cmap
        }

        # Percentage anomalies are not shown for temperature metrics.
        if 'tas' not in variable:
            plot_dic2 = {
                'data': data_perc,
                'ftag': scen + 'PercentageAnomaly',
                'cblabel': 'percentageAnomaly',
                'levels': plevels,
                'cmap': cmap
            }
            toplot = [plot_dic1, plot_dic2]
        else:
            toplot = [plot_dic1]
    else:
        # Plain (non-anomaly) percentiles of the ensemble.
        data = cube.collapsed('model_name', iris.analysis.PERCENTILE,
                              percent=[10, 90])
        plot_dic1 = {
            'data': data,
            'ftag': scen,
            'cblabel': '',
            'levels':
                (atlas_utils.datalevels(np.append(data[0].data,
                                                  data[1].data)),
                 atlas_utils.datalevels(np.append(data[0].data,
                                                  data[1].data))),
            'cmap': 'viridis'
        }
        toplot = [plot_dic1]

    for p in toplot:
        # BUGFIX: contour handles reset per figure. They were previously
        # initialised once outside this loop, so a failed panel in a later
        # iteration could decorate itself with a stale handle from the
        # previous figure.
        # NOTE: 'map' was also renamed to 'mapw' to stop shadowing the
        # builtin.
        mapw = False
        map1 = False
        map2 = False

        f = plt.figure(figsize=lblr.getFigSize(region[0], 'map'), dpi=300)
        siz = 6
        lon = data.coord('longitude').points
        lat = data.coord('latitude').points

        # --- Top panel: WFDEI observational reference ---
        if not np.nansum(wcube.data):
            ax = f.add_subplot(311)
            ax.text(0.5, 0.5, 'Zero values', ha='center')
        else:
            try:
                ax = f.add_subplot(311, projection=ccrs.PlateCarree())
                # BUGFIX: 'x == np.nan' is always False (NaN never compares
                # equal), so the mask masked nothing. Use np.isnan instead.
                dataw = np.ma.array(wcube.data, mask=np.isnan(wcube.data))
                # BUGFIX: 'False' (string) is truthy and *enabled*
                # autoscaling; pass the boolean.
                ax.set_autoscale_on(False)
                mapw = ax.contourf(lon, lat, dataw,
                                   transform=ccrs.PlateCarree(),
                                   cmap='viridis',
                                   vmin=np.nanmin(dataw),
                                   vmax=np.nanmax(dataw),
                                   extend='both')
                ax.set_ylim(np.min(lat), np.max(lat))
                ax.set_xlim(np.min(lon), np.max(lon))
            except ValueError:
                ax = f.add_subplot(311)
                ax.text(0.5, 0.5, 'Zero values', ha='center')

        if mapw:
            ax.coastlines()
            # Gridlines
            xl = ax.gridlines(draw_labels=True)
            xl.xlabels_top = False
            xl.ylabels_right = False
            xl.xformatter = LONGITUDE_FORMATTER
            xl.yformatter = LATITUDE_FORMATTER
            xl.xlabel_style = {'size': siz, 'color': 'k'}
            xl.ylabel_style = {'size': siz, 'color': 'k'}
            # Countries
            ax.add_feature(cartopy.feature.BORDERS, linestyle='--')
            cb = plt.colorbar(mapw, format='%1.1f')
            cb.set_label(lblr.getYlab(metric, variable))
            if cnst.LANGUAGE == 'ENGLISH':
                ax.set_title('WFDEI historical')
            else:
                ax.set_title('WFDEI historique')

        # --- Middle panel: 90th percentile of the ensemble ---
        if not np.nansum(p['data'][1].data):
            ax1 = f.add_subplot(312)
            ax1.text(0.5, 0.5, 'Zero values', ha='center')
        else:
            try:
                ax1 = f.add_subplot(312, projection=ccrs.PlateCarree())
                # BUGFIX: np.isnan instead of '== np.nan' (see top panel).
                data1 = np.ma.array(p['data'][1].data,
                                    mask=np.isnan(p['data'][1].data))
                ax1.set_autoscale_on(False)
                map1 = ax1.contourf(lon, lat, data1,
                                    transform=ccrs.PlateCarree(),
                                    cmap=p['cmap'],
                                    levels=p['levels'][1],
                                    extend='both')
                ax1.set_ylim(np.min(lat), np.max(lat))
                ax1.set_xlim(np.min(lon), np.max(lon))
            except ValueError:
                ax1 = f.add_subplot(312)
                ax1.text(0.5, 0.5, 'Zero values', ha='center')

        if map1:
            ax1.coastlines()
            # Gridlines
            xl = ax1.gridlines(draw_labels=True)
            xl.xlabels_top = False
            xl.ylabels_right = False
            xl.xformatter = LONGITUDE_FORMATTER
            xl.yformatter = LATITUDE_FORMATTER
            xl.xlabel_style = {'size': siz, 'color': 'k'}
            xl.ylabel_style = {'size': siz, 'color': 'k'}
            # Countries
            ax1.add_feature(cartopy.feature.BORDERS, linestyle='--')
            cb = plt.colorbar(map1, format='%1.1f')
            cb.set_label(lblr.getYlab(metric, variable, anom=p['cblabel']))
            if cnst.LANGUAGE == 'ENGLISH':
                ax1.set_title('Future 90th percentile')
            else:
                ax1.set_title('90e percentile (futur)')

        # --- Bottom panel: 10th percentile of the ensemble ---
        if not np.nansum(p['data'][0].data):
            ax2 = f.add_subplot(313)
            ax2.text(0.5, 0.5, 'Zero values', ha='center')
        else:
            try:
                ax2 = f.add_subplot(313, projection=ccrs.PlateCarree())
                # BUGFIX: np.isnan instead of '== np.nan' (see top panel).
                data2 = np.ma.array(p['data'][0].data,
                                    mask=np.isnan(p['data'][0].data))
                ax2.set_autoscale_on(False)
                map2 = ax2.contourf(lon, lat, data2,
                                    transform=ccrs.PlateCarree(),
                                    cmap=p['cmap'],
                                    levels=p['levels'][0],
                                    extend='both')
                ax2.set_ylim(np.min(lat), np.max(lat))
                ax2.set_xlim(np.min(lon), np.max(lon))
            except ValueError:
                ax2 = f.add_subplot(313)
                ax2.text(0.5, 0.5, 'Zero values', ha='center')

        if map2:
            ax2.coastlines()
            # Gridlines
            xl = ax2.gridlines(draw_labels=True)
            xl.xlabels_top = False
            xl.ylabels_right = False
            xl.xformatter = LONGITUDE_FORMATTER
            xl.yformatter = LATITUDE_FORMATTER
            xl.xlabel_style = {'size': siz, 'color': 'k'}
            xl.ylabel_style = {'size': siz, 'color': 'k'}
            # Countries
            ax2.add_feature(cartopy.feature.BORDERS, linestyle='--')
            cb = plt.colorbar(map2, format='%1.1f')
            cb.set_label(lblr.getYlab(metric, variable, anom=p['cblabel']))
            if cnst.LANGUAGE == 'ENGLISH':
                ax2.set_title('Future 10th percentile')
            else:
                ax2.set_title('10e percentile (futur)')

        f.suptitle(lblr.getTitle(metric, variable, season, scen, bc,
                                 region[1], anom=p['cblabel']),
                   fontsize=10)

        # Single tight_layout call (was redundantly repeated per branch);
        # narrow regions get wider side margins.
        plt.tight_layout(rect=[0, 0.01, 1, 0.95])
        if region[0] in ('BF', 'SG'):
            f.subplots_adjust(right=0.8, left=0.2)
        else:
            f.subplots_adjust(left=0.05, right=1)

        plt.savefig(outpath + os.sep + fdict['metric'] + '_' +
                    fdict['variable'] + '_' + fdict['bc_res'] + '_' +
                    fdict['season'] + '_' + region[0] + '_mapPerc_' +
                    p['ftag'] + '.png')
        plt.close(f)
def nbModels_histogram_single(incubes, outpath, region, anomaly=False):
    """
    Histogram plot showing the number of models within different ranges of
    the metric (anomaly) value for a single scenario.

    :param incubes: wildcard path to all tseries multi-model cubes
    :param outpath: the path where the plot is saved
    :param region: the region dictionary as defined in constants
    :param anomaly: boolean, switch for anomaly calculation
    :return: plot (saved as a PNG in outpath; nothing is returned)
    """
    fname = incubes + '_tseries.nc'
    fdict = atlas_utils.split_filename_path(fname)

    # Skip aggregations that are not defined for this metric.
    if fdict['aggregation'] not in cnst.METRIC_AGGS[fdict['metric']]:
        return

    ano = glob.glob(fname)
    if len(ano) != 1:
        # BUGFIX: was a Python 2 print statement ('print incubes'), a
        # SyntaxError under Python 3 and inconsistent with the rest of
        # the file.
        print(incubes)
        pdb.set_trace()
        sys.exit('Found too many or no files, need one file')
    ano = ano[0]
    fdict = atlas_utils.split_filename_path(ano)

    scen = fdict['scenario']
    metric = fdict['metric']
    variable = fdict['variable']
    season = fdict['season']
    bc = fdict['bc_res']

    # Anomalies of the historical period w.r.t. itself are meaningless.
    if anomaly and scen == 'historical':
        return

    # Median over the sliced time period, per model.
    cube = iris.load_cube(ano)
    cube = atlas_utils.time_slicer(cube, fdict['scenario'])
    cube.data = np.ma.masked_invalid(cube.data)
    cube = cube.collapsed('year', iris.analysis.MEDIAN)

    if anomaly:
        # Load and reduce the matching historical run the same way.
        ano_hist = ano.replace(fdict['scenario'], 'historical')
        hist = iris.load_cube(ano_hist)
        hist = atlas_utils.time_slicer(hist, 'historical')
        hist.data = np.ma.masked_invalid(hist.data)
        hist = hist.collapsed('year', iris.analysis.MEDIAN)

        data = atlas_utils.anomalies(hist, cube, percentage=False)
        data_perc = atlas_utils.anomalies(hist, cube, percentage=True)
        data = np.ma.masked_invalid(data.data)
        data_perc = np.ma.masked_invalid(data_perc.data)

        # Choose bin edges: anomaly-centred bins when the range straddles
        # zero, otherwise a linear spread.
        if np.nansum(data) and np.ma.count(data):
            if np.nanmax(data) - np.nanmin(data) > np.nanmax(data):
                levels = atlas_utils.binlevels(data)
            else:
                levels = np.linspace(np.nanmin(data), np.nanmax(data), 14)
        else:
            # BUGFIX: np.arange(-1, 1, 10) yields the single value [-1],
            # an invalid bin specification for np.histogram (needs >= 2
            # edges); np.linspace gives the intended 10 edges on [-1, 1].
            levels = np.linspace(-1, 1, 10)
            data = np.zeros_like(data)

        # BUGFIX: the emptiness check previously tested 'data' instead of
        # 'data_perc' (copy-paste error).
        if np.nansum(data_perc) and np.ma.count(data_perc):
            if np.nanmax(data_perc) - np.nanmin(data_perc) > np.nanmax(
                    data_perc):
                plevels = atlas_utils.binlevels(data_perc)
            else:
                plevels = np.linspace(np.nanmin(data_perc),
                                      np.nanmax(data_perc), 14)
        else:
            # BUGFIX: linspace instead of arange (see above).
            plevels = np.linspace(-1, 1, 10)
            data_perc = np.zeros_like(data_perc)

        try:
            histo, h = np.histogram(data[np.isfinite(data)], bins=levels)
        except ValueError:
            pdb.set_trace()
        try:
            histop, hp = np.histogram(data_perc[np.isfinite(data_perc)],
                                      bins=plevels)
        except ValueError:
            pdb.set_trace()

        plot_dic1 = {
            'data': histo,
            'ftag': scen + 'Anomaly',
            'ylabel': lblr.getYlab(metric, variable, anom="anomaly"),
            'bins': h
        }

        # Percentage anomalies are not shown for temperature metrics.
        if 'tas' not in variable:
            plot_dic2 = {
                'data': histop,
                'ftag': scen + 'PercentageAnomaly',
                'ylabel': lblr.getYlab(metric, variable,
                                       anom="percentageAnomaly"),
                'bins': hp
            }
            toplot = [plot_dic1, plot_dic2]
        else:
            toplot = [plot_dic1]
    else:
        cdata = cube.data
        if np.nanmax(cdata) - np.nanmin(cdata) > np.nanmax(cdata):
            levels = atlas_utils.binlevels(cdata)
        else:
            levels = np.linspace(cdata.min(), cdata.max(), 10)
        histo, h = np.histogram(cdata, bins=levels)
        plot_dic1 = {
            'data': histo,
            'ftag': scen,
            'ylabel': lblr.getYlab(metric, variable, anom=""),
            'bins': h
        }
        toplot = [plot_dic1]

    for p in toplot:
        f = plt.figure(figsize=lblr.getFigSize(None, 'nbModelHistogram'))
        ax = f.add_subplot(111)

        # Renamed from 'bin' to avoid shadowing the builtin; the bar
        # centres ('middle') are computed once and reused.
        bins = p['bins']
        middle = bins[0:-1] + ((bins[1::] - bins[0:-1]) / 2)
        barlist = ax.bar(middle, p['data'], edgecolor='black',
                         width=(bins[1::] - bins[0:-1]), color='lightblue')

        # Bars centred on negative values get a different colour.
        dummy = np.array(barlist)
        for d in dummy[middle < 0]:
            d.set_color('lightslategray')
            d.set_edgecolor('black')

        try:
            ax.set_xlim(
                np.floor(
                    np.nanmin((bins[0:-1])[(p['data']) > 0]) -
                    (bins[1] - bins[0])),
                np.ceil(
                    np.nanmax((bins[1::])[(p['data']) > 0]) +
                    (bins[-1] - bins[-2])))
        except ValueError:
            # All-empty histogram: keep the default axis limits.
            pass

        ax.set_xlabel(p['ylabel'])
        if cnst.LANGUAGE == 'ENGLISH':
            ax.set_ylabel('Number of models')
        else:
            ax.set_ylabel(u'Nombre de modèles')

        if not np.nansum(p['data']):
            ax.text(0.5, 0.5, 'Zero values', zorder=10)
            # BUGFIX: was a Python 2 print statement.
            print(metric, 'set text')

        ax.set_title(lblr.getTitle(metric, variable, season, scen, bc,
                                   region[1], anom=p['ftag']),
                     fontsize=11)

        plt.savefig(outpath + os.sep + fdict['metric'] + '_' +
                    fdict['variable'] + '_' + fdict['bc_res'] + '_' +
                    fdict['season'] + '_' + region[0] +
                    '_nbModelHistogram_' + p['ftag'] + '.png')
        plt.close(f)