import os

import iris
import iris.analysis
import iris.analysis.cartography
import iris.coords
import iris.quickplot as qplt
import matplotlib.pyplot as plt
import numpy as np

import make_hist_maps
import plot_hist_maps

# update_json and make_runtitle are assumed to be package-local helpers
# defined elsewhere in this package.


def making_histogram_files(filename_list, wk_dir):
    """
    Read in data and create histogram cubes, save these to netcdf files.

    Arguments:
    * filename_list
        List of precipitation timeseries files
    * wk_dir
        Path to output directory
    """
    desc = {}
    for fname in filename_list:
        print("Loading cube for", fname)
        fname_tmp = os.path.basename(fname)
        hname = os.path.join(
            wk_dir, ".".join(fname_tmp.split(".")[:-1]) + "_hist.nc")
        ppndata1 = make_hist_maps.read_data_cube(fname)
        ppn_hist_cube = make_hist_maps.make_hist_ppn(ppndata1)
        iris.save(ppn_hist_cube, hname)
        desc.update({
            os.path.relpath(hname, start=wk_dir): {
                "long_name": "iris histogram cubes",
                "description":
                    "histograms saved individually for model and obs data"
            }
        })
    update_json("data", desc, os.path.join(wk_dir, "output.json"))
    return
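# A minimal usage sketch for making_histogram_files above. The input file
# names and output directory are hypothetical placeholders; any netCDF
# precipitation timeseries files would do.
def _example_making_histogram_files():
    filename_list = ["model_precip_3hr.nc", "obs_precip_3hr.nc"]  # hypothetical
    wk_dir = "./asop_output"  # hypothetical output directory
    # Writes one <name>_hist.nc per input into wk_dir and records the
    # files in wk_dir/output.json under the "data" key.
    making_histogram_files(filename_list, wk_dir)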
def plot_histogram_maps(hist_filename1, hist_filename2, runtitle,
                        plotname_root):
    """
    Plot histogram maps
    """
    ppn_hist_cube1 = make_hist_maps.read_data_cube(hist_filename1)
    ppn_hist_cube2 = make_hist_maps.read_data_cube(hist_filename2)

    avg_rain_bins_a, avg_rain_bins_frac_a = make_hist_maps.calc_rain_contr(
        ppn_hist_cube1)
    avg_rain_bins_b, avg_rain_bins_frac_b = make_hist_maps.calc_rain_contr(
        ppn_hist_cube2)

    # (optional) Define how you want to lump the bins together
    # (below is the default)
    all_ppn_bounds = [(0.005, 10.), (10., 50.), (50., 100.), (100., 3000.)]

    # Plot as actual contributions for a specific region,
    # e.g. 60E to 160E, 10S to 10N
    plotname = '{}_actual_contributions.png'.format(plotname_root)
    plot_hist_maps.plot_rain_contr(avg_rain_bins_a, avg_rain_bins_b,
                                   plotname, runtitle,
                                   'Timescale 1', 'Timescale 2',
                                   all_ppn_bounds,
                                   region=[60.0, -10.0, 160.0, 10.0])

    # Plot as fractional contributions
    plotname = '{}_fractional_contributions.png'.format(plotname_root)
    plot_hist_maps.plot_rain_contr(avg_rain_bins_frac_a, avg_rain_bins_frac_b,
                                   plotname, runtitle,
                                   'Timescale 1', 'Timescale 2',
                                   all_ppn_bounds,
                                   region=[60.0, -10.0, 160.0, 10.0], frac=1)
    return
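# A hedged sketch of calling the plot_histogram_maps variant above. The
# histogram file names and run title are hypothetical; the *_hist.nc files
# are assumed to have been produced by making_histogram_files beforehand.
def _example_plot_histogram_maps_simple():
    plot_histogram_maps("model_precip_3hr_hist.nc",  # hypothetical file
                        "obs_precip_3hr_hist.nc",    # hypothetical file
                        runtitle="Model vs Obs",
                        plotname_root="model_vs_obs")
    # Produces model_vs_obs_actual_contributions.png and
    # model_vs_obs_fractional_contributions.png.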
def making_histogram_files(data_filename1, data_filename2, hist_filename1,
                           hist_filename2):
    """
    Read in data and create histogram cubes, save these to netcdf files.

    Arguments:
    * data_filename1, data_filename2
        input filenames of time-varying precipitation data
    * hist_filename1, hist_filename2
        chosen filenames for output histogram cubes (netcdf files)
    """
    ppndata1 = make_hist_maps.read_data_cube(data_filename1)
    ppn_hist_cube1 = make_hist_maps.make_hist_ppn(ppndata1)
    iris.save(ppn_hist_cube1, hist_filename1)

    ppndata2 = make_hist_maps.read_data_cube(data_filename2)
    ppn_hist_cube2 = make_hist_maps.make_hist_ppn(ppndata2)
    iris.save(ppn_hist_cube2, hist_filename2)
    return
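# Usage sketch for the two-file making_histogram_files variant above; all
# four file names are hypothetical placeholders.
def _example_making_histogram_files_pair():
    making_histogram_files("model_precip_3hr.nc", "obs_precip_3hr.nc",
                           "model_precip_3hr_hist.nc",
                           "obs_precip_3hr_hist.nc")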
def plot_metric(filename1, filename2, dataname1, dataname2, season,
                timescale, dates, maskfile, plotname):
    """
    Args:
    * filename1, filename2:
        files containing histogram counts for two datasets, which must be
        on the same grid. filename2 data are the baseline (e.g. obs)
        against which data in filename1 will be compared.
    * dataname1, dataname2:
        names of datasets in filename1, filename2 (e.g. a model name and
        obs name, or two model names)
    * season:
        string descriptor of season for which data in filenames apply
        (e.g. 'JJA')
    * timescale:
        string descriptor of time frequency of original data
        (e.g. '3-hourly')
    * dates:
        string descriptor of dates to which histograms apply
    * maskfile:
        filename for land/sea fraction dataset (on same grid as both
        filenames) with values ranging from 0.0 = no land,
        to 1.0 = all land.
    * plotname:
        filename for plot

    Example usage:
        index, indexl, indexs, indextrop, indexnhml, indexshml = \
            plot_metric(filename1, filename2, dataname1, dataname2,
                        season, '3-hourly', '1990-2014', maskfile, plotname)
    """
    seasonc = season.upper()

    ppn_hist_cube1 = make_hist_maps.read_data_cube(filename1)
    ppn_hist_cube2 = make_hist_maps.read_data_cube(filename2)

    tot_rain_bins_seas_a, tot_rain_bins_frac_a = \
        make_hist_maps.calc_rain_contr(ppn_hist_cube1)
    tot_rain_bins_seas_b, tot_rain_bins_frac_b = \
        make_hist_maps.calc_rain_contr(ppn_hist_cube2)

    # Limit region to 60S-60N to fit with use of GPM-IMERG observations.
    # Could be omitted or made optional according to which two datasets
    # are being compared.
    ce = iris.coords.CoordExtent('longitude', 0.0, 360.0)
    ce2 = iris.coords.CoordExtent('latitude', -59, 59)

    tot_rain_bins_frac_a.coord('longitude').circular = True
    if tot_rain_bins_frac_a.coord('longitude').bounds is None:
        tot_rain_bins_frac_a.coord('longitude').guess_bounds()
        tot_rain_bins_frac_a.coord('latitude').guess_bounds()
    tot_rain_bins_frac_a1 = tot_rain_bins_frac_a.intersection(ce, ce2)

    tot_rain_bins_frac_b.coord('longitude').circular = True
    if tot_rain_bins_frac_b.coord('longitude').bounds is None:
        tot_rain_bins_frac_b.coord('longitude').guess_bounds()
        tot_rain_bins_frac_b.coord('latitude').guess_bounds()
    tot_rain_bins_frac_b1 = tot_rain_bins_frac_b.intersection(ce, ce2)

    # Skill: sum over bins of the pointwise minimum of the two fractional
    # contributions (1.0 = identical histograms).
    minf = tot_rain_bins_frac_a1.copy()
    minf.data = np.minimum(tot_rain_bins_frac_a1.data,
                           tot_rain_bins_frac_b1.data)
    skill = minf.collapsed('precipitation_flux', iris.analysis.SUM, mdtol=1)

    # Get mask (currently total rain < 1 mm/day)
    bin_mid2 = np.exp(
        np.log(0.005) + np.sqrt(
            np.linspace(0.5, 98.5, 99) *
            ((np.square(np.log(120.) - np.log(0.005))) / 59.)))
    bin_mid = np.zeros(100)
    bin_mid[1:] = bin_mid2[0:99]
    bin_mid3 = bin_mid.reshape(100, 1, 1)

    lshista = iris.load_cube(filename1)
    tot_events = lshista.collapsed('precipitation_flux', iris.analysis.SUM)
    tot_events.data = np.ma.masked_where(tot_events.data == 0.0,
                                         tot_events.data)
    tot_rain_bins = lshista * bin_mid3
    tot_rain_boxa = tot_rain_bins.collapsed('precipitation_flux',
                                            iris.analysis.SUM) / tot_events

    lshistb = iris.load_cube(filename2)
    lshistb.coord('latitude').coord_system = lshista.coord(
        'latitude').coord_system
    lshistb.coord('longitude').coord_system = lshista.coord(
        'longitude').coord_system
    tot_events = lshistb.collapsed('precipitation_flux', iris.analysis.SUM)
    tot_events.data = np.ma.masked_where(tot_events.data == 0.0,
                                         tot_events.data)
    tot_rain_bins = lshistb * bin_mid3
    tot_rain_boxb = tot_rain_bins.collapsed('precipitation_flux',
                                            iris.analysis.SUM) / tot_events

    tot_rain_boxa.data = np.ma.masked_where(tot_rain_boxa.data < 1.0,
                                            tot_rain_boxa.data)
    tot_rain_boxb.data = np.ma.masked_where(tot_rain_boxb.data < 1.0,
                                            tot_rain_boxb.data)
    tot_events = tot_rain_boxa * tot_rain_boxb

    # Extract same area as skill is calculated for
    tot_events.coord('longitude').circular = True
    if tot_events.coord('longitude').bounds is None:
        tot_events.coord('longitude').guess_bounds()
        tot_events.coord('latitude').guess_bounds()
    tot_events1 = tot_events.intersection(ce, ce2)

    # Apply land/sea mask to get sea or land only
    lsmask = iris.load_cube(maskfile)
    lsmask.coord('latitude').coord_system = lshista.coord(
        'latitude').coord_system
    lsmask.coord('longitude').coord_system = lshista.coord(
        'longitude').coord_system
    lsmask.coord('longitude').circular = True
    if lsmask.coord('longitude').bounds is None:
        lsmask.coord('longitude').guess_bounds()
        lsmask.coord('latitude').guess_bounds()
    lsmasks = lsmask.intersection(ce, ce2)
    lsmaskl = lsmask.intersection(ce, ce2)
    lsmasks.data = np.ma.masked_where(lsmasks.data >= 0.5, lsmasks.data)
    lsmaskl.data = np.ma.masked_where(lsmaskl.data < 0.5, lsmaskl.data)
    lsmasksea = lsmasks * tot_events1
    lsmaskland = lsmaskl * tot_events1

    # Mask the skill cube and calculate index as rms over region
    skill.data.mask = tot_events1.data.mask
    grid_areas = iris.analysis.cartography.area_weights(skill)
    index = skill.collapsed(['latitude', 'longitude'], iris.analysis.RMS,
                            weights=grid_areas)

    skilltrop = skill.extract(
        iris.Constraint(latitude=lambda cell: -15 <= cell <= 15))
    grid_areas = iris.analysis.cartography.area_weights(skilltrop)
    indextrop = skilltrop.collapsed(['latitude', 'longitude'],
                                    iris.analysis.RMS, weights=grid_areas)

    skillnhml = skill.extract(
        iris.Constraint(latitude=lambda cell: 30 <= cell <= 60))
    grid_areas = iris.analysis.cartography.area_weights(skillnhml)
    indexnhml = skillnhml.collapsed(['latitude', 'longitude'],
                                    iris.analysis.RMS, weights=grid_areas)

    skillshml = skill.extract(
        iris.Constraint(latitude=lambda cell: -60 <= cell <= -30))
    grid_areas = iris.analysis.cartography.area_weights(skillshml)
    indexshml = skillshml.collapsed(['latitude', 'longitude'],
                                    iris.analysis.RMS, weights=grid_areas)

    skill = minf.collapsed('precipitation_flux', iris.analysis.SUM, mdtol=1)
    skill.data.mask = lsmasksea.data.mask
    skilltst = skill.extract(
        iris.Constraint(latitude=lambda cell: -30 <= cell <= 30))
    grid_areas = iris.analysis.cartography.area_weights(skilltst)
    indexs = skilltst.collapsed(['latitude', 'longitude'],
                                iris.analysis.RMS, weights=grid_areas)

    skill = minf.collapsed('precipitation_flux', iris.analysis.SUM, mdtol=1)
    skill.data.mask = lsmaskland.data.mask
    skilltst = skill.extract(
        iris.Constraint(latitude=lambda cell: -30 <= cell <= 30))
    grid_areas = iris.analysis.cartography.area_weights(skilltst)
    indexl = skilltst.collapsed(['latitude', 'longitude'],
                                iris.analysis.RMS, weights=grid_areas)

    print(dataname1, dataname2,
          'Index = %0.2f, Index (land) = %0.2f, Index (sea) = %0.2f, '
          'Index (tropics) = %0.2f, Index (NH mid-lat) = %0.2f, '
          'Index (SH mid-lat) = %0.2f' %
          (index.data, indexl.data, indexs.data, indextrop.data,
           indexnhml.data, indexshml.data))

    skill = minf.collapsed('precipitation_flux', iris.analysis.SUM, mdtol=1)
    skill.data.mask = tot_events1.data.mask
    cf = qplt.pcolormesh(skill, vmin=0.5, vmax=0.95)
    ax = plt.gca()
    gl = ax.gridlines(draw_labels=True)
    gl.xlabels_top = False
    ax.coastlines()
    plt.title(dataname1 + ' vs ' + dataname2 + ' ' + timescale + ' ' +
              seasonc + ' ' + dates + '\nIndex = %0.2f' % (index.data))
    plt.savefig(plotname)
    plt.close()

    return index, indexl, indexs, indextrop, indexnhml, indexshml
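# A sketch of calling plot_metric, mirroring the docstring example; the
# histogram, mask and plot file names are hypothetical. The returned values
# are area-weighted RMS skill indices for the full 60S-60N domain, land,
# sea, tropics and the two mid-latitude bands.
def _example_plot_metric():
    index, indexl, indexs, indextrop, indexnhml, indexshml = plot_metric(
        "model_precip_3hr_hist.nc", "obs_precip_3hr_hist.nc",  # hypothetical
        "Model", "GPM-IMERG", "JJA", "3-hourly", "1990-2014",
        "landfrac.nc", "skill_map.png")  # hypothetical mask and plot names
    print("Global index:", index.data)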
def plot_histogram_maps(hist_filenames, plotname_root, wk_dir, region, ext,
                        settings):
    """
    Plot histogram maps
    """
    hist_filename1 = hist_filenames[0]
    hist_filename2 = hist_filenames[1]

    ppn_hist_cube1 = make_hist_maps.read_data_cube(hist_filename1)
    ppn_hist_cube2 = make_hist_maps.read_data_cube(hist_filename2)

    avg_rain_bins_a, avg_rain_bins_frac_a = make_hist_maps.calc_rain_contr(
        ppn_hist_cube1)
    avg_rain_bins_b, avg_rain_bins_frac_b = make_hist_maps.calc_rain_contr(
        ppn_hist_cube2)

    ppn_names = make_runtitle([hist_filename1, hist_filename2], settings)
    ppn1_name = ppn_names[hist_filename1].replace("_", " ")
    ppn2_name = ppn_names[hist_filename2].replace("_", " ")
    names = make_runtitle([hist_filename1, hist_filename2], settings,
                          model_only=True)
    runtitle = "{0} vs {1}".format(names[hist_filename1].replace("_", " "),
                                   names[hist_filename2].replace("_", " "))

    # (optional) Define how you want to lump the bins together
    # (below is the default)
    all_ppn_bounds = [(0.005, 10.), (10., 50.), (50., 100.), (100., 3000.)]

    # Plot as actual contributions for a specific region,
    # e.g. 60E to 160E, 10S to 10N
    desc = {}
    plotname = '{0}_actual_contributions{1}'.format(plotname_root, ext)
    plotname = os.path.join(wk_dir, plotname)
    plot_hist_maps.plot_rain_contr(avg_rain_bins_a, avg_rain_bins_b,
                                   plotname, runtitle, ppn1_name, ppn2_name,
                                   all_ppn_bounds, region=region)
    desc.update({
        os.path.relpath(plotname, start=wk_dir): {
            "description":
                "Actual contribution of each timescale for region "
                "{0}".format(region)
        }
    })

    # Plot as fractional contributions
    plotname = '{0}_fractional_contributions{1}'.format(plotname_root, ext)
    plotname = os.path.join(wk_dir, plotname)
    plot_hist_maps.plot_rain_contr(avg_rain_bins_frac_a, avg_rain_bins_frac_b,
                                   plotname, runtitle, ppn1_name, ppn2_name,
                                   all_ppn_bounds, region=region, frac=1)
    desc.update({
        os.path.relpath(plotname, start=wk_dir): {
            "description":
                "Fractional contribution of each timescale for region "
                "{0}".format(region)
        }
    })

    update_json("plots", desc, os.path.join(wk_dir, "output.json"))
    return
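# A hedged usage sketch for the driver-style plot_histogram_maps variant
# above. The file names and settings dict are hypothetical: make_runtitle
# (a package-local helper) is assumed to map each histogram file name to a
# display name using these settings.
def _example_plot_histogram_maps_driver():
    hist_filenames = ["model_precip_3hr_hist.nc", "obs_precip_3hr_hist.nc"]
    settings = {}  # hypothetical; whatever make_runtitle expects
    plot_histogram_maps(hist_filenames,
                        plotname_root="model_vs_obs",
                        wk_dir="./asop_output",
                        region=[60.0, -10.0, 160.0, 10.0],  # 60E-160E, 10S-10N
                        ext=".png",
                        settings=settings)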