Code Example #1
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):

    # calculate climatological mean fields
    obs_clim_dataset = ds.Dataset(obs_dataset.lats, obs_dataset.lons,
                                  obs_dataset.times,
                                  utils.calc_temporal_mean(obs_dataset))
    model_clim_datasets = []
    for dataset in model_datasets:
        model_clim_datasets.append(
            ds.Dataset(dataset.lats, dataset.lons, dataset.times,
                       utils.calc_temporal_mean(dataset)))

    # Metrics (spatial standard deviation and pattern correlation)
    # determine the metrics
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # create the Evaluation object
    taylor_evaluation = Evaluation(
        obs_clim_dataset,  # Climatological mean of reference dataset for the evaluation
        model_clim_datasets,  # list of climatological means from model datasets for the evaluation
        [taylor_diagram])

    # run the evaluation (Taylor diagram statistics)
    taylor_evaluation.run()

    taylor_data = taylor_evaluation.results[0]

    plotter.draw_taylor_diagram(taylor_data,
                                model_names,
                                obs_name,
                                file_name,
                                pos='upper right',
                                frameon=False)
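
A usage sketch, for orientation only (not from the original page): assuming the names the snippet relies on resolve to the Apache OCW modules (ds = ocw.dataset, utils = ocw.utils, metrics = ocw.metrics, plotter = ocw.plotter, Evaluation from ocw.evaluation) and that the inputs are loaded with the same local.load_file helper shown in Code Example #10, the function above might be driven as follows. The file paths, the variable name 'pr', and the dataset labels are hypothetical.

# Hypothetical driver (placeholder paths, variable name, and labels).
import ocw.data_source.local as local

obs = local.load_file('/tmp/obs_precip.nc', 'pr')              # reference dataset
models = [local.load_file('/tmp/model_a_precip.nc', 'pr'),     # target datasets
          local.load_file('/tmp/model_b_precip.nc', 'pr')]

Taylor_diagram_spatial_pattern_of_multiyear_climatology(
    obs, 'OBS', models, ['MODEL-A', 'MODEL-B'], 'taylor_diagram_example')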
Code Example #2
File: metrics_and_plots.py  Project: CWSL/climate
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):

    # calculate climatological mean fields
    obs_clim_dataset = ds.Dataset(obs_dataset.lats, obs_dataset.lons,
                                  obs_dataset.times,
                                  utils.calc_temporal_mean(obs_dataset))
    model_clim_datasets = []
    for dataset in model_datasets:
        model_clim_datasets.append(
            ds.Dataset(dataset.lats, dataset.lons, dataset.times,
                       utils.calc_temporal_mean(dataset)))

    # Metrics (spatial standard deviation and pattern correlation)
    # determine the metrics
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # create the Evaluation object
    taylor_evaluation = Evaluation(
        obs_clim_dataset,  # Climatological mean of reference dataset for the evaluation
        model_clim_datasets,  # list of climatological means from model datasets for the evaluation
        [taylor_diagram])

    # run the evaluation (Taylor diagram statistics)
    taylor_evaluation.run()

    taylor_data = taylor_evaluation.results[0]

    plotter.draw_taylor_diagram(
        taylor_data,
        model_names,
        obs_name,
        file_name,
        pos='upper right',
        frameon=False)
Code Example #3
File: metrics_and_plots.py  Project: webaskin/climate
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):

    # calculate climatological mean fields
    obs_dataset.values = utils.calc_temporal_mean(obs_dataset)
    for dataset in model_datasets:
        dataset.values = utils.calc_temporal_mean(dataset)

    # Metrics (spatial standard deviation and pattern correlation)
    # determine the metrics
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # create the Evaluation object
    taylor_evaluation = Evaluation(
        obs_dataset,  # Reference dataset for the evaluation
        model_datasets,  # list of target datasets for the evaluation
        [taylor_diagram])

    # run the evaluation (Taylor diagram statistics)
    taylor_evaluation.run()

    taylor_data = taylor_evaluation.results[0]

    plotter.draw_taylor_diagram(taylor_data,
                                model_names,
                                obs_name,
                                file_name,
                                pos='upper right',
                                frameon=False)
Code Example #4
File: test_utils.py  Project: skbaum/climate
    def test_returned_mean(self):
        mean_values = np.array([[22.5, 23.5, 24.5],
                                [25.5, 26.5, 27.5],
                                [28.5, 29.5, 30.5]])

        result = utils.calc_temporal_mean(self.test_dataset)
        np.testing.assert_array_equal(result, mean_values)
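
The test fixture is not shown on this page. The sketch below is an assumption that reproduces the asserted means, reading calc_temporal_mean as an average over the leading (time) axis of dataset.values; a 6 x 3 x 3 array of the values 0..53 yields exactly the expected 3 x 3 result.

import numpy as np

# Assumed stand-in for the missing fixture: values shaped (times, lats, lons) = (6, 3, 3).
values = np.ma.arange(54).reshape(6, 3, 3)

# The temporal mean is understood as the average over the time axis (axis 0);
# np.ma keeps any masked (missing) cells out of the average.
temporal_mean = np.ma.average(values, axis=0)

np.testing.assert_array_equal(temporal_mean,
                              np.array([[22.5, 23.5, 24.5],
                                        [25.5, 26.5, 27.5],
                                        [28.5, 29.5, 30.5]]))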
Code Example #5
File: metrics_and_plots.py  Project: pwcberry/climate
def Map_plot_bias_of_multiyear_climatology(obs_dataset, obs_name,
                                           model_datasets, model_names,
                                           file_name, row, column):
    '''Draw maps of observed multi-year climatology and biases of models.'''

    # calculate climatology of observation data
    obs_clim = utils.calc_temporal_mean(obs_dataset)
    # determine the metrics
    map_of_bias = metrics.TemporalMeanBias()

    # create the Evaluation object
    bias_evaluation = Evaluation(obs_dataset, # Reference dataset for the evaluation
                                 model_datasets, # list of target datasets for the evaluation
                                 [map_of_bias, map_of_bias])

    # run the evaluation (bias calculation)
    bias_evaluation.run() 

    rcm_bias = bias_evaluation.results[0]

    fig = plt.figure()

    lat_min = obs_dataset.lats.min()
    lat_max = obs_dataset.lats.max()
    lon_min = obs_dataset.lons.min()
    lon_max = obs_dataset.lons.max()

    string_list = list(string.ascii_lowercase)
    ax = fig.add_subplot(row, column, 1)
    m = Basemap(ax=ax, projection='cyl', llcrnrlat=lat_min, urcrnrlat=lat_max,
                llcrnrlon=lon_min, urcrnrlon=lon_max, resolution='l',
                fix_aspect=False)
    lons, lats = np.meshgrid(obs_dataset.lons, obs_dataset.lats)

    x,y = m(lons, lats)

    m.drawcoastlines(linewidth=1)
    m.drawcountries(linewidth=1)
    m.drawstates(linewidth=0.5, color='w')
    max = m.contourf(x, y, obs_clim,
                     levels=plotter._nice_intervals(obs_dataset.values, 10),
                     extend='both', cmap='PuOr')
    ax.annotate('(a) \n' + obs_name, xy=(lon_min, lat_min))
    cax = fig.add_axes([0.02, 1. - float(1. / row), 0.01, 1. / row * 0.6])
    plt.colorbar(max, cax=cax)
    clevs = plotter._nice_intervals(rcm_bias, 11)
    for imodel in np.arange(len(model_datasets)):
        ax = fig.add_subplot(row, column, 2 + imodel)
        m = Basemap(ax=ax, projection='cyl', llcrnrlat=lat_min,
                    urcrnrlat=lat_max, llcrnrlon=lon_min, urcrnrlon=lon_max,
                    resolution='l', fix_aspect=False)
        m.drawcoastlines(linewidth=1)
        m.drawcountries(linewidth=1)
        m.drawstates(linewidth=0.5, color='w')
        max = m.contourf(x, y, rcm_bias[imodel, :], levels=clevs,
                         extend='both', cmap='RdBu_r')
        ax.annotate('(' + string_list[imodel + 1] + ')  \n ' +
                    model_names[imodel], xy=(lon_min, lat_min))

    cax = fig.add_axes([0.91, 0.1, 0.015, 0.8])
    plt.colorbar(max, cax=cax)

    plt.subplots_adjust(hspace=0.01, wspace=0.05)

    plt.show()
    fig.savefig(file_name, dpi=600, bbox_inches='tight')
Code Example #6
File: metrics_and_plots.py  Project: pwcberry/climate
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):

    # calculate climatological mean fields
    obs_dataset.values = utils.calc_temporal_mean(obs_dataset)
    for dataset in model_datasets:
        dataset.values = utils.calc_temporal_mean(dataset)

    # Metrics (spatial standard deviation and pattern correlation)
    # determine the metrics
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # create the Evaluation object
    taylor_evaluation = Evaluation(
        obs_dataset,  # Reference dataset for the evaluation
        model_datasets,  # list of target datasets for the evaluation
        [taylor_diagram])

    # run the evaluation (Taylor diagram statistics)
    taylor_evaluation.run() 

    taylor_data = taylor_evaluation.results[0]

    plotter.draw_taylor_diagram(taylor_data, model_names, obs_name, file_name,
                                pos='upper right', frameon=False)
Code Example #7
                                                 temporal_resolution='monthly')
    target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])

#Regrid
print("... regrid")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)

for member, each_target_dataset in enumerate(target_datasets):
    target_datasets[member] = dsp.spatial_regrid(target_datasets[member],
                                                 new_lats, new_lons)

# Find the mean values.
# Note: a related helper, calc_climatology_year(dataset), also exists in utils.py.
CRU31.values = utils.calc_temporal_mean(CRU31)

#make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name = "ENS"

#append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)

for member, each_target_dataset in enumerate(target_datasets):
    target_datasets[member].values = utils.calc_temporal_mean(
        target_datasets[member])

allNames = []

for target in target_datasets:
    allNames.append(target.name)
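
For context on the ensemble step in this snippet: dsp.ensemble is understood to build a multi-model mean dataset, which is then appended to target_datasets and evaluated like any other member. Below is a rough numpy-only sketch of that idea, assuming every member is already on the common new_lats/new_lons grid; it is not the library's implementation.

import numpy as np

# Stack the member value arrays and average across members; the result has the
# same (time, lat, lon) shape as each individual member at this point in the script.
member_fields = np.ma.stack([d.values for d in target_datasets])
ensemble_mean = np.ma.mean(member_fields, axis=0)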
Code Example #8
def Map_plot_bias_of_multiyear_climatology(obs_dataset,
                                           obs_name,
                                           model_datasets,
                                           model_names,
                                           file_name,
                                           row,
                                           column,
                                           map_projection=None):
    '''Draw maps of observed multi-year climatology and biases of models.'''

    # calculate climatology of observation data
    obs_clim = utils.calc_temporal_mean(obs_dataset)
    # determine the metrics
    map_of_bias = metrics.TemporalMeanBias()

    # create the Evaluation object
    bias_evaluation = Evaluation(
        obs_dataset,  # Reference dataset for the evaluation
        model_datasets,  # list of target datasets for the evaluation
        [map_of_bias, map_of_bias])
    # run the evaluation (bias calculation)
    bias_evaluation.run()

    rcm_bias = bias_evaluation.results[0]

    fig = plt.figure()

    lat_min = obs_dataset.lats.min()
    lat_max = obs_dataset.lats.max()
    lon_min = obs_dataset.lons.min()
    lon_max = obs_dataset.lons.max()

    string_list = list(string.ascii_lowercase)
    ax = fig.add_subplot(row, column, 1)
    if map_projection == 'npstere':
        m = Basemap(ax=ax,
                    projection='npstere',
                    boundinglat=lat_min,
                    lon_0=0,
                    resolution='l',
                    fix_aspect=False)
    else:
        m = Basemap(ax=ax,
                    projection='cyl',
                    llcrnrlat=lat_min,
                    urcrnrlat=lat_max,
                    llcrnrlon=lon_min,
                    urcrnrlon=lon_max,
                    resolution='l',
                    fix_aspect=False)
    lons, lats = np.meshgrid(obs_dataset.lons, obs_dataset.lats)

    x, y = m(lons, lats)

    m.drawcoastlines(linewidth=1)
    m.drawcountries(linewidth=1)
    m.drawstates(linewidth=0.5, color='w')
    max = m.contourf(x,
                     y,
                     obs_clim,
                     levels=plotter._nice_intervals(obs_dataset.values, 10),
                     extend='both',
                     cmap='rainbow')
    ax.annotate('(a) \n' + obs_name, xy=(lon_min, lat_min))
    cax = fig.add_axes([0.02, 1. - float(1. / row), 0.01, 1. / row * 0.6])
    plt.colorbar(max, cax=cax)
    clevs = plotter._nice_intervals(rcm_bias, 11)
    for imodel in np.arange(len(model_datasets)):

        ax = fig.add_subplot(row, column, 2 + imodel)
        if map_projection == 'npstere':
            m = Basemap(ax=ax,
                        projection='npstere',
                        boundinglat=lat_min,
                        lon_0=0,
                        resolution='l',
                        fix_aspect=False)
        else:
            m = Basemap(ax=ax,
                        projection='cyl',
                        llcrnrlat=lat_min,
                        urcrnrlat=lat_max,
                        llcrnrlon=lon_min,
                        urcrnrlon=lon_max,
                        resolution='l',
                        fix_aspect=False)
        m.drawcoastlines(linewidth=1)
        m.drawcountries(linewidth=1)
        m.drawstates(linewidth=0.5, color='w')
        max = m.contourf(x,
                         y,
                         rcm_bias[imodel, :],
                         levels=clevs,
                         extend='both',
                         cmap='RdBu_r')
        ax.annotate('(' + string_list[imodel + 1] + ')  \n ' +
                    model_names[imodel],
                    xy=(lon_min, lat_min))

    cax = fig.add_axes([0.91, 0.1, 0.015, 0.8])
    plt.colorbar(max, cax=cax)

    plt.subplots_adjust(hspace=0.01, wspace=0.05)

    fig.savefig(file_name, dpi=600, bbox_inches='tight')
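
A hypothetical call, for orientation only, reusing the placeholder obs/models objects from the sketch after Code Example #1: the layout places the observation in subplot 1 and the models in subplots 2..N+1, so row * column must be at least len(model_datasets) + 1.

# Hypothetical call: two models plus the observation panel fit a 1 x 3 grid.
Map_plot_bias_of_multiyear_climatology(
    obs, 'OBS', models, ['MODEL-A', 'MODEL-B'],
    'bias_maps_example', 1, 3)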
Code Example #9
    target_datasets[member] = dsp.water_flux_unit_conversion(
        target_datasets[member])
    target_datasets[member] = dsp.temporal_rebin(target_datasets[member],
                                                 datetime.timedelta(days=30))
    target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])

# Regrid
print("... regrid")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)

for member, each_target_dataset in enumerate(target_datasets):
    target_datasets[member] = dsp.spatial_regrid(target_datasets[member],
                                                 new_lats, new_lons)

# Find the mean values.
# Note: a related helper, calc_climatology_year(dataset), also exists in utils.py.
CRU31.values = utils.calc_temporal_mean(CRU31)

# Make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name = "ENS"

# Append the ensemble to target_datasets for the final analysis
target_datasets.append(target_datasets_ensemble)

for member, each_target_dataset in enumerate(target_datasets):
    target_datasets[member].values = utils.calc_temporal_mean(
        target_datasets[member])

allNames = []

for target in target_datasets:
    allNames.append(target.name)
Code Example #10
FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "wrf_bias_compared_to_knmi"

FILE_1_PATH = path.join('/tmp', FILE_1)
FILE_2_PATH = path.join('/tmp', FILE_2)

if not path.exists(FILE_1_PATH):
    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1_PATH)
if not path.exists(FILE_2_PATH):
    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2_PATH)

""" Step 1: Load Local NetCDF Files into OCW Dataset Objects """
print("Loading %s into an OCW Dataset Object" % (FILE_1_PATH,))
knmi_dataset = local.load_file(FILE_1_PATH, "tasmax")
print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_dataset.values.shape,))

print("Loading %s into an OCW Dataset Object" % (FILE_2_PATH,))
wrf_dataset = local.load_file(FILE_2_PATH, "tasmax")
print("WRF_Dataset.values shape: (times, lats, lons) - %s \n" % (wrf_dataset.values.shape,))

""" Step 2: Calculate seasonal average """
print("Calculate seasonal average")
knmi_DJF_mean = utils.calc_temporal_mean(
    dsp.temporal_subset(month_start=12, month_end=2, target_dataset=knmi_dataset))
wrf_DJF_mean = utils.calc_temporal_mean(
    dsp.temporal_subset(month_start=12, month_end=2, target_dataset=wrf_dataset))
print("Seasonally averaged KNMI_Dataset.values shape: (lats, lons) - %s \n"
      % (knmi_DJF_mean.shape,))
print("Seasonally averaged wrf_Dataset.values shape: (lats, lons) - %s \n"
      % (wrf_DJF_mean.shape,))
knmi_JJA_mean = utils.calc_temporal_mean(
    dsp.temporal_subset(month_start=6, month_end=8, target_dataset=knmi_dataset))
wrf_JJA_mean = utils.calc_temporal_mean(
    dsp.temporal_subset(month_start=6, month_end=8, target_dataset=wrf_dataset))
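
The excerpt ends here. A hedged continuation (not part of the original script): since both files appear to share the same AFRICA 50 km grid, the seasonal bias fields could simply be the element-wise differences of the means computed above; the variable names below are placeholders.

# Hypothetical next step: DJF and JJA bias of WRF relative to KNMI, assuming the
# two datasets are on the same lat/lon grid.
wrf_DJF_bias = wrf_DJF_mean - knmi_DJF_mean   # shape: (lats, lons)
wrf_JJA_bias = wrf_JJA_mean - knmi_JJA_mean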