Code Example #1
    def test_full_init(self):
        self.eval = Evaluation(
            self.test_dataset, [self.test_dataset, self.another_test_dataset],
            [Bias(), Bias(), TemporalStdDev()])
        ref_dataset = self.test_dataset
        target_datasets = [self.test_dataset, self.another_test_dataset]
        metrics = [Bias(), Bias()]
        unary_metrics = [TemporalStdDev()]

        self.eval = Evaluation(ref_dataset, target_datasets,
                               metrics + unary_metrics)

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)

        # Make sure the two target datasets were added properly
        self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
        self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)

        # Make sure the three metrics were added properly
        # The two Bias metrics are "binary" metrics
        self.assertEqual(len(self.eval.metrics), 2)
        # TemporalStdDev is a "unary" metric and should be stored as such
        self.assertEqual(len(self.eval.unary_metrics), 1)
        self.eval.run()
        out_str = ("<Evaluation - ref_dataset: {}, "
                   "target_dataset(s): {}, "
                   "binary_metric(s): {}, "
                   "unary_metric(s): {}, "
                   "subregion(s): {}>").format(str(
                       self.test_dataset), [str(ds) for ds in target_datasets],
                                               [str(m) for m in metrics],
                                               [str(u)
                                                for u in unary_metrics], None)
        self.assertEqual(str(self.eval), out_str)
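Every snippet on this page follows the same pattern: build Dataset objects, pass them to an Evaluation together with a list of metrics, and call run(). For orientation, here is a minimal, self-contained sketch of that flow, assuming the usual OCW import paths (ocw.dataset, ocw.evaluation, ocw.metrics) and mirroring the toy data the test fixtures above construct:

import datetime as dt
import numpy as np
from ocw.dataset import Dataset
from ocw.evaluation import Evaluation
from ocw.metrics import Bias, TemporalStdDev

# Two tiny datasets on a shared 5x5 grid with 12 monthly time steps.
lats = np.array([10, 12, 14, 16, 18])
lons = np.array([100, 102, 104, 106, 108])
times = np.array([dt.datetime(2000, m, 1) for m in range(1, 13)])
values = np.arange(300).reshape(12, 5, 5)
ref = Dataset(lats, lons, times, values, 'prec')
target = Dataset(lats, lons, times, values, 'temp')

# Binary metrics (Bias) and unary metrics (TemporalStdDev) are passed in a
# single list; the constructor sorts them into .metrics and .unary_metrics.
evaluation = Evaluation(ref, [target], [Bias(), TemporalStdDev()])
evaluation.run()

print(evaluation.results)        # binary (reference vs. target) results
print(evaluation.unary_results)  # unary (per-dataset) results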
Code Example #2
    def test_bias_output_shape(self):
        bias_eval = Evaluation(self.test_dataset,
                               [self.another_test_dataset], [Bias()])
        bias_eval.run()
        input_shape = tuple(self.test_dataset.values.shape)
        bias_results_shape = tuple(bias_eval.results[0][0].shape)
        self.assertEqual(input_shape, bias_results_shape)
Code Example #3
File: test_evaluation.py Project: d-vf/climate
    def test_subregion_unary_result_shape(self):
        bound = Bounds(10, 18,
                       100, 108,
                       dt.datetime(2000, 1, 1), dt.datetime(2000, 3, 1))

        new_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset],
            [TemporalStdDev()],
            [bound]
        )
        new_eval.run()

        # Expected result shape is
        # [
        #   [   
        #       [   # Subregions cause this extra layer
        #           temporalstddev.run(reference),
        #           temporalstddev.run(target1),
        #           temporalstddev.run(target2)
        #       ]
        #   ]
        # ]
        self.assertTrue(len(new_eval.unary_results) == 1)
        self.assertTrue(type(new_eval.unary_results) == type([]))

        self.assertTrue(len(new_eval.unary_results[0]) == 1)

        self.assertTrue(len(new_eval.unary_results[0][0]) == 3)
Code Example #4
File: metrics_and_plots.py Project: CWSL/climate
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):

    # calculate climatological mean fields
    obs_clim_dataset = ds.Dataset(obs_dataset.lats, obs_dataset.lons,
                                  obs_dataset.times,
                                  utils.calc_temporal_mean(obs_dataset))
    model_clim_datasets = []
    for dataset in model_datasets:
        model_clim_datasets.append(
            ds.Dataset(dataset.lats, dataset.lons, dataset.times,
                       utils.calc_temporal_mean(dataset)))

    # Metrics (spatial standard deviation and pattern correlation)
    # determine the metrics
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # create the Evaluation object
    taylor_evaluation = Evaluation(
        obs_clim_dataset,  # Climatological mean of reference dataset for the evaluation
        model_clim_datasets,  # list of climatological means from model datasets for the evaluation
        [taylor_diagram])

    # run the evaluation (bias calculation)
    taylor_evaluation.run()

    taylor_data = taylor_evaluation.results[0]

    plotter.draw_taylor_diagram(
        taylor_data,
        model_names,
        obs_name,
        file_name,
        pos='upper right',
        frameon=False)
Code Example #5
File: metrics_and_plots.py Project: webaskin/climate
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):

    # calculate climatological mean fields
    obs_dataset.values = utils.calc_temporal_mean(obs_dataset)
    for dataset in model_datasets:
        dataset.values = utils.calc_temporal_mean(dataset)

    # Metrics (spatial standard deviation and pattern correlation)
    # determine the metrics
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # create the Evaluation object
    taylor_evaluation = Evaluation(
        obs_dataset,  # Reference dataset for the evaluation
        model_datasets,  # list of target datasets for the evaluation
        [taylor_diagram])

    # run the evaluation (bias calculation)
    taylor_evaluation.run()

    taylor_data = taylor_evaluation.results[0]

    plotter.draw_taylor_diagram(taylor_data,
                                model_names,
                                obs_name,
                                file_name,
                                pos='upper right',
                                frameon=False)
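Note the design difference from Example #4: this variant writes the climatological means back into obs_dataset.values and dataset.values, mutating the caller's datasets in place, while Example #4 wraps the means in freshly constructed ds.Dataset objects and leaves the inputs untouched. The non-mutating form is preferable when the original datasets are needed again after plotting.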
Code Example #6
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):

    # calculate climatological mean fields
    obs_clim_dataset = ds.Dataset(obs_dataset.lats, obs_dataset.lons,
                                  obs_dataset.times,
                                  utils.calc_temporal_mean(obs_dataset))
    model_clim_datasets = []
    for dataset in model_datasets:
        model_clim_datasets.append(
            ds.Dataset(dataset.lats, dataset.lons, dataset.times,
                       utils.calc_temporal_mean(dataset)))

    # Metrics (spatial standard deviation and pattern correlation)
    # determine the metrics
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # create the Evaluation object
    taylor_evaluation = Evaluation(
        obs_clim_dataset,  # Climatological mean of reference dataset for the evaluation
        model_clim_datasets,  # list of climatological means from model datasets for the evaluation
        [taylor_diagram])

    # run the evaluation (bias calculation)
    taylor_evaluation.run()

    taylor_data = taylor_evaluation.results[0]

    plotter.draw_taylor_diagram(taylor_data,
                                model_names,
                                obs_name,
                                file_name,
                                pos='upper right',
                                frameon=False)
Code Example #7
    def test_subregion_unary_result_shape(self):
        bound = Bounds(
            lat_min=10, lat_max=18,
            lon_min=100, lon_max=108,
            start=dt.datetime(2000, 1, 1), end=dt.datetime(2000, 3, 1))

        new_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset],
            [TemporalStdDev(), TemporalStdDev()],
            [bound, bound, bound, bound, bound]
        )
        new_eval.run()

        # Expected result shape is
        # [
        #       [   # Subregions cause this extra layer
        #           [3, temporalstddev.run(reference).shape],
        #       ]
        # ]

        # 5 = number of subregions
        self.assertTrue(len(new_eval.unary_results) == 5)
        # number of metrics
        self.assertTrue(len(new_eval.unary_results[0]) == 2)
        self.assertTrue(isinstance(new_eval.unary_results, type([])))
        # number of datasets (ref + target)
        self.assertTrue(new_eval.unary_results[0][0].shape[0] == 3)
Code Example #8
File: metrics_and_plots.py Project: pwcberry/climate
def Map_plot_bias_of_multiyear_climatology(obs_dataset, obs_name, model_datasets,
                                           model_names, file_name, row, column):
    '''Draw maps of observed multi-year climatology and biases of models'''

    # calculate climatology of observation data
    obs_clim = utils.calc_temporal_mean(obs_dataset)
    # determine the metrics
    map_of_bias = metrics.TemporalMeanBias()

    # create the Evaluation object
    bias_evaluation = Evaluation(obs_dataset,  # Reference dataset for the evaluation
                                 model_datasets,  # list of target datasets for the evaluation
                                 [map_of_bias, map_of_bias])

    # run the evaluation (bias calculation)
    bias_evaluation.run()

    rcm_bias = bias_evaluation.results[0]

    fig = plt.figure()

    lat_min = obs_dataset.lats.min()
    lat_max = obs_dataset.lats.max()
    lon_min = obs_dataset.lons.min()
    lon_max = obs_dataset.lons.max()

    string_list = list(string.ascii_lowercase)
    ax = fig.add_subplot(row, column, 1)
    m = Basemap(ax=ax, projection='cyl', llcrnrlat=lat_min, urcrnrlat=lat_max,
                llcrnrlon=lon_min, urcrnrlon=lon_max, resolution='l',
                fix_aspect=False)
    lons, lats = np.meshgrid(obs_dataset.lons, obs_dataset.lats)

    x, y = m(lons, lats)

    m.drawcoastlines(linewidth=1)
    m.drawcountries(linewidth=1)
    m.drawstates(linewidth=0.5, color='w')
    # contour-set handle; named 'cs' to avoid shadowing the builtin max
    cs = m.contourf(x, y, obs_clim,
                    levels=plotter._nice_intervals(obs_dataset.values, 10),
                    extend='both', cmap='PuOr')
    ax.annotate('(a) \n' + obs_name, xy=(lon_min, lat_min))
    cax = fig.add_axes([0.02, 1. - float(1. / row), 0.01, 1. / row * 0.6])
    plt.colorbar(cs, cax=cax)
    clevs = plotter._nice_intervals(rcm_bias, 11)
    for imodel in np.arange(len(model_datasets)):
        ax = fig.add_subplot(row, column, 2 + imodel)
        m = Basemap(ax=ax, projection='cyl', llcrnrlat=lat_min, urcrnrlat=lat_max,
                    llcrnrlon=lon_min, urcrnrlon=lon_max, resolution='l',
                    fix_aspect=False)
        m.drawcoastlines(linewidth=1)
        m.drawcountries(linewidth=1)
        m.drawstates(linewidth=0.5, color='w')
        cs = m.contourf(x, y, rcm_bias[imodel, :], levels=clevs,
                        extend='both', cmap='RdBu_r')
        ax.annotate('(' + string_list[imodel + 1] + ')  \n ' + model_names[imodel],
                    xy=(lon_min, lat_min))

    cax = fig.add_axes([0.91, 0.1, 0.015, 0.8])
    plt.colorbar(cs, cax=cax)

    plt.subplots_adjust(hspace=0.01, wspace=0.05)

    plt.show()
    fig.savefig(file_name, dpi=600, bbox_inches='tight')
Code Example #9
    def test_result_shape(self):
        bias_eval = Evaluation(self.test_dataset, [
            self.another_test_dataset, self.another_test_dataset,
            self.another_test_dataset
        ], [Bias(), Bias()])
        bias_eval.run()

        # Expected result shape is
        # [bias, bias] where bias.shape[0] = number of datasets
        self.assertTrue(len(bias_eval.results) == 2)
        self.assertTrue(bias_eval.results[0].shape[0] == 3)
Code Example #10
File: test_evaluation.py Project: CWSL/climate
    def test_result_shape(self):
        bias_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset, self.another_test_dataset],
            [Bias(), Bias()]
        )
        bias_eval.run()

        # Expected result shape is
        # [bias, bias] where bias.shape[0] = number of datasets
        self.assertTrue(len(bias_eval.results) == 2)
        self.assertTrue(bias_eval.results[0].shape[0] == 3)
Code Example #11
    def test_unary_result_shape(self):
        new_eval = Evaluation(self.test_dataset, [
            self.another_test_dataset, self.another_test_dataset,
            self.another_test_dataset, self.another_test_dataset
        ], [TemporalStdDev()])
        new_eval.run()

        # Expected result shape is
        # [stddev] where stddev.shape[0] = number of datasets

        self.assertTrue(len(new_eval.unary_results) == 1)
        self.assertTrue(new_eval.unary_results[0].shape[0] == 5)
Code Example #12
File: test_processing.py Project: webaskin/climate
    def setUp(self):
        self.full_evaluation = Evaluation(
            _create_fake_dataset('Ref'),
            [_create_fake_dataset('T1'), _create_fake_dataset('T2')],
            [metrics.TemporalStdDev(), metrics.Bias(), metrics.Bias()]
        )

        self.unary_evaluation = Evaluation(
            None,
            [_create_fake_dataset('T1'), _create_fake_dataset('T2')],
            [metrics.TemporalStdDev()]
        )
Code Example #13
File: test_evaluation.py Project: CWSL/climate
    def test_unary_result_shape(self):
        new_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset, self.another_test_dataset, self.another_test_dataset],
            [TemporalStdDev()]
        )
        new_eval.run()

        # Expected result shape is
        # [stddev] where stddev.shape[0] = number of datasets
        
        self.assertTrue(len(new_eval.unary_results) == 1)
        self.assertTrue(new_eval.unary_results[0].shape[0] == 5)
Code Example #14
    def setUp(self):
        self.eval = Evaluation(None, [], [])

        lat = np.array([10, 12, 14, 16, 18])
        lon = np.array([100, 102, 104, 106, 108])
        time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300))
        value = flat_array.reshape(12, 5, 5)
        self.variable = 'prec'
        self.other_var = 'temp'
        self.test_dataset = Dataset(lat, lon, time, value, self.variable)
        self.another_test_dataset = Dataset(lat, lon, time, value,
                                            self.other_var)
Code Example #15
File: test_evaluation.py Project: d-vf/climate
    def test_unary_result_shape(self):
        new_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset],
            [TemporalStdDev()]
        )
        new_eval.run()

        # Expected result shape is
        # [
        #   temporalstddev.run(reference),
        #   temporalstddev.run(target1),
        #   temporalstddev.run(target2)
        # ]
        self.assertTrue(len(new_eval.unary_results) == 1)
        self.assertTrue(len(new_eval.unary_results[0]) == 3)
Code Example #16
File: test_evaluation.py Project: imclab/climate
    def test_full_init(self):
        self.eval = Evaluation(
            self.test_dataset, [self.test_dataset, self.another_test_dataset],
            [Bias(), Bias(), TemporalStdDev()])

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)

        # Make sure the two target datasets were added properly
        self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
        self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)

        # Make sure the three metrics were added properly
        # The two Bias metrics are "binary" metrics
        self.assertEqual(len(self.eval.metrics), 2)
        # TemporalStdDev is a "unary" metric and should be stored as such
        self.assertEqual(len(self.eval.unary_metrics), 1)
Code Example #17
def generate_evaluation_from_config(config_data):
    """ Generate an Evaluation object from configuration data.

    :param config_data: Dictionary of the data parsed from the supplied YAML
        configuration file.
    :type config_data: :func:`dict`

    :returns: An Evaluation object containing the data specified in the
        supplied configuration data.
    """
    # Load datasets
    reference = None
    targets = []
    if config_data['datasets']:
        if 'reference' in config_data['datasets']:
            reference = _load_dataset(config_data['datasets']['reference'])

        if 'targets' in config_data['datasets']:
            targets = [
                _load_dataset(t) for t in config_data['datasets']['targets']
            ]

        reference, targets = _prepare_datasets_for_evaluation(
            reference, targets, config_data)
    # Load metrics
    eval_metrics = []
    if config_data['metrics']:
        eval_metrics = [_load_metric(m)() for m in config_data['metrics']]

    # Load Subregions (if present)
    subregions = None
    if 'subregions' in config_data:
        subregions = [_load_subregion(s) for s in config_data['subregions']]

    return Evaluation(reference, targets, eval_metrics, subregions=subregions)
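For illustration, here is a hypothetical config_data dictionary with the top-level keys this function reads ('datasets' with 'reference' and 'targets', 'metrics', and the optional 'subregions'). The nested fields are placeholders; the real schema is whatever the private helpers _load_dataset, _load_metric, and _load_subregion expect:

# Hypothetical input; only the top-level keys come from the function above,
# and the nested fields are illustrative placeholders.
config_data = {
    'datasets': {
        'reference': {'data_source': 'local', 'path': '/a/fake/ref.nc'},
        'targets': [
            {'data_source': 'local', 'path': '/a/fake/target.nc'},
        ],
    },
    'metrics': ['Bias', 'TemporalStdDev'],
    'subregions': [
        {'lat_min': -10, 'lat_max': 10, 'lon_min': -20, 'lon_max': 20},
    ],
}

evaluation = generate_evaluation_from_config(config_data)
evaluation.run()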
Code Example #18
File: test_evaluation.py Project: d-vf/climate
    def test_result_shape(self):
        bias_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset],
            [Bias()]
        )
        bias_eval.run()

        # Expected result shape is
        # [
        #   [
        #       bias.run(reference, target1)
        #   ],
        #   [
        #       bias.run(reference, target2)
        #   ]
        # ]
        self.assertTrue(len(bias_eval.results) == 2)
        self.assertTrue(len(bias_eval.results[0]) == 1)
        self.assertTrue(len(bias_eval.results[1]) == 1)
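Compatibility note: the nesting of Evaluation.results differs between the snapshots collected on this page. In this older d-vf/climate version, results[i][j] holds metric j run against target i, so one metric and two targets yield two single-element lists. In the newer snapshots (Examples #9 and #10), results[i] is instead the i-th metric's output stacked over all targets, so results[0].shape[0] equals the number of target datasets.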
Code Example #19
    def test_subregion_result_shape(self):
        bound = Bounds(10, 18, 100, 108, dt.datetime(2000, 1, 1),
                       dt.datetime(2000, 3, 1))

        bias_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset], [Bias()],
            [bound])
        bias_eval.run()

        # Expected result shape is
        # [
        #       [   # Subregions cause this extra layer
        #           [number of targets, bias.run(reference, target1).shape]
        #       ]
        # ]
        self.assertTrue(len(bias_eval.results) == 1)
        self.assertTrue(len(bias_eval.results[0]) == 1)
        self.assertTrue(bias_eval.results[0][0].shape[0] == 2)
        self.assertTrue(isinstance(bias_eval.results, type([])))
Code Example #20
    def test_default_data_return(self):
        new_eval = Evaluation(None, [], [])
        default_output = {
            'temporal_time_delta': 999,
            'spatial_regrid_lats': (-90, 90, 1),
            'spatial_regrid_lons': (-180, 180, 1),
            'subset': [-90, 90, -180, 180, "1500-01-01", "2500-01-01"],
        }

        out = writer.generate_evaluation_information(new_eval)

        self.assertEqual(default_output, out)
Code Example #21
    def test_full_init(self):
        self.eval = Evaluation(
            self.test_dataset,
            [self.test_dataset, self.another_test_dataset],
            [Bias(), Bias(), TemporalStdDev()])
        ref_dataset = self.test_dataset
        target_datasets = [self.test_dataset, self.another_test_dataset]
        metrics = [Bias(), Bias()]
        unary_metrics = [TemporalStdDev()]

        self.eval = Evaluation(ref_dataset,
                               target_datasets,
                               metrics + unary_metrics)

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)

        # Make sure the two target datasets were added properly
        self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
        self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)

        # Make sure the three metrics were added properly
        # The two Bias metrics are "binary" metrics
        self.assertEqual(len(self.eval.metrics), 2)
        # TemporalStdDev is a "unary" metric and should be stored as such
        self.assertEqual(len(self.eval.unary_metrics), 1)
        self.eval.run()
        out_str = (
            "<Evaluation - ref_dataset: {}, "
            "target_dataset(s): {}, "
            "binary_metric(s): {}, "
            "unary_metric(s): {}, "
            "subregion(s): {}>"
        ).format(
            str(self.test_dataset),
            [str(ds) for ds in target_datasets],
            [str(m) for m in metrics],
            [str(u) for u in unary_metrics],
            None
        )
        self.assertEqual(str(self.eval), out_str)
Code Example #22
File: metrics_and_plots.py Project: pwcberry/climate
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):

    # calculate climatological mean fields
    obs_dataset.values = utils.calc_temporal_mean(obs_dataset)
    for dataset in model_datasets:
        dataset.values = utils.calc_temporal_mean(dataset)

    # Metrics (spatial standard deviation and pattern correlation)
    # determine the metrics
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # create the Evaluation object
    taylor_evaluation = Evaluation(obs_dataset,  # Reference dataset for the evaluation
                                   model_datasets,  # list of target datasets for the evaluation
                                   [taylor_diagram])

    # run the evaluation (bias calculation)
    taylor_evaluation.run() 

    taylor_data = taylor_evaluation.results[0]

    plotter.draw_taylor_diagram(taylor_data, model_names, obs_name, file_name,
                                pos='upper right', frameon=False)
Code Example #23
    @classmethod  # unittest requires setUpClass to be a classmethod
    def setUpClass(self):
        self.lats = np.array(range(-10, 10, 1))
        self.lons = np.array(range(-20, 20, 1))
        self.times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(9600))
        self.values = flat_array.reshape(12, 20, 40)

        self.dataset = Dataset(
            self.lats,
            self.lons,
            self.times,
            self.values,
        )

        self.evaluation = Evaluation(self.dataset, [], [])
Code Example #24
File: test_evaluation.py Project: d-vf/climate
    def test_subregion_result_shape(self):
        bound = Bounds(10, 18,
                       100, 108,
                       dt.datetime(2000, 1, 1), dt.datetime(2000, 3, 1))

        bias_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset],
            [Bias()],
            [bound]
        )
        bias_eval.run()

        # Expected result shape is
        # [
        #   [
        #       [   # Subregions cause this extra layer
        #           bias.run(reference, target1)
        #       ]
        #   ],
        #   [
        #       [
        #           bias.run(reference, target2)
        #       ]
        #   ]
        # ]
        self.assertTrue(len(bias_eval.results) == 2)

        self.assertTrue(len(bias_eval.results[0]) == 1)
        self.assertTrue(type(bias_eval.results[0]) == type([]))
        self.assertTrue(len(bias_eval.results[1]) == 1)
        self.assertTrue(type(bias_eval.results[1]) == type([]))

        self.assertTrue(len(bias_eval.results[0][0]) == 1)
        self.assertTrue(len(bias_eval.results[1][0]) == 1)
Code Example #25
    def test_subregion_result_shape(self):
        bound = Bounds(
            lat_min=10, lat_max=18,
            lon_min=100, lon_max=108,
            start=dt.datetime(2000, 1, 1), end=dt.datetime(2000, 3, 1))

        bias_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset],
            [Bias()],
            [bound]
        )
        bias_eval.run()

        # Expected result shape is
        # [
        #       [   # Subregions cause this extra layer
        #           [number of targets, bias.run(reference, target1).shape]
        #       ]
        # ]
        self.assertTrue(len(bias_eval.results) == 1)
        self.assertTrue(len(bias_eval.results[0]) == 1)
        self.assertTrue(bias_eval.results[0][0].shape[0] == 2)
        self.assertTrue(isinstance(bias_eval.results, type([])))
Code Example #26
    def test_yearly_temporal_bin(self):
        new_times = np.array(
            [dt.datetime(2000 + x, 1, 1) for x in range(1, 13)])

        dataset = Dataset(
            self.lats,
            self.lons,
            new_times,
            self.values,
        )
        new_eval = Evaluation(dataset, [], [])

        out = writer.generate_evaluation_information(new_eval)

        self.assertEqual(out['temporal_time_delta'], 366)
Code Example #27
File: test_evaluation.py Project: AtomLaw/climate
    def test_full_init(self):
        self.eval = Evaluation(
            self.test_dataset,
            [self.test_dataset, self.another_test_dataset],
            [Bias(), Bias(), TemporalStdDev()])

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)

        # Make sure the two target datasets were added properly
        self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
        self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)

        # Make sure the three metrics were added properly
        # The two Bias metrics are "binary" metrics
        self.assertEqual(len(self.eval.metrics), 2)
        # TemporalStdDev is a "unary" metric and should be stored as such
        self.assertEqual(len(self.eval.unary_metrics), 1)
Code Example #28
    def test_subset_with_multiple_datasets(self):
        new_ds = Dataset(np.arange(0, 20, 1), self.lons, self.times,
                         self.values)
        new_eval = Evaluation(self.dataset, [new_ds], [])

        out = writer.generate_evaluation_information(new_eval)
        subset = out['subset']

        ds_lat_min, ds_lat_max, ds_lon_min, ds_lon_max = \
            self.dataset.spatial_boundaries()
        start, end = self.dataset.temporal_boundaries()

        self.assertEqual(ds_lat_min, subset[0])
        # Check that we actually used the different max lat value that we
        # created by adding 'new_ds'.
        self.assertEqual(max(new_ds.lats), subset[1])
        self.assertEqual(ds_lon_min, subset[2])
        self.assertEqual(ds_lon_max, subset[3])
        self.assertEqual(str(start), subset[4])
        self.assertEqual(str(end), subset[5])
Code Example #29
def Map_plot_bias_of_multiyear_climatology(obs_dataset,
                                           obs_name,
                                           model_datasets,
                                           model_names,
                                           file_name,
                                           row,
                                           column,
                                           map_projection=None):
    '''Draw maps of observed multi-year climatology and biases of models'''

    # calculate climatology of observation data
    obs_clim = utils.calc_temporal_mean(obs_dataset)
    # determine the metrics
    map_of_bias = metrics.TemporalMeanBias()

    # create the Evaluation object
    bias_evaluation = Evaluation(
        obs_dataset,  # Reference dataset for the evaluation
        model_datasets,  # list of target datasets for the evaluation
        [map_of_bias, map_of_bias])
    # run the evaluation (bias calculation)
    bias_evaluation.run()

    rcm_bias = bias_evaluation.results[0]

    fig = plt.figure()

    lat_min = obs_dataset.lats.min()
    lat_max = obs_dataset.lats.max()
    lon_min = obs_dataset.lons.min()
    lon_max = obs_dataset.lons.max()

    string_list = list(string.ascii_lowercase)
    ax = fig.add_subplot(row, column, 1)
    if map_projection == 'npstere':
        m = Basemap(ax=ax,
                    projection='npstere',
                    boundinglat=lat_min,
                    lon_0=0,
                    resolution='l',
                    fix_aspect=False)
    else:
        m = Basemap(ax=ax,
                    projection='cyl',
                    llcrnrlat=lat_min,
                    urcrnrlat=lat_max,
                    llcrnrlon=lon_min,
                    urcrnrlon=lon_max,
                    resolution='l',
                    fix_aspect=False)
    lons, lats = np.meshgrid(obs_dataset.lons, obs_dataset.lats)

    x, y = m(lons, lats)

    m.drawcoastlines(linewidth=1)
    m.drawcountries(linewidth=1)
    m.drawstates(linewidth=0.5, color='w')
    # contour-set handle; named 'cs' to avoid shadowing the builtin max
    cs = m.contourf(x,
                    y,
                    obs_clim,
                    levels=plotter._nice_intervals(obs_dataset.values, 10),
                    extend='both',
                    cmap='rainbow')
    ax.annotate('(a) \n' + obs_name, xy=(lon_min, lat_min))
    cax = fig.add_axes([0.02, 1. - float(1. / row), 0.01, 1. / row * 0.6])
    plt.colorbar(cs, cax=cax)
    clevs = plotter._nice_intervals(rcm_bias, 11)
    for imodel in np.arange(len(model_datasets)):

        ax = fig.add_subplot(row, column, 2 + imodel)
        if map_projection == 'npstere':
            m = Basemap(ax=ax,
                        projection='npstere',
                        boundinglat=lat_min,
                        lon_0=0,
                        resolution='l',
                        fix_aspect=False)
        else:
            m = Basemap(ax=ax,
                        projection='cyl',
                        llcrnrlat=lat_min,
                        urcrnrlat=lat_max,
                        llcrnrlon=lon_min,
                        urcrnrlon=lon_max,
                        resolution='l',
                        fix_aspect=False)
        m.drawcoastlines(linewidth=1)
        m.drawcountries(linewidth=1)
        m.drawstates(linewidth=0.5, color='w')
        cs = m.contourf(x,
                        y,
                        rcm_bias[imodel, :],
                        levels=clevs,
                        extend='both',
                        cmap='RdBu_r')
        ax.annotate('(' + string_list[imodel + 1] + ')  \n ' +
                    model_names[imodel],
                    xy=(lon_min, lat_min))

    cax = fig.add_axes([0.91, 0.1, 0.015, 0.8])
    plt.colorbar(cs, cax=cax)

    plt.subplots_adjust(hspace=0.01, wspace=0.05)

    fig.savefig(file_name, dpi=600, bbox_inches='tight')
Code Example #30
File: test_evaluation.py Project: imclab/climate
class TestEvaluation(unittest.TestCase):
    def setUp(self):
        self.eval = Evaluation(None, [], [])

        lat = np.array([10, 12, 14, 16, 18])
        lon = np.array([100, 102, 104, 106, 108])
        time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300))
        value = flat_array.reshape(12, 5, 5)
        self.variable = 'prec'
        self.other_var = 'temp'
        self.test_dataset = Dataset(lat, lon, time, value, self.variable)
        self.another_test_dataset = Dataset(lat, lon, time, value,
                                            self.other_var)

    def test_init(self):
        self.assertEqual(self.eval.ref_dataset, None)
        self.assertEqual(self.eval.target_datasets, [])
        self.assertEqual(self.eval.metrics, [])
        self.assertEqual(self.eval.unary_metrics, [])

    def test_full_init(self):
        self.eval = Evaluation(
            self.test_dataset, [self.test_dataset, self.another_test_dataset],
            [Bias(), Bias(), TemporalStdDev()])

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)

        # Make sure the two target datasets were added properly
        self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
        self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)

        # Make sure the three metrics were added properly
        # The two Bias metrics are "binary" metrics
        self.assertEqual(len(self.eval.metrics), 2)
        # TemporalStdDev is a "unary" metric and should be stored as such
        self.assertEqual(len(self.eval.unary_metrics), 1)

    def test_invalid_ref_dataset(self):
        with self.assertRaises(TypeError):
            self.eval.ref_dataset = "This isn't a Dataset object"

    def test_valid_subregion(self):
        bound = Bounds(-10, 10, -20, 20, dt.datetime(2000, 1, 1),
                       dt.datetime(2001, 1, 1))

        self.eval.subregions = [bound, bound]
        self.assertEqual(len(self.eval.subregions), 2)

    def test_invalid_subregion_bound(self):
        bound = "This is not a bounds object"

        with self.assertRaises(TypeError):
            self.eval.subregions = [bound]

    def test_add_ref_dataset(self):
        self.eval = Evaluation(self.test_dataset, [], [])

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)

    def test_add_dataset(self):
        self.eval.add_dataset(self.test_dataset)

        self.assertEqual(self.eval.target_datasets[0].variable, self.variable)

    def test_add_datasets(self):
        self.eval.add_datasets([self.test_dataset, self.another_test_dataset])

        self.assertEqual(len(self.eval.target_datasets), 2)
        self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
        self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)

    def test_add_metric(self):
        # Add a "binary" metric
        self.assertEqual(len(self.eval.metrics), 0)
        self.eval.add_metric(Bias())
        self.assertEqual(len(self.eval.metrics), 1)

        # Add a "unary" metric
        self.assertEqual(len(self.eval.unary_metrics), 0)
        self.eval.add_metric(TemporalStdDev())
        self.assertEqual(len(self.eval.unary_metrics), 1)

    def test_add_metrics(self):
        self.assertEqual(len(self.eval.metrics), 0)
        self.eval.add_metrics([Bias(), Bias()])
        self.assertEqual(len(self.eval.metrics), 2)

    def test_bias_output_shape(self):
        bias_eval = Evaluation(self.test_dataset, [self.another_test_dataset],
                               [Bias()])
        bias_eval.run()
        input_shape = tuple(self.test_dataset.values.shape)
        bias_results_shape = tuple(bias_eval.results[0][0].shape)
        self.assertEqual(input_shape, bias_results_shape)
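Besides the all-at-once constructor, the class above also exercises Evaluation's incremental API: ref_dataset is a type-checked property (assigning a non-Dataset raises TypeError), and add_dataset/add_datasets and add_metric/add_metrics append targets and metrics after construction. A short sketch of that flow, reusing the assumed imports and the ref/target datasets from the sketch after Example #1:

# Start empty, then build the evaluation up piece by piece.
evaluation = Evaluation(None, [], [])
evaluation.ref_dataset = ref            # raises TypeError for non-Dataset values
evaluation.add_dataset(target)          # append one target dataset
evaluation.add_metrics([Bias(), TemporalStdDev()])  # sorted into binary/unary
evaluation.run()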
Code Example #31
    def test_contains_only_target_datasets(self):
        new_eval = Evaluation(None, [self.local_ds], [])
        out = writer.generate_dataset_information(new_eval)

        self.assertTrue('reference' not in out)
        self.assertTrue('targets' in out)
Code Example #32
    @classmethod  # unittest requires setUpClass to be a classmethod
    def setUpClass(self):
        self.bias = metrics.Bias()
        self.tmp_std_dev = metrics.TemporalStdDev()
        loaded_metrics = [self.bias, self.tmp_std_dev]

        self.evaluation = Evaluation(None, [], loaded_metrics)
Code Example #33
class TestEvaluation(unittest.TestCase):

    def setUp(self):
        self.eval = Evaluation(None, [], [])

        lat = np.array([10, 12, 14, 16, 18])
        lon = np.array([100, 102, 104, 106, 108])
        time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300))
        value = flat_array.reshape(12, 5, 5)
        self.variable = 'prec'
        self.other_var = 'temp'
        self.test_dataset = Dataset(lat, lon, time, value, self.variable)
        self.another_test_dataset = Dataset(lat, lon, time, value,
                                            self.other_var)

    def test_init(self):
        self.assertEqual(self.eval.ref_dataset, None)
        self.assertEqual(self.eval.target_datasets, [])
        self.assertEqual(self.eval.metrics, [])
        self.assertEqual(self.eval.unary_metrics, [])

    def test_full_init(self):
        self.eval = Evaluation(
            self.test_dataset,
            [self.test_dataset, self.another_test_dataset],
            [Bias(), Bias(), TemporalStdDev()])
        ref_dataset = self.test_dataset
        target_datasets = [self.test_dataset, self.another_test_dataset]
        metrics = [Bias(), Bias()]
        unary_metrics = [TemporalStdDev()]

        self.eval = Evaluation(ref_dataset,
                               target_datasets,
                               metrics + unary_metrics)

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)

        # Make sure the two target datasets were added properly
        self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
        self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)

        # Make sure the three metrics were added properly
        # The two Bias metrics are "binary" metrics
        self.assertEqual(len(self.eval.metrics), 2)
        # TemporalStdDev is a "unary" metric and should be stored as such
        self.assertEqual(len(self.eval.unary_metrics), 1)
        self.eval.run()
        out_str = (
            "<Evaluation - ref_dataset: {}, "
            "target_dataset(s): {}, "
            "binary_metric(s): {}, "
            "unary_metric(s): {}, "
            "subregion(s): {}>"
        ).format(
            str(self.test_dataset),
            [str(ds) for ds in target_datasets],
            [str(m) for m in metrics],
            [str(u) for u in unary_metrics],
            None
        )
        self.assertEqual(str(self.eval), out_str)

    def test_valid_ref_dataset_setter(self):
        self.eval.ref_dataset = self.another_test_dataset
        self.assertEqual(self.eval.ref_dataset.variable,
                         self.another_test_dataset.variable)

    def test_invalid_ref_dataset(self):
        with self.assertRaises(TypeError):
            self.eval.ref_dataset = "This isn't a Dataset object"

    def test_valid_subregion(self):
        bound = Bounds(
            lat_min=-10, lat_max=10,
            lon_min=-20, lon_max=20,
            start=dt.datetime(2000, 1, 1), end=dt.datetime(2001, 1, 1))

        self.eval.subregions = [bound, bound]
        self.assertEqual(len(self.eval.subregions), 2)

    def test_invalid_subregion_bound(self):
        bound = "This is not a bounds object"

        with self.assertRaises(TypeError):
            self.eval.subregions = [bound]

    def test_add_ref_dataset(self):
        self.eval = Evaluation(self.test_dataset, [], [])

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)

    def test_add_valid_dataset(self):
        self.eval.add_dataset(self.test_dataset)

        self.assertEqual(self.eval.target_datasets[0].variable,
                         self.variable)

    def test_add_invalid_dataset(self):
        with self.assertRaises(TypeError):
            self.eval.add_dataset('This is an invalid dataset')

    def test_add_datasets(self):
        self.eval.add_datasets([self.test_dataset, self.another_test_dataset])

        self.assertEqual(len(self.eval.target_datasets), 2)
        self.assertEqual(self.eval.target_datasets[0].variable,
                         self.variable)
        self.assertEqual(self.eval.target_datasets[1].variable,
                         self.other_var)

    def test_add_valid_metric(self):
        # Add a "binary" metric
        self.assertEqual(len(self.eval.metrics), 0)
        self.eval.add_metric(Bias())
        self.assertEqual(len(self.eval.metrics), 1)

        # Add a "unary" metric
        self.assertEqual(len(self.eval.unary_metrics), 0)
        self.eval.add_metric(TemporalStdDev())
        self.assertEqual(len(self.eval.unary_metrics), 1)

    def test_add_invalid_metric(self):
        with self.assertRaises(TypeError):
            self.eval.add_metric('This is an invalid metric')

    def test_add_metrics(self):
        self.assertEqual(len(self.eval.metrics), 0)
        self.eval.add_metrics([Bias(), Bias()])
        self.assertEqual(len(self.eval.metrics), 2)

    def test_invalid_evaluation_run(self):
        self.eval = Evaluation(None, [], [])
        self.assertEqual(self.eval.run(), None)

    def test_bias_output_shape(self):
        bias_eval = Evaluation(self.test_dataset, [
                               self.another_test_dataset], [Bias()])
        bias_eval.run()
        input_shape = tuple(self.test_dataset.values.shape)
        bias_results_shape = tuple(bias_eval.results[0][0].shape)
        self.assertEqual(input_shape, bias_results_shape)

    def test_result_shape(self):
        bias_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset,
                self.another_test_dataset],
            [Bias(), Bias()]
        )
        bias_eval.run()

        # Expected result shape is
        # [bias, bias] where bias.shape[0] = number of datasets
        self.assertTrue(len(bias_eval.results) == 2)
        self.assertTrue(bias_eval.results[0].shape[0] == 3)

    def test_unary_result_shape(self):
        new_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset,
                self.another_test_dataset, self.another_test_dataset],
            [TemporalStdDev()]
        )
        new_eval.run()

        # Expected result shape is
        # [stddev] where stddev.shape[0] = number of datasets

        self.assertTrue(len(new_eval.unary_results) == 1)
        self.assertTrue(new_eval.unary_results[0].shape[0] == 5)

    def test_subregion_result_shape(self):
        bound = Bounds(
            lat_min=10, lat_max=18,
            lon_min=100, lon_max=108,
            start=dt.datetime(2000, 1, 1), end=dt.datetime(2000, 3, 1))

        bias_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset],
            [Bias()],
            [bound]
        )
        bias_eval.run()

        # Expected result shape is
        # [
        #       [   # Subregions cause this extra layer
        #           [number of targets, bias.run(reference, target1).shape]
        #       ]
        # ]
        self.assertTrue(len(bias_eval.results) == 1)
        self.assertTrue(len(bias_eval.results[0]) == 1)
        self.assertTrue(bias_eval.results[0][0].shape[0] == 2)
        self.assertTrue(isinstance(bias_eval.results, type([])))

    def test_subregion_unary_result_shape(self):
        bound = Bounds(
            lat_min=10, lat_max=18,
            lon_min=100, lon_max=108,
            start=dt.datetime(2000, 1, 1), end=dt.datetime(2000, 3, 1))

        new_eval = Evaluation(
            self.test_dataset,
            [self.another_test_dataset, self.another_test_dataset],
            [TemporalStdDev(), TemporalStdDev()],
            [bound, bound, bound, bound, bound]
        )
        new_eval.run()

        # Expected result shape is
        # [
        #       [   # Subregions cause this extra layer
        #           [3, temporalstddev.run(reference).shape],
        #       ]
        # ]

        # 5 = number of subregions
        self.assertTrue(len(new_eval.unary_results) == 5)
        # number of metrics
        self.assertTrue(len(new_eval.unary_results[0]) == 2)
        self.assertTrue(isinstance(new_eval.unary_results, type([])))
        # number of datasets (ref + target)
        self.assertTrue(new_eval.unary_results[0][0].shape[0] == 3)
Code Example #34
    def test_empty_metrics_in_evaluation(self):
        new_eval = Evaluation(None, [], [])
        out = writer.generate_metric_information(new_eval)

        self.assertTrue(isinstance(out, list))
        self.assertTrue(len(out) == 0)
Code Example #35
    def test_invalid_evaluation_run(self):
        self.eval = Evaluation(None, [], [])
        self.assertEqual(self.eval.run(), None)
Code Example #36
    def test_add_ref_dataset(self):
        self.eval = Evaluation(self.test_dataset, [], [])

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)
Code Example #37
    @classmethod  # unittest requires setUpClass to be a classmethod
    def setUpClass(self):
        self.lats = np.array([10, 12, 14, 16, 18])
        self.lons = np.array([100, 102, 104, 106, 108])
        self.times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300))
        self.values = flat_array.reshape(12, 5, 5)
        self.variable = 'var'
        self.units = 'units'
        self.name = 'name'

        self.local_origin = {
            'source': 'local',
            'path': '/a/fake/path.nc',
            'lat_name': 'a lat name',
            'lon_name': 'a lon name',
            'time_name': 'a time name',
            'elevation_index': 2
        }

        self.rcmed_origin = {
            'source': 'rcmed',
            'dataset_id': 4,
            'parameter_id': 14
        }

        self.esgf_origin = {
            'source': 'esgf',
            'dataset_id': 'esgf dataset id',
            'variable': 'var'
        }

        self.dap_origin = {
            'source': 'dap',
            'url': 'a fake url',
        }

        self.local_ds = Dataset(self.lats,
                                self.lons,
                                self.times,
                                self.values,
                                variable=self.variable,
                                units=self.units,
                                name=self.name,
                                origin=self.local_origin)

        self.rcmed_ds = Dataset(self.lats,
                                self.lons,
                                self.times,
                                self.values,
                                variable=self.variable,
                                units=self.units,
                                name=self.name,
                                origin=self.rcmed_origin)

        self.esgf_ds = Dataset(self.lats,
                               self.lons,
                               self.times,
                               self.values,
                               variable=self.variable,
                               units=self.units,
                               name=self.name,
                               origin=self.esgf_origin)

        self.dap_ds = Dataset(self.lats,
                              self.lons,
                              self.times,
                              self.values,
                              variable=self.variable,
                              units=self.units,
                              name=self.name,
                              origin=self.dap_origin)

        self.subregions = [
            Bounds(lat_min=-10, lat_max=10, lon_min=-20, lon_max=20),
            Bounds(lat_min=-5, lat_max=5, lon_min=-15, lon_max=15)
        ]

        self.evaluation = Evaluation(
            self.local_ds, [self.rcmed_ds, self.esgf_ds, self.dap_ds],
            [metrics.Bias(), metrics.TemporalStdDev()],
            subregions=self.subregions)
Code Example #38
def run_evaluation():
    ''' Run an OCW Evaluation.

    *run_evaluation* expects the Evaluation parameters to be POSTed in
    the following format.

    .. sourcecode:: javascript

        {
            reference_dataset: {
                // Id that tells us how we need to load this dataset.
                'data_source_id': 1 == local, 2 == rcmed,

                // Dict of data_source specific identifying information.
                //
                // if data_source_id == 1 == local:
                // {
                //     'id': The path to the local file on the server for loading.
                //     'var_name': The variable data to pull from the file.
                //     'lat_name': The latitude variable name.
                //     'lon_name': The longitude variable name.
                //     'time_name': The time variable name
                //     'name': Optional dataset name
                // }
                //
                // if data_source_id == 2 == rcmed:
                // {
                //     'dataset_id': The dataset id to grab from RCMED.
                //     'parameter_id': The variable id value used by RCMED.
                //     'name': Optional dataset name
                // }
                'dataset_info': {..}
            },

            // The list of target datasets to use in the Evaluation. The data
            // format for the dataset objects should be the same as the
            // reference_dataset above.
            'target_datasets': [{...}, {...}, ...],

            // All the datasets are re-binned to the reference dataset
            // before being added to an experiment. This step (in degrees)
            // is used when re-binning both the reference and target datasets.
            'spatial_rebin_lat_step': The lat degree step. Integer > 0,

            // Same as above, but for lon
            'spatial_rebin_lon_step': The lon degree step. Integer > 0,

            // The temporal resolution to use when doing a temporal re-bin
            // This is a timedelta of days to use so daily == 1, monthly is
            // (1, 31], annual/yearly is (31, 366], and full is anything > 366.
            'temporal_resolution': Integer in range(1, 999),

            // A list of the metric class names to use in the evaluation. The
            // names must match the class name exactly.
            'metrics': [Bias, TemporalStdDev, ...]

            // The bounding values used in the Evaluation. Note that lat values
            // should range from -90 to 90 and lon values from -180 to 180.
            'start_time': start time value in the format '%Y-%m-%d %H:%M:%S',
            'end_time': end time value in the format '%Y-%m-%d %H:%M:%S',
            'lat_min': The minimum latitude value,
            'lat_max': The maximum latitude value,
            'lon_min': The minimum longitude value,
            'lon_max': The maximum longitude value,

            // NOTE: At the moment, subregion support is fairly minimal. This
            // will be addressed in the future. Ideally, the user should be able
            // to load a file that they have locally. That would change the
            // format that this data is passed.
            'subregion_information': Path to a subregion file on the server.
        }
    '''
    # TODO: validate input parameters and return an error if not valid

    eval_time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    data = request.json

    eval_bounds = {
        'start_time': datetime.strptime(data['start_time'], '%Y-%m-%d %H:%M:%S'),
        'end_time': datetime.strptime(data['end_time'], '%Y-%m-%d %H:%M:%S'),
        'lat_min': float(data['lat_min']),
        'lat_max': float(data['lat_max']),
        'lon_min': float(data['lon_min']),
        'lon_max': float(data['lon_max'])
    }

    # Load all the datasets
    ref_dataset = _process_dataset_object(data['reference_dataset'], eval_bounds)

    target_datasets = [_process_dataset_object(obj, eval_bounds)
                       for obj in data['target_datasets']]

    # Normalize the dataset time values so they break on consistent days of the
    # month or time of the day, depending on how they will be rebinned.
    resolution = data['temporal_resolution']
    time_delta = timedelta(days=resolution)

    time_step = 'daily' if resolution == 1 else 'monthly'
    ref_dataset = dsp.normalize_dataset_datetimes(ref_dataset, time_step)
    target_datasets = [dsp.normalize_dataset_datetimes(ds, time_step)
                       for ds in target_datasets]

    # Subset the datasets
    start = eval_bounds['start_time']
    end = eval_bounds['end_time']

    # Normalize all the values to the first of the month if we're not
    # dealing with daily data. This will ensure that a valid subregion
    # isn't considered out of bounds due to a dataset's time values
    # being shifted to the first of the month.
    if time_step != 'daily':
        if start.day != 1:
            day_offset = start.day - 1
            start -= timedelta(days=day_offset)

        if end.day != 1:
            day_offset = end.day - 1
            end -= timedelta(days=day_offset)

    subset = Bounds(eval_bounds['lat_min'],
                    eval_bounds['lat_max'],
                    eval_bounds['lon_min'],
                    eval_bounds['lon_max'],
                    start,
                    end)

    ref_dataset = dsp.safe_subset(ref_dataset, subset)
    target_datasets = [dsp.safe_subset(ds, subset)
                       for ds in target_datasets]

    # Do temporal re-bin based off of passed resolution
    ref_dataset = dsp.temporal_rebin(ref_dataset, time_delta)
    target_datasets = [dsp.temporal_rebin(ds, time_delta)
                       for ds in target_datasets]

    # Do spatial re-bin based off of reference dataset + lat/lon steps
    lat_step = data['spatial_rebin_lat_step']
    lon_step = data['spatial_rebin_lon_step']
    lat_bins, lon_bins = _calculate_new_latlon_bins(eval_bounds,
                                                    lat_step,
                                                    lon_step)

    ref_dataset = dsp.spatial_regrid(ref_dataset, lat_bins, lon_bins)
    target_datasets = [dsp.spatial_regrid(ds, lat_bins, lon_bins)
                       for ds in target_datasets]

    # Load metrics
    loaded_metrics = _load_metrics(data['metrics'])

    # Prime evaluation object with data
    evaluation = Evaluation(ref_dataset, target_datasets, loaded_metrics)

    # Run evaluation
    evaluation.run()

    # Plot
    _generate_evaluation_plots(evaluation, lat_bins, lon_bins, eval_time_stamp)

    return json.dumps({'eval_work_dir': eval_time_stamp})
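To make the documented POST format concrete, here is a hypothetical request body matching the docstring above; all values are illustrative placeholders:

# Illustrative payload for run_evaluation, following the docstring's format.
payload = {
    'reference_dataset': {
        'data_source_id': 1,  # 1 == local
        'dataset_info': {
            'id': '/path/on/server/obs.nc',
            'var_name': 'tas',
            'lat_name': 'lat',
            'lon_name': 'lon',
            'time_name': 'time',
            'name': 'Observations',
        },
    },
    'target_datasets': [
        {
            'data_source_id': 2,  # 2 == rcmed
            'dataset_info': {'dataset_id': 4, 'parameter_id': 14, 'name': 'Model A'},
        },
    ],
    'spatial_rebin_lat_step': 1,
    'spatial_rebin_lon_step': 1,
    'temporal_resolution': 30,  # (1, 31] == monthly
    'metrics': ['Bias'],
    'start_time': '2000-01-01 00:00:00',
    'end_time': '2002-01-01 00:00:00',
    'lat_min': -45.0,
    'lat_max': 45.0,
    'lon_min': -90.0,
    'lon_max': 90.0,
}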
Code Example #39
File: processing.py Project: darth-pr/climate
def run_evaluation():
    ''' Run an OCW Evaluation.

    *run_evaluation* expects the Evaluation parameters to be POSTed in
    the following format.

    .. sourcecode:: javascript

        {
            reference_dataset: {
                // Id that tells us how we need to load this dataset.
                'data_source_id': 1 == local, 2 == rcmed,

                // Dict of data_source specific identifying information.
                //
                // if data_source_id == 1 == local:
                // {
                //     'id': The path to the local file on the server for loading.
                //     'var_name': The variable data to pull from the file.
                //     'lat_name': The latitude variable name.
                //     'lon_name': The longitude variable name.
                //     'time_name': The time variable name
                //     'name': Optional dataset name
                // }
                //
                // if data_source_id == 2 == rcmed:
                // {
                //     'dataset_id': The dataset id to grab from RCMED.
                //     'parameter_id': The variable id value used by RCMED.
                //     'name': Optional dataset name
                // }
                'dataset_info': {..}
            },

            // The list of target datasets to use in the Evaluation. The data
            // format for the dataset objects should be the same as the
            // reference_dataset above.
            'target_datasets': [{...}, {...}, ...],

            // All the datasets are re-binned to the reference dataset
            // before being added to an experiment. This step (in degrees)
            // is used when re-binning both the reference and target datasets.
            'spatial_rebin_lat_step': The lat degree step. Integer > 0,

            // Same as above, but for lon
            'spatial_rebin_lon_step': The lon degree step. Integer > 0,

            // The temporal resolution to use when doing a temporal re-bin
            // This is a timedelta of days to use so daily == 1, monthly is
            // (1, 31], annual/yearly is (31, 366], and full is anything > 366.
            'temporal_resolution': Integer in range(1, 999),

            // A list of the metric class names to use in the evaluation. The
            // names must match the class name exactly.
            'metrics': [Bias, TemporalStdDev, ...]

            // The bounding values used in the Evaluation. Note that lat values
            // should range from -90 to 90 and lon values from -180 to 180.
            'start_time': start time value in the format '%Y-%m-%d %H:%M:%S',
            'end_time': end time value in the format '%Y-%m-%d %H:%M:%S',
            'lat_min': The minimum latitude value,
            'lat_max': The maximum latitude value,
            'lon_min': The minimum longitude value,
            'lon_max': The maximum longitude value,

            // NOTE: At the moment, subregion support is fairly minimal. This
            // will be addressed in the future. Ideally, the user should be able
            // to load a file that they have locally. That would change the
            // format that this data is passed.
            'subregion_information': Path to a subregion file on the server.
        }
    '''
    # TODO: validate input parameters and return an error if not valid

    eval_time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    data = request.json

    eval_bounds = {
        'start_time': datetime.strptime(data['start_time'], '%Y-%m-%d %H:%M:%S'),
        'end_time': datetime.strptime(data['end_time'], '%Y-%m-%d %H:%M:%S'),
        'lat_min': float(data['lat_min']),
        'lat_max': float(data['lat_max']),
        'lon_min': float(data['lon_min']),
        'lon_max': float(data['lon_max'])
    }

    # Load all the datasets
    ref_dataset = _process_dataset_object(data['reference_dataset'], eval_bounds)

    target_datasets = [_process_dataset_object(obj, eval_bounds)
                       for obj in data['target_datasets']]

    # Normalize the dataset time values so they break on consistent days of the
    # month or time of the day, depending on how they will be rebinned.
    resolution = data['temporal_resolution']
    time_delta = timedelta(days=resolution)

    time_step = 'daily' if resolution == 1 else 'monthly'
    ref_dataset = dsp.normalize_dataset_datetimes(ref_dataset, time_step)
    target_datasets = [dsp.normalize_dataset_datetimes(ds, time_step)
                       for ds in target_datasets]

    # Subset the datasets
    start = eval_bounds['start_time']
    end = eval_bounds['end_time']

    # Normalize all the values to the first of the month if we're not
    # dealing with daily data. This will ensure that a valid subregion
    # isn't considered out of bounds due to a dataset's time values
    # being shifted to the first of the month.
    if time_step != 'daily':
        if start.day != 1:
            day_offset = start.day - 1
            start -= timedelta(days=day_offset)

        if end.day != 1:
            day_offset = end.day - 1
            end -= timedelta(days=day_offset)

    subset = Bounds(eval_bounds['lat_min'],
                    eval_bounds['lat_max'],
                    eval_bounds['lon_min'],
                    eval_bounds['lon_max'],
                    start,
                    end)

    ref_dataset = dsp.safe_subset(subset, ref_dataset)
    target_datasets = [dsp.safe_subset(subset, ds)
                       for ds in target_datasets]

    # Do temporal re-bin based off of passed resolution
    ref_dataset = dsp.temporal_rebin(ref_dataset, time_delta)
    target_datasets = [dsp.temporal_rebin(ds, time_delta)
                       for ds in target_datasets]

    # Do spatial re-bin based off of reference dataset + lat/lon steps
    lat_step = data['spatial_rebin_lat_step']
    lon_step = data['spatial_rebin_lon_step']
    lat_bins, lon_bins = _calculate_new_latlon_bins(eval_bounds,
                                                    lat_step,
                                                    lon_step)

    ref_dataset = dsp.spatial_regrid(ref_dataset, lat_bins, lon_bins)
    target_datasets = [dsp.spatial_regrid(ds, lat_bins, lon_bins)
                       for ds in target_datasets]

    # Load metrics
    loaded_metrics = _load_metrics(data['metrics'])

    # Prime evaluation object with data
    evaluation = Evaluation(ref_dataset, target_datasets, loaded_metrics)

    # Run evaluation
    evaluation.run()

    # Plot
    _generate_evaluation_plots(evaluation, lat_bins, lon_bins, eval_time_stamp)

    return json.dumps({'eval_work_dir': eval_time_stamp})
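Compatibility note: this snapshot calls dsp.safe_subset(subset, ds) with the Bounds first, while Example #38 calls dsp.safe_subset(ds, subset) with the dataset first. The argument order of safe_subset evidently changed between the OCW versions these projects track, so match the order to the ocw.dataset_processor release you are running against.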
Code Example #40
File: test_evaluation.py Project: AtomLaw/climate
class TestEvaluation(unittest.TestCase):
    def setUp(self):
        self.eval = Evaluation(None, [], [])

        lat = np.array([10, 12, 14, 16, 18])
        lon = np.array([100, 102, 104, 106, 108])
        time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300))
        value = flat_array.reshape(12, 5, 5)
        self.variable = 'prec'
        self.other_var = 'temp'
        self.test_dataset = Dataset(lat, lon, time, value, self.variable)
        self.another_test_dataset = Dataset(lat, lon, time, value,
                                            self.other_var)

    def test_init(self):
        self.assertEqual(self.eval.ref_dataset, None)
        self.assertEqual(self.eval.target_datasets, [])
        self.assertEqual(self.eval.metrics, [])
        self.assertEqual(self.eval.unary_metrics, [])

    def test_full_init(self):
        self.eval = Evaluation(
            self.test_dataset,
            [self.test_dataset, self.another_test_dataset],
            [Bias(), Bias(), TemporalStdDev()])

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)

        # Make sure the two target datasets were added properly
        self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
        self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)

        # Make sure the three metrics were added properly
        # The two Bias metrics are "binary" metrics
        self.assertEqual(len(self.eval.metrics), 2)
        # TemporalStdDev is a "unary" metric and should be stored as such
        self.assertEqual(len(self.eval.unary_metrics), 1)

    def test_invalid_ref_dataset(self):
        with self.assertRaises(TypeError):
            self.eval.ref_dataset = "This isn't a Dataset object"

    def test_valid_subregion(self):
        bound = Bounds(-10, 10,
                       -20, 20,
                       dt.datetime(2000, 1, 1), dt.datetime(2001, 1, 1))

        self.eval.subregions = [bound, bound]
        self.assertEqual(len(self.eval.subregions), 2)

    def test_invalid_subregion_bound(self):
        bound = "This is not a bounds object"

        with self.assertRaises(TypeError):
            self.eval.subregions = [bound]

    def test_add_ref_dataset(self):
        self.eval = Evaluation(self.test_dataset, [], [])

        self.assertEqual(self.eval.ref_dataset.variable, self.variable)

    def test_add_dataset(self):
        self.eval.add_dataset(self.test_dataset)

        self.assertEqual(self.eval.target_datasets[0].variable,
                         self.variable)

    def test_add_datasets(self):
        self.eval.add_datasets([self.test_dataset, self.another_test_dataset])

        self.assertEqual(len(self.eval.target_datasets), 2)
        self.assertEqual(self.eval.target_datasets[0].variable,
                         self.variable)
        self.assertEqual(self.eval.target_datasets[1].variable,
                         self.other_var)

    def test_add_metric(self):
        # Add a "binary" metric
        self.assertEqual(len(self.eval.metrics), 0)
        self.eval.add_metric(Bias())
        self.assertEqual(len(self.eval.metrics), 1)

        # Add a "unary" metric
        self.assertEqual(len(self.eval.unary_metrics), 0)
        self.eval.add_metric(TemporalStdDev())
        self.assertEqual(len(self.eval.unary_metrics), 1)

    def test_add_metrics(self):
        self.assertEqual(len(self.eval.metrics), 0)
        self.eval.add_metrics([Bias(), Bias()])
        self.assertEqual(len(self.eval.metrics), 2)
    
    def test_bias_output_shape(self):
        bias_eval = Evaluation(self.test_dataset, [self.another_test_dataset],
                               [Bias()])
        bias_eval.run()
        input_shape = tuple(self.test_dataset.values.shape)
        bias_results_shape = tuple(bias_eval.results[0][0].shape)
        self.assertEqual(input_shape, bias_results_shape)