Example 1
    def test_period_not_supported(self):
        """Test for time avg of a realistic time axis and 365 day calendar"""
        data = np.ones((6, ))
        times = np.array([15, 45, 74, 105, 135, 166])
        bounds = np.array([[0, 31], [31, 59], [59, 90], [90, 120], [120, 151],
                           [151, 181]])
        cube = self._create_cube(data, times, bounds)

        with self.assertRaises(ValueError):
            climate_statistics(cube, operator='mean', period='bad')
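The self._create_cube helper used throughout these tests is not shown in the excerpts. A minimal sketch of what it presumably does, assuming a single bounded time axis on a 365-day calendar (the reference date, units, and variable name are assumptions):

import iris
from cf_units import Unit


def _create_cube(data, times, bounds):
    # Assumed helper: build a 1D cube whose only dimension is a bounded
    # time axis on a 365-day calendar, matching the test docstrings.
    time = iris.coords.DimCoord(
        times,
        bounds=bounds,
        standard_name='time',
        units=Unit('days since 1950-01-01', calendar='365_day'),
    )
    return iris.cube.Cube(data, var_name='x', dim_coords_and_dims=[(time, 0)])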
Example 2
def test_climate_statistics_complex_cube():
    """Test climate statistics."""
    cube = _make_cube()
    new_cube = climate_statistics(cube, operator='sum', period='full')
    assert cube.shape == (2, 1, 1, 3)
    assert new_cube.shape == (1, 1, 3)
    np.testing.assert_allclose(new_cube.data, [[[45.0, 45.0, 45.0]]])
Example 3
    def compute(self):
        # ---------------------------------------------------------------------
        # Every dataset in the recipe is associated with an alias. We are going
        # to use the alias and the group_metadata shared function to loop over
        # the datasets.
        # ---------------------------------------------------------------------
        data = group_metadata(self.cfg['input_data'].values(), 'alias')
        total = {}
        # Loop over the datasets.
        for alias in data:
            # -----------------------------------------------------------------
            # Use the group_metadata function again so that, for each dataset,
            # the metadata dictionary is organised by the variables'
            # short name.
            # -----------------------------------------------------------------
            variables = group_metadata(data[alias], 'short_name')
            # Get the path to the preprocessed file.
            tas_file = variables['tas'][0]['filename']
            # tos_file = variables['tos'][0]['filename']

            # -----------------------------------------------------------------
            # Now it is up to you to load the data and work with it with your
            # preferred libraries and methods. We are going to continue the
            # example using Iris and ESMValTool functions.
            # Note that all preprocessor routines can be called inside the
            # diag_script by adding the corresponding imports.
            # -----------------------------------------------------------------
            tas = iris.load(tas_file)[0]
            tas.convert_units('degC')
            y_mn = seasonal_statistics(tas, 'mean')
            # Compute the seasonal climatology and standard deviation.
            mn = climate_statistics(tas, 'mean', 'season')[0]
            std = climate_statistics(tas, 'std_dev', 'season')[0]
            # anom = anomalies(y_mn, 'season')
            with open("/esarchive/scratch/jcos/esmvaltool/scripts/sample.txt",
                      "w") as f:
                f.write(str(tas))
                f.write('******************************** \\ y_mean******')
                f.write(str(y_mn[0]))
                f.write('********************************\\ mn******')
                f.write(str(mn))
                f.write('******************************\\ std*******')
                f.write(str(std))
                f.write('********************************')
                f.write(str(0))
                f.write('********************************')
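This compute method is an excerpt from a diagnostic class, so its module-level imports are not shown. The names it relies on would be imported roughly as below; the paths follow the usual ESMValTool/ESMValCore layout and should be read as an assumption for this excerpt:

import iris
from esmvalcore.preprocessor import climate_statistics, seasonal_statistics
from esmvaltool.diag_scripts.shared import group_metadata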
Example 4
    def test_time_median(self):
        """Test for time meadian of a 1D field."""
        data = np.arange(3)
        times = np.array([15., 45., 75.])
        bounds = np.array([[0., 30.], [30., 60.], [60., 90.]])
        cube = self._create_cube(data, times, bounds)

        result = climate_statistics(cube, operator='median')
        expected = np.array([1.])
        assert_array_equal(result.data, expected)
Example 5
    def test_time_sum_uneven(self):
        """Test for time sum of a 1D field with uneven time boundaries."""
        data = np.array([1., 5.])
        times = np.array([5., 25.])
        bounds = np.array([[0., 1.], [1., 4.]])
        cube = self._create_cube(data, times, bounds)

        result = climate_statistics(cube, operator='sum')
        expected = np.array([16.0])
        assert_array_equal(result.data, expected)
Example 6
    def test_day(self):
        """Test for time avg of a realistic time axis and 365 day calendar"""
        data = np.ones((6, ))
        times = np.array([0.5, 1.5, 2.5, 365.5, 366.5, 367.5])
        bounds = np.array([[0, 1], [1, 2], [2, 3], [365, 366], [366, 367],
                           [367, 368]])
        cube = self._create_cube(data, times, bounds)

        result = climate_statistics(cube, operator='mean', period='day')
        expected = np.array([1, 1, 1])
        assert_array_equal(result.data, expected)
Example 7
    def test_monthly(self):
        """Test for time avg of a realistic time axis and 365 day calendar"""
        data = np.ones((6, ))
        times = np.array([15, 45, 74, 105, 135, 166])
        bounds = np.array([[0, 31], [31, 59], [59, 90], [90, 120], [120, 151],
                           [151, 181]])
        cube = self._create_cube(data, times, bounds)

        result = climate_statistics(cube, operator='mean', period='mon')
        expected = np.ones((6, ))
        assert_array_equal(result.data, expected)
Example 8
    def test_time_sum(self):
        """Test for time sum of a 1D field."""
        data = np.ones(3)
        data[1] = 2.0
        times = np.array([15., 45., 75.])
        bounds = np.array([[0., 30.], [30., 60.], [60., 90.]])
        cube = self._create_cube(data, times, bounds)

        result = climate_statistics(cube, operator='sum')
        expected = np.array([120.])
        assert_array_equal(result.data, expected)
Example 9
    def test_time_sum_365_day(self):
        """Test for time sum of a realistic time axis and 365 day calendar"""
        data = np.ones((6, ))
        data[3] = 2.0
        times = np.array([15, 45, 74, 105, 135, 166])
        bounds = np.array([[0, 31], [31, 59], [59, 90], [90, 120], [120, 151],
                           [151, 181]])
        cube = self._create_cube(data, times, bounds)

        result = climate_statistics(cube, operator='sum')
        expected = np.array([211.])
        assert_array_equal(result.data, expected)
Example 10
def test_climate_statistics_0d_time_1d_lon():
    """Test climate statistics."""
    time = iris.coords.DimCoord([1.0], bounds=[[0.0, 2.0]], var_name='time',
                                standard_name='time',
                                units='days since 1850-01-01 00:00:00')
    lons = get_lon_coord()
    cube = iris.cube.Cube([[1.0, -1.0, 42.0]], var_name='x', units='K',
                          dim_coords_and_dims=[(time, 0), (lons, 1)])
    new_cube = climate_statistics(cube, operator='sum', period='full')
    assert cube.shape == (1, 3)
    assert new_cube.shape == (3,)
    np.testing.assert_allclose(new_cube.data, [1.0, -1.0, 42.0])
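The get_lon_coord helper is likewise not shown. Since the test expects the three longitude points to pass through unchanged, a minimal sketch could look like this (the point values, bounds, and units are assumptions):

def get_lon_coord():
    # Assumed helper: a 3-point longitude coordinate.
    return iris.coords.DimCoord(
        [0.0, 1.0, 2.0],
        bounds=[[-0.5, 0.5], [0.5, 1.5], [1.5, 2.5]],
        var_name='lon',
        standard_name='longitude',
        units='degrees_east',
    )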
Example 11
    def compute(self):
        print('----------- COMPUTE ----------')
        # ---------------------------------------------------------------------
        # Every dataset in the recipe is associated with an alias. We are going
        # to use the alias and the group_metadata shared function to loop over
        # the datasets.
        # ---------------------------------------------------------------------
        data = group_metadata(self.cfg['input_data'].values(), 'alias')
        ssp_trend = {}
        ssp_clim = {}
        hist_trend = {}
        hist_clim = {}
        # Loop over the datasets.
        for alias in data:
            exp = data[alias][0]['exp']
            variables = group_metadata(data[alias], 'short_name')
            # Get the path to the preprocessed file.
            tas_file = variables['tas'][0]['filename']
            tas = iris.load(tas_file)[0]
            tas.convert_units('degC')

            # Calculate Trends
            nlat = tas.coord('latitude').shape[0]
            nlon = tas.coord('longitude').shape[0]
            lat = tas.coord('latitude').points
            lon = tas.coord('longitude').points
            time_array = np.arange(1, tas.coord('time').shape[0] + 1, 1)
            regr = np.zeros([nlat, nlon])
            for j in range(nlat):
                for k in range(nlon):
                    p = np.polyfit(time_array, tas[:, j, k].data, 1)
                    regr[j, k] = p[0] * 10  # the 10 is to convert to decadal
            latitude = DimCoord(lat, standard_name='latitude', units='degrees')
            longitude = DimCoord(lon,
                                 standard_name='longitude',
                                 units='degrees')
            regr_cube = Cube(regr,
                             dim_coords_and_dims=[(latitude, 0),
                                                  (longitude, 1)])
            # ---------- remask ---------- #
            # Computing the trends with numpy discards the mask applied
            # earlier, so another land-sea remask is needed before plotting.
            output_trend = mask_landsea(regr_cube,
                                        ['/blablabla/where/the/fx/at/'], 'sea',
                                        True)
            # Save the output trends in the cube dict
            output_trend.standard_name = None
            output_trend.long_name = 'tas_trend_med'
            output_trend.var_name = 'tastrend'  # iris cubes use var_name, not short_name

            # Calculate Climatology
            output_clim = climate_statistics(tas)
            output_clim.standard_name = None
            output_clim.long_name = 'tas_clim_med'
            output_clim.var_name = 'tasclim'

            # Save diagnosed dataset to dict. TODO: what about averaging first?
            if exp == 'historical':
                hist_trend[alias] = output_trend
                hist_clim[alias] = output_clim
            if exp == 'ssp585':
                ssp_trend[alias] = output_trend
                ssp_clim[alias] = output_clim
            # Save the outputs for each dataset.
            #self.save(output, alias, data)
        # Plot the results.
        #self.plot_2D(total, data)
        print(list(ssp_trend))
        print(len(ssp_trend))
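The nested latitude/longitude loop above calls np.polyfit once per grid cell, which is slow on large grids. np.polyfit also accepts a 2D y argument and fits each column independently, so the same decadal trend map can be computed in a single call. A sketch under the same assumptions as the loop (tas dimensions are (time, lat, lon)):

ntime = tas.coord('time').shape[0]
time_array = np.arange(1, ntime + 1)
# One least-squares fit per column: (time, lat, lon) -> (time, lat * lon).
coeffs = np.polyfit(time_array, tas.data.reshape(ntime, -1), 1)
regr = coeffs[0].reshape(nlat, nlon) * 10  # slope per decade
# Note: like the loop, this does not treat masked points specially.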
Example 12
    def compute(self):
        print('----------- COMPUTE ----------')
        # ---------------------------------------------------------------------
        # Every dataset in the recipe is associated with an alias. We are going
        # to use the alias and the group_metadata shared function to loop over
        # the datasets.
        # ---------------------------------------------------------------------
        data = group_metadata(self.cfg['input_data'].values(), 'alias')
        ssp_ts = {}
        hist_ts = {} 
        rean_ts = {}
        hist = 0
        ssp = 0
        rean = 0
        # Loop over the datasets.
        for i, alias in enumerate(data):
            exp = data[alias][0]['exp']
            variables = group_metadata(data[alias], 'short_name')
            # Get the path to the preprocessed file.
            tas_file = variables['tas'][0]['filename']
            tas = iris.load(tas_file)[0]
            tas.convert_units('degC')
            if i == 0:
                climatology = self.ref_clim(tas, 1960, 1962)
            # anomaly = tas - climatology
            # timeseries = anomaly.collapsed(['longitude', 'latitude'],
            #                                iris.analysis.MEAN)
            timeseries = tas.collapsed(['longitude', 'latitude'],
                                       iris.analysis.MEAN)
            # timeseries.long_name = 'med_r_timeseries_tas'
  
            # Calculate Trends
            nlat = tas.coord('latitude').shape[0]
            nlon = tas.coord('longitude').shape[0]
            lat = tas.coord('latitude').points
            lon = tas.coord('longitude').points
            time_array = np.arange(1, tas.coord('time').shape[0] + 1)
            regr = np.zeros([nlat, nlon])
            for j in range(nlat):
                for k in range(nlon):
                    p = np.polyfit(time_array, tas[:, j, k].data, 1)
                    regr[j, k] = p[0] * 10  # the 10 converts to a decadal trend
            latitude = DimCoord(lat, standard_name='latitude', units='degrees')
            longitude = DimCoord(lon, standard_name='longitude', units='degrees')
            regr_cube = Cube(regr, dim_coords_and_dims=[(latitude, 0), (longitude, 1)])
            # ---------- remask ---------- #
            # Computing the trends with numpy discards the mask applied
            # earlier, so another land-sea remask is needed before plotting.
            output_trend = mask_landsea(regr_cube,
                                        ['/blablabla/where/the/fx/at/'],
                                        'sea', True)
            # Save the output trends in the cube dict
            output_trend.standard_name = None 
            output_trend.long_name = 'tas_trend_med'
            output_trend.var_name = 'tastrend'  # iris cubes use var_name, not short_name
        
            # Calculate Climatology
            output_clim = climate_statistics(tas)           
            output_clim.standard_name = None
            output_clim.long_name = 'tas_clim_med'
            output_clim.var_name = 'tasclim'

            # Save diagnosed dataset to dict. TODO: what about averaging first?
            if exp == 'historical':
                hist_ts[alias] = timeseries
                if hist == 0:
                    mean_hist_trend = output_trend
                    mean_hist_clim = output_clim
                else:
                    mean_hist_trend = mean_hist_trend + output_trend
                    mean_hist_clim = mean_hist_clim + output_clim
                hist += 1
            if exp == 'ssp585':
                ssp_ts[alias] = timeseries
                if ssp == 0:
                    mean_ssp_trend = output_trend
                    mean_ssp_clim = output_clim
                else:
                    mean_ssp_trend = mean_ssp_trend + output_trend
                    mean_ssp_clim = mean_ssp_clim + output_clim
                ssp += 1
            if exp == 'reanaly':
                rean_ts[alias] = timeseries
                if rean == 0:
                    mean_rean_trend = output_trend
                    mean_rean_clim = output_clim
                else:
                    mean_rean_trend = mean_rean_trend + output_trend
                    mean_rean_clim = mean_rean_clim + output_clim
                rean += 1
            
        mean_hist_trend = mean_hist_trend / hist
        mean_hist_clim = mean_hist_clim / hist
        mean_ssp_trend = mean_ssp_trend / ssp
        mean_ssp_clim = mean_ssp_clim / ssp
        # mean_rean_trend = mean_rean_trend / rean
        # mean_rean_clim = mean_rean_clim / rean
        mean_ssp_trend.long_name = 'ssp_trend_Med'
        mean_ssp_clim.long_name = 'ssp_clim_Med'
        mean_hist_trend.long_name = 'hist_trend_Med'
        mean_hist_clim.long_name = 'hist_clim_Med'
        # mean_rean_trend.long_name = 'rean_trend_Med'
        # mean_rean_clim.long_name = 'rean_clim_Med'
                
        ##### Biases #####
        #trend_bias = mean_hist_trend - mean_rean_trend
        #clim_bias = mean_hist_clim - mean_rean_clim


        # Save the outputs for each dataset.
        # self.save(output, alias, data)
        # Plot the results.
        self.plot_2D(mean_ssp_trend)
        self.plot_1D(timeseries)   
        print(mean_ssp_trend)
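The three near-identical accumulation blocks differ only in the experiment name, so the bookkeeping can be written once by keying the running sums and counters on exp. A minimal sketch of that refactor (the dictionary layout is illustrative, not part of the original script):

from collections import defaultdict

acc = defaultdict(lambda: {'trend': None, 'clim': None, 'n': 0})

# Inside the dataset loop, once output_trend and output_clim are computed:
entry = acc[exp]
if entry['n'] == 0:
    entry['trend'], entry['clim'] = output_trend, output_clim
else:
    entry['trend'] = entry['trend'] + output_trend
    entry['clim'] = entry['clim'] + output_clim
entry['n'] += 1

# After the loop, the per-experiment mean trend and climatology:
means = {name: (e['trend'] / e['n'], e['clim'] / e['n'])
         for name, e in acc.items()}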