Example #1
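    # Assumed imports for these test excerpts (not shown in the original
    # snippet; the exact module paths are a best guess):
    #
    #     import numpy as np
    #     from numpy.testing import assert_array_equal
    #     from esmvalcore.preprocessor import seasonal_statistics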
    def test_season_max(self):
        """Test for season max of a 1D field."""
        data = np.arange(12)
        times = np.arange(15, 360, 30)
        cube = self._create_cube(data, times)

        result = seasonal_statistics(cube, 'max')
        expected = np.array([4., 7., 10.])
        assert_array_equal(result.data, expected)
Example #2
    def test_season_sum(self):
        """Test for season sum of a 1D field."""
        data = np.arange(12)
        times = np.arange(15, 360, 30)
        cube = self._create_cube(data, times)

        result = seasonal_statistics(cube, 'sum')
        expected = np.array([9., 18., 27.])
        assert_array_equal(result.data, expected)
Example #3
    def test_season_mean(self):
        """Test for season average of a 1D field."""
        data = np.arange(12)
        times = np.arange(15, 360, 30)
        cube = self._create_cube(data, times)

        result = seasonal_statistics(cube, 'mean')
        expected = np.array([3., 6., 9.])
        assert_array_equal(result.data, expected)
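All three tests rely on a _create_cube helper that is not shown in this excerpt. A minimal sketch of what such a helper could look like, assuming an Iris cube with a monthly time coordinate (the variable name, units, and calendar here are guesses, not the original implementation):

import numpy as np
from iris.coords import DimCoord
from iris.cube import Cube

def _create_cube(data, times):
    """Hypothetical helper: build a 1D cube with a monthly time axis.

    The real test class defines its own _create_cube; this sketch only
    reproduces what the tests above need: twelve points whose 'time'
    coordinate spans January to December of a single year.
    """
    time = DimCoord(times, standard_name='time',
                    units='days since 1950-01-01 00:00:00')
    time.guess_bounds()
    return Cube(data.astype(np.float64), var_name='tas', units='K',
                dim_coords_and_dims=[(time, 0)])

With such a cube, seasonal_statistics groups the twelve months into seasons and drops the incomplete DJF season (December belongs to the next season-year), leaving MAM, JJA, and SON. That is why each expected array has three elements: e.g. MAM covers data points 2, 3, 4, giving max 4, sum 9, and mean 3.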
Example #4
    def compute(self):
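        # Assumed module-level imports for this diagnostic (not shown in
        # the excerpt; inferred from the calls below):
        #
        #     import iris
        #     from esmvalcore.preprocessor import (anomalies,
        #         climate_statistics, seasonal_statistics)
        #     from esmvaltool.diag_scripts.shared import group_metadata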
        # ---------------------------------------------------------------------
        # Every dataset in the recipe is associated with an alias. We are going
        # to use the alias and the group_metadata shared function to loop over
        # the datasets.
        # ---------------------------------------------------------------------
        data = group_metadata(self.cfg['input_data'].values(), 'alias')
        total = {}
        # Loop over the datasets.
        for alias in data:
            # -----------------------------------------------------------------
            # Use the group_metadata function again so that, for each dataset,
            # the metadata dictionary is organised by the variables'
            # short name.
            # -----------------------------------------------------------------
            variables = group_metadata(data[alias], 'short_name')
            # Get the path to the preprocessed file for each variable.
            tas_file = variables['tas'][0]['filename']
            # tos_file = variables['tos'][0]['filename']

            # -----------------------------------------------------------------
            # Now it is up to you to load the data and work with it with your
            # preferred libraries and methods. We are going to continue the
            # example using Iris and ESMValTool functions.
            # Note that all preprocessor routines can be called inside the
            # diag_script by adding the corresponding imports.
            # -----------------------------------------------------------------
            tas = iris.load(tas_file)[0]
            tas.convert_units('degC')
            # Seasonal means along the full time series.
            y_mn = seasonal_statistics(tas, 'mean')
            # Seasonal climatology and its standard deviation; take the
            # first season slice of each result.
            mn = climate_statistics(tas, 'mean', 'season')[0]
            std = climate_statistics(tas, 'std_dev', 'season')[0]
            # anom = anomalies(y_mn, 'season')
            # Dump the intermediate results to a scratch file so they can
            # be inspected while developing the diagnostic.
            with open("/esarchive/scratch/jcos/esmvaltool/scripts/sample.txt",
                      "w") as f:
                f.write(str(tas))
                f.write('\n******************** y_mn[0] ********************\n')
                f.write(str(y_mn[0]))
                f.write('\n******************** y_mn[1] ********************\n')
                f.write(str(y_mn[1]))
                f.write('\n******************** std ************************\n')
                f.write(str(std))
                f.write('\n*************************************************\n')
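For reference, a self-contained sketch of the nested group_metadata pattern that compute() relies on above. The metadata records, aliases, and paths are invented for illustration; only the grouping calls mirror the original code:

from esmvaltool.diag_scripts.shared import group_metadata

# Toy records mimicking cfg['input_data'].values() (hypothetical values).
input_data = [
    {'alias': 'CMIP6_model', 'short_name': 'tas', 'filename': '/path/tas.nc'},
    {'alias': 'CMIP6_model', 'short_name': 'tos', 'filename': '/path/tos.nc'},
    {'alias': 'OBS', 'short_name': 'tas', 'filename': '/path/obs_tas.nc'},
]

# First group by dataset alias, then, per dataset, by variable short_name:
# the same two-level lookup compute() performs before reading the files.
for alias, records in group_metadata(input_data, 'alias').items():
    variables = group_metadata(records, 'short_name')
    print(alias, '->', sorted(variables))
    # CMIP6_model -> ['tas', 'tos']
    # OBS -> ['tas']

Grouping twice keeps the diagnostic independent of how many datasets and variables the recipe provides: each dataset is processed in one loop iteration, and each variable's preprocessed file is looked up by its short name.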