import iris

from esmvalcore.preprocessor import area_statistics
from esmvaltool.diag_scripts.shared import group_metadata


def run_my_diagnostic(cfg):
    """
    Simple example of a diagnostic.

    This is a basic (and rather esoteric) diagnostic that first loads
    the needed model data as iris cubes, computes the difference between
    values at ground level and at the first vertical level, then squares
    the result.

    Before plotting, we take the squared result (still an iris cube) and
    apply an area average to it. This is a useful example of how to use
    standard esmvalcore.preprocessor functionality within a diagnostic,
    especially when a custom diagnostic has already been run and the user
    needs to perform an operation that is part of the preprocessor's
    standard library of functions.

    Users will implement their own (custom) diagnostics, but this
    example shows that once the preprocessor has finished, a whole lot of
    user-specific metrics can be computed as part of the diagnostic,
    and then plotted in various manners.

    Arguments:
        cfg - nested dictionary of metadata

    Returns:
        string; a completion message

    """
    # assemble the data dictionary keyed by dataset name
    # this makes use of the handy group_metadata function that
    # orders the data by 'dataset'; the resulting dictionary is
    # keyed on datasets e.g. dict = {'MPI-ESM-LR': [var1, var2...]}
    # where var1, var2 are dicts holding all needed information per variable
    my_files_dict = group_metadata(cfg['input_data'].values(), 'dataset')

    # iterate over key(dataset) and values(list of vars)
    for key, value in my_files_dict.items():
        # load the cube from data files only
        # using a single variable here so just grab the first (and only)
        # list element
        cube = iris.load_cube(value[0]['filename'])

        # the first data analysis bit: simple cube difference:
        # perform a difference between ground and first levels
        diff_cube = cube[:, 0, :, :] - cube[:, 1, :, :]
        # square the differenced cube just for fun
        squared_cube = diff_cube**2.

        # the second data analysis bit (slightly more advanced):
        # compute an area average over the squared cube
        # to apply the area average use a preprocessor function
        # rather than writing your own function
        area_avg_cube = area_statistics(squared_cube, 'mean')

        # finalize the analysis by plotting a time series of the
        # differenced, squared and area-averaged cube; call the plot function:
        _plot_time_series(cfg, area_avg_cube, key)

    # that's it, we're done!
    return 'I am done with my first ESMValTool diagnostic!'
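# The example above calls a ``_plot_time_series`` helper that is not shown.
# Below is a minimal sketch of what such a helper might look like (an
# assumption, not the original implementation), using iris.quickplot and
# matplotlib and assuming ``cfg['plot_dir']`` holds the output directory:
import os

import iris.quickplot as qplt
import matplotlib.pyplot as plt


def _plot_time_series(cfg, cube, dataset_name):
    """Plot an area-averaged time series and save it as a PNG."""
    # after area averaging, the cube is 1-D (time), so qplt.plot draws a line
    qplt.plot(cube, label=dataset_name)
    plt.legend()
    plot_path = os.path.join(cfg['plot_dir'], dataset_name + '_timeseries.png')
    plt.savefig(plot_path)
    plt.close()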
Example #2
import typing

from esmvalcore.preprocessor import area_statistics


def run_area_statistics(
    cube,
    settings: typing.Dict,
    **kwargs,
):
    """Apply the area statistic named in ``settings`` to ``cube``."""
    operator = settings["operator"]
    # fx_variables is left as None so that area weights are derived from the
    # cube's own latitude/longitude bounds rather than from cell-area (fx) files
    fx_variables = None

    result = area_statistics(cube, operator=operator, fx_variables=fx_variables)
    return result
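# A hypothetical call to the wrapper above, assuming ``cube`` is an iris
# cube with bounded latitude and longitude coordinates:
#
#     mean_cube = run_area_statistics(cube, settings={"operator": "mean"})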
Example #3
    def get_area_statistic(self, stat):
        """
        Get the area statistic of an ESMVariable instance's iris cube.

        Parameters
        ----------
        stat : str
            Area statistic. Passed to the ESMValCore area_statistics
            function.

        Returns
        -------
        ESMVariable
            The same instance, with its cube replaced by the area
            statistic cube.
        """
        area_stat_cube = area_statistics(self.cube, stat)
        self.update_cube(area_stat_cube)
        return self
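# Because the method returns ``self``, it can be chained with other
# ESMVariable methods. A hypothetical usage (the constructor call is an
# assumption):
#
#     var = ESMVariable(cube).get_area_statistic('mean')
#     print(var.cube)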
Example #4
# Excerpt from an ESMValTool hydrology diagnostic. Helper functions such as
# get_input_cubes, _get_extra_info and _shift_era5_time_coordinate are
# defined elsewhere in the same module; ``preproc`` is
# esmvalcore.preprocessor, ``sio`` is scipy.io, and ``debruin_pet`` computes
# potential evapotranspiration following De Bruin et al.
def main(cfg):
    """Process data for use as input to the MARRMoT hydrological model.

    These variables are needed in all_vars:
    - tas (air_temperature)
    - pr (precipitation_flux)
    - psl (air_pressure_at_mean_sea_level)
    - rsds (surface_downwelling_shortwave_flux_in_air)
    - rsdt (toa_incoming_shortwave_flux)
    """
    input_metadata = cfg['input_data'].values()
    for dataset, metadata in group_metadata(input_metadata, 'dataset').items():
        all_vars, provenance = get_input_cubes(metadata)

        # Fix time coordinate of ERA5 instantaneous variables
        if dataset == 'ERA5':
            _shift_era5_time_coordinate(all_vars['psl'])
            _shift_era5_time_coordinate(all_vars['tas'])

        # Processing variables and unit conversion
        # Unit of the fluxes in marrmot should be in kg m-2 day-1 (or mm/day)
        logger.info("Processing variable PET")
        pet = debruin_pet(
            psl=all_vars['psl'],
            rsds=all_vars['rsds'],
            rsdt=all_vars['rsdt'],
            tas=all_vars['tas'],
        )
        pet = preproc.area_statistics(pet, operator='mean')
        pet.convert_units('kg m-2 day-1')  # equivalent to mm/day

        logger.info("Processing variable tas")
        temp = preproc.area_statistics(all_vars['tas'], operator='mean')
        temp.convert_units('celsius')

        logger.info("Processing variable pr")
        precip = preproc.area_statistics(all_vars['pr'], operator='mean')
        precip.convert_units('kg m-2 day-1')  # equivalent to mm/day

        # Get the start and end times and latitude longitude
        time_start_end, lat_lon = _get_extra_info(temp)

        # make data structure
        # delta_t_days could also be extracted from the cube
        output_data = {
            'forcing': {
                'precip': precip.data,
                'temp': temp.data,
                'pet': pet.data,
                'delta_t_days': float(1),
                'time_unit': 'day',
            },
            'time_start': time_start_end[0],
            'time_end': time_start_end[1],
            'data_origin': lat_lon,
        }

        # Save to matlab structure
        basename = '_'.join([
            'marrmot',
            dataset,
            cfg['basin'],
            str(int(output_data['time_start'][0])),
            str(int(output_data['time_end'][0])),
        ])
        output_name = get_diagnostic_filename(basename, cfg, extension='mat')
        sio.savemat(output_name, output_data)

        # Store provenance
        with ProvenanceLogger(cfg) as provenance_logger:
            provenance_logger.log(output_name, provenance)
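# The saved MATLAB structure can be read back with scipy.io to sanity-check
# the output; a minimal sketch, reusing ``output_name`` from above:
#
#     mat = sio.loadmat(output_name)
#     forcing = mat['forcing'][0, 0]  # MATLAB structs load as (1, 1) arrays
#     print(forcing['precip'])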
Example #5
def run_my_diagnostic(cfg):
    """
    Simple example of a diagnostic.

    This is a basic (and rather esoteric) diagnostic that first loads
    the needed model data as iris cubes, computes the difference between
    values at ground level and at the first vertical level, then squares
    the result.

    Before plotting, we take the squared result (still an iris cube) and
    apply an area average to it. This is a useful example of how to use
    standard esmvalcore.preprocessor functionality within a diagnostic,
    especially when a custom diagnostic has already been run and the user
    needs to perform an operation that is part of the preprocessor's
    standard library of functions.

    Users will implement their own (custom) diagnostics, but this
    example shows that once the preprocessor has finished, a whole lot of
    user-specific metrics can be computed as part of the diagnostic,
    and then plotted in various manners.

    Parameters
    ----------
    cfg : dict
        Nested dictionary containing dataset names and variables.

    Returns
    -------
    None

    Notes
    -----
    * Since the preprocessor extracts the 1000 hPa level data,
      the cube's data will have shape (36, 180, 360) corresponding
      to time (in months), latitude, longitude.

    Change log
    ----------
    2020-05-04
        * NumPy-ize documentation.
        * Configure to plot multiple variables on one plot.
        * Pass list containing variable tuples to plotting function.
    """
    # assemble the data dictionary keyed by dataset name
    # this makes use of the handy group_metadata function that
    # orders the data by 'dataset'; the resulting dictionary is
    # keyed on datasets e.g. dict = {'MPI-ESM-LR': [var1, var2...]}
    # where var1, var2 are dicts holding all needed information per variable
    my_files_dict = group_metadata(cfg['input_data'].values(), 'dataset')
    
    var_list = []
    # iterate over key(dataset) and values(list of vars)
    for key, value in my_files_dict.items():
        # load the cube from data files only
        # using a single variable here so just grab the first (and only)
        # list element
        cube = iris.load_cube(value[0]['filename'])
        print('KEY: {}'.format(key))
        print('Cube shape: {}'.format(cube.data.shape))
        print('Cube coords: {}'.format(cube.coords()))  # coords is a method
        # compute an area average over the cube using the preprocessor
        # The cube contains only 100000 Pa level data (see recipe).
        area_avg_cube = area_statistics(cube, 'mean')
        # Append the cfg, area_avg_cube, and key tuple to variable list
        var_list.append((cfg, area_avg_cube, key))
    _plot_time_series(var_list)
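# Example #5 passes a list of (cfg, cube, dataset_name) tuples to a single
# ``_plot_time_series`` call, so here the helper has to draw every dataset
# on one figure. A minimal sketch of such a helper (an assumption, not the
# original implementation):
import os

import iris.quickplot as qplt
import matplotlib.pyplot as plt


def _plot_time_series(var_list):
    """Plot several area-averaged time series on one figure."""
    for cfg, cube, dataset_name in var_list:
        qplt.plot(cube, label=dataset_name)
    plt.legend()
    # all tuples share the same cfg, so take the plot directory from the first
    plot_path = os.path.join(var_list[0][0]['plot_dir'], 'timeseries.png')
    plt.savefig(plot_path)
    plt.close()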