Example #1
def main(cfg):
    """Run the diagnostic.

    Parameters
    ----------
    cfg : dict
        Configuration dictionary of the recipe.
    """
    # Get dataset and variable information
    logging.debug("Found datasets in recipe:\n%s", diag.Datasets(cfg))
    logging.debug("Found variables in recipe:\n%s", diag.Variables(cfg))

    # Check for correct variables
    if not diag.Variables(cfg).vars_available('pr', 'mrro', 'evspsbl'):
        raise ValueError(
            "Diagnostic requires precipitation, runoff and evaporation data")

    # Read catchment mask
    # to check: Correct way to read auxiliary data using recipes?
    my_catch = get_catchment_data(cfg)

    # Read data, convert units and compute long term means
    # to check: Shouldn't this be part of preprocessing?
    # to check: How to regrid onto catchment_cube grid
    #           with preproc recipe statements
    #           instead of using regrid here?
    allcubes = {}
    plotdata = {}
    for datapath in diag.Datasets(cfg):
        # Get simulation data
        var, identifier, cube = get_sim_data(cfg, datapath, my_catch['cube'])
        # Get river catchment averages
        rivervalues = get_catch_avg(my_catch, cube)
        # Sort into data dictionaries
        datainfo = diag.Datasets(cfg).get_dataset_info(path=datapath)
        model = datainfo['dataset']
        if model == datainfo.get('reference_dataset', None):
            update_reference(my_catch, model, rivervalues, var)
        else:
            update_plotdata(identifier, plotdata, rivervalues, var)

        # Append to cube list for temporary output
        allcubes.setdefault(model, []).append(cube)

    # Write regridded and temporally aggregated netCDF data files
    # (one per model)
    # to do: update attributes, something fishy with unlimited dimension
    for model, mcube in allcubes.items():
        filepath = os.path.join(cfg[diag.names.WORK_DIR],
                                '_'.join(['postproc', model]) + '.nc')
        if cfg[diag.names.WRITE_NETCDF]:
            iris.save(mcube, filepath)
            logger.info("Writing %s", filepath)

    # Write plotdata as ascii files for user information
    write_plotdata(cfg, plotdata, my_catch)

    # Plot catchment data
    make_catchment_plots(cfg, plotdata, my_catch)
Example #2
def main(cfg):
    """Compute the time average for each input dataset."""
    datasets = e.Datasets(cfg)
    variables = e.Variables(cfg)
    logger.debug("Found datasets: %s", datasets)
    logger.debug("Found variables: %s", variables)

    for path in datasets:
        logger.info("Processing variable %s from dataset %s",
                    datasets.get_info(n.STANDARD_NAME, path),
                    datasets.get_info(n.DATASET, path))

        logger.debug("Loading %s", path)
        cube = iris.load_cube(path)

        logger.debug("Running example computation")
        cube = cube.collapsed(n.TIME, iris.analysis.MEAN)

        name = os.path.splitext(os.path.basename(path))[0] + '_mean'
        if cfg[n.WRITE_NETCDF]:
            filepath = os.path.join(cfg[n.WORK_DIR], name + '.nc')
            logger.debug("Saving analysis results to %s", filepath)
            iris.save(cube, target=filepath)

        if cfg[n.WRITE_PLOTS] and cfg.get('quickplot'):
            filepath = os.path.join(cfg[n.PLOT_DIR],
                                    name + '.' + cfg[n.OUTPUT_FILE_TYPE])
            logger.debug("Plotting analysis results to %s", filepath)
            e.plot.quickplot(cube, filename=filepath, **cfg['quickplot'])
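Each of these main(cfg) functions is started through ESMValTool's run_diagnostic context manager, which builds the cfg dictionary from the recipe settings. A minimal sketch of the standard entry point, assuming the shared-module aliases e and n used in the example above:

import logging
import os

import esmvaltool.diag_scripts.shared as e
from esmvaltool.diag_scripts.shared import names as n

logger = logging.getLogger(os.path.basename(__file__))


if __name__ == '__main__':
    # run_diagnostic() parses the settings file handed over by
    # ESMValTool and yields the cfg dictionary consumed by main().
    with e.run_diagnostic() as config:
        main(config)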
Example #3
def main(cfg):
    """Run the diagnostic.

    Parameters
    ----------
    cfg : dict
        Configuration dictionary of the recipe.
    """
    # Print dataset and variable information
    logging.debug("Found datasets in recipe:\n%s", diag.Datasets(cfg))
    logging.debug("Found variables in recipe:\n%s", diag.Variables(cfg))

    # Get metadata information
    grouped_input_data = diag.group_metadata(cfg['input_data'].values(),
                                             'standard_name',
                                             sort='dataset')
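    # group_metadata returns a dictionary that maps each value of
    # 'standard_name' to the list of matching metadata dictionaries,
    # with every list sorted by its 'dataset' entry.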

    # Prepare dictionaries
    timcubes = {
        'exp': {key: [] for key in diag.Variables(cfg).short_names()},
        'ref': {key: [] for key in diag.Variables(cfg).short_names()},
    }
    lcdata = {key: {} for key in diag.Variables(cfg).short_names()}
    refset = {}
    prov_rec = {key: {} for key in diag.Variables(cfg).short_names()}

    # Read data and compute long term means
    for standard_name in grouped_input_data:
        for attributes in grouped_input_data[standard_name]:
            get_timmeans(attributes, timcubes, refset, prov_rec)

    for var in diag.Variables(cfg).short_names():
        # Write regridded and temporal aggregated netCDF data files
        write_data(cfg, timcubes, var, prov_rec)
        # Compute aggregated and fraction average land cover
        regnam = compute_landcover(var, lcdata,
                                   timcubes['exp'][var] + timcubes['ref'][var])

    # Reshuffle data if models are the comparison target
    if cfg.get('comparison', 'variable') == 'model':
        focus2model(cfg, lcdata, refset)
        prov_rec = None
    elif cfg.get('comparison', 'variable') != 'variable':
        raise ValueError("'variable' and 'model' are the only valid "
                         "comparison targets")

    # Output ascii files and plots
    for target in lcdata:
        # Write plotdata as ascii files for user information
        infos = [cfg, regnam, prov_rec, target]
        write_plotdata(infos, lcdata[target]['groups'],
                       lcdata[target]['values'])

        # Plot area values
        make_landcover_bars(cfg, regnam, lcdata[target]['groups'],
                            lcdata[target]['values'], target)
Example #4
def get_sim_data(cfg, datapath, catchment_cube):
    """Read and postprocess netcdf data from experiments.

    Check units, aggregate to long term mean yearly sum and
    regrid to resolution of catchment mask.

    Parameters
    ----------
    cfg : dict
        Configuration dictionary of the recipe.
    datapath : str
        Path to the netCDF file.
    catchment_cube : iris.cube.Cube
        Cube containing the catchment mask, used as the regridding target.
    """
    datainfo = diag.Datasets(cfg).get_dataset_info(path=datapath)
    identifier = "_".join(
        [datainfo['dataset'].upper(), datainfo['exp'], datainfo['ensemble']])
    # Load data into iris cube
    new_cube = iris.load(datapath, diag.Variables(cfg).standard_names())[0]
    # Check for expected unit
    if new_cube.units != 'kg m-2 s-1':
        raise ValueError('Unit [kg m-2 s-1] is expected for {} flux'.format(
            new_cube.long_name.lower()))
    # Convert to unit mm per month
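    # 1 kg m-2 of water corresponds to a depth of 1 mm, so multiplying
    # the flux (kg m-2 s-1) by 86400 s/day gives mm/day; scaling each
    # time step by the length of its month then yields mm/month.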
    timelist = new_cube.coord('time')
    daypermonth = []
    for mydate in timelist.units.num2date(timelist.points):
        daypermonth.append(calendar.monthrange(mydate.year, mydate.month)[1])
    new_cube.data *= 86400.0
    for i, days in enumerate(daypermonth):
        new_cube.data[i] *= days
    # Aggregate over year --> unit mm per year
    iris.coord_categorisation.add_year(new_cube, 'time')
    year_cube = new_cube.aggregated_by('year', iris.analysis.SUM)
    year_cube.units = "mm a-1"
    # Compute long term mean
    mean_cube = year_cube.collapsed([diag.names.TIME], iris.analysis.MEAN)
    # Regrid to the catchment data grid using area-weighted regridding,
    # which requires contiguous latitude/longitude bounds
    if mean_cube.coord('latitude').bounds is None:
        mean_cube.coord('latitude').guess_bounds()
    if mean_cube.coord('longitude').bounds is None:
        mean_cube.coord('longitude').guess_bounds()
    mean_cube_regrid = mean_cube.regrid(catchment_cube,
                                        iris.analysis.AreaWeighted())

    return datainfo['short_name'], identifier, mean_cube_regrid
Example #5
def main(cfg):
    """Run the diagnostic."""
    ###########################################################################
    # Read recipe data
    ###########################################################################

    # Dataset data containers
    data = e.Datasets(cfg)
    logging.debug("Found datasets in recipe:\n%s", data)

    # Variables
    var = e.Variables(cfg)
    logging.debug("Found variables in recipe:\n%s", var)

    # Check for required variables
    if not var.vars_available('pr', 'ua', 'va', 'ts'):
        raise ValueError("This diagnostic needs 'pr', 'ua', 'va' and 'ts'")

    available_exp = list(group_metadata(cfg['input_data'].values(), 'exp'))

    if 'historical' not in available_exp:
        raise ValueError("The diagnostic needs a historical experiment "
                         "and one other experiment.")

    if len(available_exp) != 2:
        raise ValueError("The diagnostic needs exactly two experiments: "
                         "historical and one other.")

    available_exp.remove('historical')
    future_exp = available_exp[0]
    ###########################################################################
    # Read data
    ###########################################################################

    # Create iris cube for each dataset and save annual means
    for dataset_path in data:
        cube = iris.load(dataset_path)[0]
        cat.add_month_number(cube, 'time', name='month_number')
        # MJJAS mean (monsoon season)
        cube = cube[np.where(
            np.absolute(cube.coord('month_number').points - 7) <= 2)]
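        # |month_number - 7| <= 2 keeps months 5 to 9 (May-September)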
        cube = cube.collapsed('time', iris.analysis.MEAN)

        short_name = data.get_info(n.SHORT_NAME, dataset_path)
        if short_name == 'pr':
            # convert from kg m-2 s-1 to mm d-1
            # cube.convert_units('mm d-1') doesn't work.
            cube.data = cube.data * (60.0 * 60.0 * 24.0)
            cube.units = 'mm d-1'
            # All datasets share the same grid after preprocessing, so
            # the coordinates of the first pr cube can be reused for all.
            if 'lats' not in locals():
                lats = cube.coord('latitude').points
                lons = cube.coord('longitude').points

        data.set_data(cube.data, dataset_path)
    ###########################################################################
    # Process data
    ###########################################################################

    data_ar = substract_li(cfg, data, lats, lons, future_exp)

    # data_ar {"datasets": datasets, "ar_diff_rain": ar_diff_rain,
    #          "ar_diff_ua": ar_diff_ua, "ar_diff_va": ar_diff_va,
    #          "ar_hist_rain": ar_hist_rain, "mism_diff_rain": mism_diff_rain,
    #          "mwp_hist_rain": mwp_hist_rain}

    plot_rain_and_wind(cfg, 'Multi-model_mean',
                       {'ar_diff_rain': data_ar["ar_diff_rain"],
                        'ar_diff_ua': data_ar["ar_diff_ua"],
                        'ar_diff_va': data_ar["ar_diff_va"],
                        'lats': lats, 'lons': lons}, future_exp)

    # Regression between mean ISM rain difference and historical rain
    reg2d = get_reg_2d_li(data_ar["mism_diff_rain"], data_ar["ar_hist_rain"],
                          lats, lons)

    plot_2dcorrelation_li(cfg, reg2d, lats, lons)

    plot_reg_li(cfg, data_ar, future_exp)

    # Regression between mean WP rain and rain difference for each location
    reg2d_wp = get_reg_2d_li(data_ar["mwp_hist_rain"], data_ar["ar_diff_rain"],
                             lats, lons)

    data_ar2 = correct_li(data_ar, lats, lons, reg2d_wp)
    # return {"datasets": data["datasets"], "ar_diff_cor": ar_diff_cor,
    #         "proj_err": proj_err, "mism_diff_cor": mism_diff_cor,
    #         "mism_hist_rain": mism_hist_rain, "mwp_hist_cor": mwp_hist_cor}

    plot_reg_li2(cfg, data_ar["datasets"], data_ar["mism_diff_rain"],
                 data_ar2["mism_diff_cor"], data_ar2["mism_hist_rain"])

    plot_rain(cfg, 'Multi-model mean rainfall change due to model error',
              np.mean(data_ar2["proj_err"], axis=2), lats, lons)
    plot_rain(cfg, 'Corrected multi-model mean rainfall change',
              np.mean(data_ar2["ar_diff_cor"], axis=2), lats, lons)
Example #6
def main(cfg):
    """Execute the program.

    Argument cfg, containing directory paths, preprocessed input dataset
    filenames and user-defined options, is passed by the ESMValTool framework.
    """
    provlog = ProvenanceLogger(cfg)
    lorenz = lorenz_cycle
    comp = computations
    logger.info('Entering the diagnostic tool')
    # Load paths
    wdir_up = cfg['work_dir']
    pdir_up = cfg['plot_dir']
    input_data = cfg['input_data'].values()
    logger.info('Work directory: %s \n', wdir_up)
    logger.info('Plot directory: %s \n', pdir_up)
    plotsmod = plot_script
    data = e.Datasets(cfg)
    logger.debug(data)
    models = data.get_info_list('dataset')
    model_names = list(set(models))
    model_names.sort()
    logger.info(model_names)
    varnames = data.get_info_list('short_name')
    curr_vars = list(set(varnames))
    logger.debug(curr_vars)
    # load user-defined options
    lsm = str(cfg['lsm'])
    wat = str(cfg['wat'])
    lec = str(cfg['lec'])
    entr = str(cfg['entr'])
    met = str(cfg['met'])
    flags = [wat, lec, entr, met]
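    # The options are cast to str above, so the boolean switches are
    # compared against the literal string 'True' below.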
    # Initialize multi-model arrays
    modnum = len(model_names)
    te_all = np.zeros(modnum)
    toab_all = np.zeros([modnum, 2])
    toab_oc_all = np.zeros(modnum)
    toab_la_all = np.zeros(modnum)
    atmb_all = np.zeros([modnum, 2])
    atmb_oc_all = np.zeros(modnum)
    atmb_la_all = np.zeros(modnum)
    surb_all = np.zeros([modnum, 2])
    surb_oc_all = np.zeros(modnum)
    surb_la_all = np.zeros(modnum)
    wmb_all = np.zeros([modnum, 2])
    wmb_oc_all = np.zeros(modnum)
    wmb_la_all = np.zeros(modnum)
    latent_all = np.zeros([modnum, 2])
    latent_oc_all = np.zeros(modnum)
    latent_la_all = np.zeros(modnum)
    baroc_eff_all = np.zeros(modnum)
    lec_all = np.zeros([modnum, 2])
    horzentr_all = np.zeros([modnum, 2])
    vertentr_all = np.zeros([modnum, 2])
    matentr_all = np.zeros([modnum, 2])
    irrevers_all = np.zeros(modnum)
    diffentr_all = np.zeros([modnum, 2])
    logger.info("Entering main loop\n")
    for i_m, model in enumerate(model_names):
        # Load paths to individual models output and plotting directories
        wdir = os.path.join(wdir_up, model)
        pdir = os.path.join(pdir_up, model)
        os.makedirs(wdir, exist_ok=True)
        os.makedirs(pdir, exist_ok=True)
        aux_file = os.path.join(wdir, 'aux.nc')
        te_ymm_file, te_gmean_constant, te_file = mkthe.init_mkthe_te(
            model, wdir, input_data)
        te_all[i_m] = te_gmean_constant
        logger.info('Computing energy budgets\n')
        in_list, eb_gmean, eb_file, toab_ymm_file = comp.budgets(
            model, wdir, aux_file, input_data)
        prov_rec = provenance_meta.get_prov_map(
            ['TOA energy budgets', model],
            [in_list[4], in_list[6], in_list[7]])
        provlog.log(eb_file[0], prov_rec)
        prov_rec = provenance_meta.get_prov_map(
            ['atmospheric energy budgets', model], [
                in_list[0], in_list[1], in_list[2], in_list[3], in_list[4],
                in_list[5], in_list[6], in_list[7], in_list[8]
            ])
        provlog.log(eb_file[1], prov_rec)
        prov_rec = provenance_meta.get_prov_map(
            ['surface energy budgets', model], [
                in_list[0], in_list[1], in_list[2], in_list[3], in_list[5],
                in_list[7]
            ])
        provlog.log(eb_file[2], prov_rec)
        toab_all[i_m, 0] = np.nanmean(eb_gmean[0])
        toab_all[i_m, 1] = np.nanstd(eb_gmean[0])
        atmb_all[i_m, 0] = np.nanmean(eb_gmean[1])
        atmb_all[i_m, 1] = np.nanstd(eb_gmean[1])
        surb_all[i_m, 0] = np.nanmean(eb_gmean[2])
        surb_all[i_m, 1] = np.nanstd(eb_gmean[2])
        logger.info('Global mean emission temperature: %s\n',
                    te_gmean_constant)
        logger.info('TOA energy budget: %s\n', toab_all[i_m, 0])
        logger.info('Atmospheric energy budget: %s\n', atmb_all[i_m, 0])
        logger.info('Surface energy budget: %s\n', surb_all[i_m, 0])
        logger.info('Done\n')
        baroc_eff_all[i_m] = comp.baroceff(model, wdir, aux_file,
                                           toab_ymm_file, te_ymm_file)
        logger.info('Baroclinic efficiency (Lucarini et al., 2011): %s\n',
                    baroc_eff_all[i_m])
        logger.info('Running the plotting module for the budgets\n')
        plotsmod.balances(cfg, wdir_up, pdir,
                          [eb_file[0], eb_file[1], eb_file[2]],
                          ['toab', 'atmb', 'surb'], model)
        logger.info('Done\n')
        # Water mass budget
        if wat == 'True':
            (wm_file,
             wmb_all[i_m, 0],
             wmb_all[i_m, 1],
             latent_all[i_m, 0],
             latent_all[i_m, 1]) = compute_water_mass_budget(
                 cfg, wdir_up, pdir, model, wdir, input_data, flags, aux_file)
        if lsm == 'True':
            sftlf_fx = e.select_metadata(input_data,
                                         short_name='sftlf',
                                         dataset=model)[0]['filename']
            logger.info('Computing energy budgets over land and oceans\n')
            toab_oc_all[i_m], toab_la_all[i_m] = compute_land_ocean(
                model, wdir, eb_file[0], sftlf_fx, 'toab')
            atmb_oc_all[i_m], atmb_la_all[i_m] = compute_land_ocean(
                model, wdir, eb_file[1], sftlf_fx, 'atmb')
            surb_oc_all[i_m], surb_la_all[i_m] = compute_land_ocean(
                model, wdir, eb_file[2], sftlf_fx, 'surb')
            if wat == 'True':
                logger.info('Computing water mass and latent energy'
                            ' budgets over land and oceans\n')
                wmb_oc_all[i_m], wmb_la_all[i_m] = compute_land_ocean(
                    model, wdir, wm_file[0], sftlf_fx, 'wmb')
                latent_oc_all[i_m], latent_la_all[i_m] = compute_land_ocean(
                    model, wdir, wm_file[1], sftlf_fx, 'latent')
            logger.info('Done\n')
        if lec == 'True':
            logger.info('Computation of the Lorenz Energy '
                        'Cycle (year by year)\n')
            _, _ = mkthe.init_mkthe_lec(model, wdir, input_data)
            lect = lorenz.preproc_lec(model, wdir, pdir, input_data)
            lec_all[i_m, 0] = np.nanmean(lect)
            lec_all[i_m, 1] = np.nanstd(lect)
            logger.info(
                'Intensity of the annual mean Lorenz Energy '
                'Cycle: %s\n', lec_all[i_m, 0])
            logger.info('Done\n')
        else:
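            # When the LEC computation is switched off, fall back on a
            # typical climatological intensity of about 2 W m-2.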
            lect = np.repeat(2.0, len(eb_gmean[0]))
            lec_all[i_m, 0] = 2.0
            lec_all[i_m, 1] = 0.2
        if entr == 'True':
            if met in {'1', '3'}:
                logger.info('Computation of the material entropy production '
                            'with the indirect method\n')
                indentr_list = [te_file, eb_file[0]]
                horz_mn, vert_mn, horzentr_file, vertentr_file = comp.indentr(
                    model, wdir, indentr_list, input_data, aux_file,
                    eb_gmean[0])
                listind = [horzentr_file, vertentr_file]
                provenance_meta.meta_indentr(cfg, model, input_data, listind)
                horzentr_all[i_m, 0] = np.nanmean(horz_mn)
                horzentr_all[i_m, 1] = np.nanstd(horz_mn)
                vertentr_all[i_m, 0] = np.nanmean(vert_mn)
                vertentr_all[i_m, 1] = np.nanstd(vert_mn)
                logger.info(
                    'Horizontal component of the material entropy '
                    'production: %s\n', horzentr_all[i_m, 0])
                logger.info(
                    'Vertical component of the material entropy '
                    'production: %s\n', vertentr_all[i_m, 0])
                logger.info('Done\n')
                logger.info('Running the plotting module for the material '
                            'entropy production (indirect method)\n')
                plotsmod.entropy(pdir, vertentr_file, 'sver',
                                 'Vertical entropy production', model)
                logger.info('Done\n')
            if met in {'2', '3'}:
                matentr, irrevers, entr_list = comp.direntr(
                    logger, model, wdir, input_data, aux_file, te_file, lect,
                    flags)
                provenance_meta.meta_direntr(cfg, model, input_data, entr_list)
                matentr_all[i_m, 0] = matentr
                if met == '3':
                    diffentr = (float(np.nanmean(vert_mn)) +
                                float(np.nanmean(horz_mn)) - matentr)
                    logger.info('Difference between the two '
                                'methods: %s\n', diffentr)
                    diffentr_all[i_m, 0] = diffentr
                logger.info('Degree of irreversibility of the '
                            'system: %s\n', irrevers)
                irrevers_all[i_m] = irrevers
                logger.info('Running the plotting module for the material '
                            'entropy production (direct method)\n')
                plotsmod.init_plotentr(model, pdir, entr_list)
                logger.info('Done\n')
            os.remove(te_file)
        os.remove(te_ymm_file)
        logger.info('Done for model: %s \n', model)
    logger.info('I will now start multi-model plots')
    logger.info('Meridional heat transports\n')
    plotsmod.plot_mm_transp(model_names, wdir_up, pdir_up)
    logger.info('Scatter plots')
    summary_varlist = [
        atmb_all, baroc_eff_all, horzentr_all, lec_all, matentr_all, te_all,
        toab_all, vertentr_all
    ]
    plotsmod.plot_mm_summaryscat(pdir_up, summary_varlist)
    logger.info('Scatter plots for inter-annual variability of'
                ' some quantities')
    eb_list = [toab_all, atmb_all, surb_all]
    plotsmod.plot_mm_ebscatter(pdir_up, eb_list)
    logger.info("The diagnostic has finished. Now closing...\n")
def main(cfg):
    """Run the diagnostic.

    Parameters
    ----------
    cfg : dict
        Configuration dictionary of the recipe.

    """
    ###########################################################################
    # Read recipe data
    ###########################################################################

    # Dataset data containers
    data = e.Datasets(cfg)
    logging.debug("Found datasets in recipe:\n%s", data)

    # Variables
    # var = e.Variables(cfg)
    available_vars = list(
        group_metadata(cfg['input_data'].values(), 'short_name'))
    logging.debug("Found variables in recipe:\n%s", available_vars)

    available_exp = list(group_metadata(cfg['input_data'].values(), 'exp'))

    if len(available_exp) > 6:
        raise ValueError("The diagnostic can only plot up to 6 different " +
                         "model experiments.")

    ###########################################################################
    # Read data
    ###########################################################################

    # Create iris cube for each dataset and save annual means
    for dataset_path in data:
        cube = iris.load(dataset_path)[0]
        # cube = iris.load(dataset_path, var.standard_names())[0]
        cube = cube.collapsed('time', iris.analysis.MEAN)

        data.set_data(cube.data, dataset_path)

    ###########################################################################
    # Process data
    ###########################################################################

    data_var = OrderedDict()
    for iexp in available_exp:
        data_var[iexp] = OrderedDict()
        for jvar in available_vars:
            data_var[iexp][jvar] = 0.0

    pathlist = data.get_path_list(short_name=available_vars[0],
                                  exp=available_exp[0])

    for dataset_path in pathlist:

        # Subtract piControl experiment from abrupt4xCO2 experiment
        dataset = data.get_info(n.DATASET, dataset_path)

        for jvar in available_vars:
            for iexp in available_exp:
                data_var[iexp][jvar] += data.get_data(
                    short_name=jvar, exp=iexp, dataset=dataset)

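    # Average the per-variable sums over the number of datasets to
    # obtain a multi-model mean for each experiment.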
    data_var_sum = {}
    for iexp in available_exp:
        data_var_sum[iexp] = np.fromiter(data_var[iexp].values(),
                                         dtype=float) / float(len(pathlist))

    # Plot the bar chart for the experiments
    plot_bar_deangelis(cfg, data_var_sum, available_exp, available_vars)
Example #8
def main(cfg):
    """Run the diagnostic.

    Parameters
    ----------
    cfg : dict
        Configuration dictionary of the recipe.

    """
    ###########################################################################
    # Read recipe data
    ###########################################################################

    # Dataset data containers
    data = e.Datasets(cfg)
    logging.debug("Found datasets in recipe:\n%s", data)

    # Variables
    var = e.Variables(cfg)
    # logging.debug("Found variables in recipe:\n%s", var)

    available_vars = list(
        group_metadata(cfg['input_data'].values(), 'short_name'))
    logging.debug("Found variables in recipe:\n%s", available_vars)

    available_exp = list(group_metadata(cfg['input_data'].values(), 'exp'))

    # Check for available variables
    required_vars = ('tas', 'lvp', 'rlnst', 'rsnst', 'rlnstcs', 'rsnstcs',
                     'hfss')
    if not e.variables_available(cfg, required_vars):
        raise ValueError("This diagnostic needs {required_vars} variables")

    # Check for experiments
    if not {'abrupt-4xCO2', 'abrupt4xCO2'} & set(available_exp):
        raise ValueError("The diagnostic needs an experiment with "
                         "4 times CO2.")

    if 'piControl' not in available_exp:
        raise ValueError("The diagnostic needs a pre industrial control " +
                         "experiment.")

    ###########################################################################
    # Read data
    ###########################################################################

    # Create iris cube for each dataset and save annual means
    for dataset_path in data:
        cube = iris.load(dataset_path)[0]
        cat.add_year(cube, 'time', name='year')
        cube = cube.aggregated_by('year', iris.analysis.MEAN)
        experiment = data.get_info(n.EXP, dataset_path)
        if experiment == PICONTROL:
            # DeAngelis et al. use a 21-year running mean on piControl but
            # the full extent of the 150-year abrupt4xCO2 run. It is unclear
            # how they treat the edges; here the running mean is simply
            # skipped for the first and last 10 years. This is not exactly
            # what is done in the paper (small differences remain in
            # extended data Fig. 1), but it is closer than the alternatives
            # tried, e.g. dropping the edges entirely. For most datasets the
            # piControl run could instead be extended by 20 years, but then
            # the means would no longer be centred on each year of
            # abrupt4xCO2.
            cube_new = cube.rolling_window('time', iris.analysis.MEAN, 21)
            endm10 = len(cube.coord('time').points) - 10
            cube.data[10:endm10] = cube_new.data

        data.set_data(cube.data, dataset_path)

    ###########################################################################
    # Process data
    ###########################################################################

    data_dict = substract_and_reg_deangelis2(cfg, data, var)

    plot_slope_regression(cfg, data_dict)
    plot_slope_regression_all(cfg, data_dict, available_vars)