Example #1
def plot_data(*, cfg: dict, datasets: 'xr.Dataset', xaxis: str, yaxis: str,
              xlabel: str, ylabel: str, caption: str, name: str,
              ancestors: list):
    """Plot data."""
    figure, _ = plt.subplots(dpi=300)

    for label in datasets.dataset:
        label = str(label.data)
        dataset = datasets.sel(dataset=label)
        if 'time' in dataset:
            dataset = dataset.dropna(dim='time')  # remove nan
            figure.autofmt_xdate()  # rotate date labels
        plt.plot(dataset[xaxis], dataset[yaxis], label=label)

    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(caption)
    plt.legend()
    plt.show()

    filename_plot = get_plot_filename(name + '_plot', cfg)
    figure.savefig(filename_plot, dpi=300, bbox_inches='tight')
    plt.close(figure)

    # Store provenance
    log_provenance(caption, filename_plot, cfg, ancestors)
Example #2
def visualize_and_save_independence(independence: 'xr.DataArray', cfg: dict,
                                    ancestors: list):
    """Visualize independence."""
    variable = independence.variable_group
    labels = list(independence.model_ensemble.values)

    figure, axes = plt.subplots(figsize=(15, 15),
                                subplot_kw={'aspect': 'equal'})
    chart = sns.heatmap(
        independence,
        linewidths=1,
        cmap="YlGn",
        xticklabels=labels,
        yticklabels=labels,
        cbar_kws={'label': f'Euclidean distance ({independence.units})'},
        ax=axes,
    )
    chart.set_title(f'Distance matrix for {variable}')

    filename_plot = get_plot_filename(f'independence_{variable}', cfg)
    figure.savefig(filename_plot, dpi=300, bbox_inches='tight')
    plt.close(figure)

    filename_data = get_diagnostic_filename(f'independence_{variable}',
                                            cfg,
                                            extension='nc')
    independence.to_netcdf(filename_data)

    caption = f'Euclidean distance matrix for variable {variable}'
    log_provenance(caption, filename_plot, cfg, ancestors)
    log_provenance(caption, filename_data, cfg, ancestors)
Example #3
def _plot_boxplot(cfg, data_frame, plot_name):
    """Plot boxplot."""
    boxplot_kwargs = {
        'color': 'b',
        'data': data_frame,
        'showfliers': False,
        'showmeans': True,
        'meanprops': {
            'marker': 'x',
            'markeredgecolor': 'k',
            'markerfacecolor': 'k',
            'markersize': 8,
        },
        'whis': [0, 100],
    }
    boxplot_kwargs.update(mlr_plot.get_plot_kwargs(cfg, plot_name))
    sns.boxplot(**boxplot_kwargs)
    sns.swarmplot(data=data_frame, color='k', alpha=0.6)

    # Plot appearance
    plt.ylim(0.0, plt.ylim()[1])
    mlr_plot.process_pyplot_kwargs(cfg, plot_name)

    # Save plot
    plot_path = get_plot_filename(plot_name, cfg)
    plt.savefig(plot_path, **mlr_plot.get_savefig_kwargs(cfg))
    logger.info("Wrote %s", plot_path)
    plt.close()
    return plot_path
Example #4
def _plot_cube(cube, cfg):
    """Plot the transition cube."""
    # Also plot the transition_cube
    if cube.ndim != 2:
        raise ValueError("Cube should be two-dimensional")
    plt.clf()
    cow = plt.axes(projection=crs.PlateCarree())
    cow.add_feature(cfeature.LAND)
    iris.quickplot.pcolormesh(cube, vmin=-.24, vmax=.24, cmap='bwr')
    # Set title/suptitle for plot
    if 'plottitle' in cube.attributes:
        plt.title(cube.attributes['plottitle'])
    if 'plotsuptitle' in cube.attributes:
        plt.suptitle(cube.attributes['plotsuptitle'])
    # Draw coast lines
    plt.gca().coastlines()
    # Get right path for saving plots from the cfg dictionary.
    if 'parent_mip_era' in cube.attributes:
        model_attr_name = 'source_id' if\
            cube.attributes['parent_mip_era'] == 'CMIP6'\
            else 'model_id'
    else:  # In this case it must be OBS, and we set it to model_id explicitly
        model_attr_name = 'model_id'
    basename = cube.attributes[model_attr_name] + '_'\
        + cube.name().replace(' ', '_')
    savename_fig = get_plot_filename(basename, cfg)
    logger.info("Saving figure as: %s", savename_fig)
    plt.savefig(savename_fig)
Example #5
def plot_bar_deangelis(cfg, data_var_sum, available_exp, available_vars):
    """Plot linear regression used to calculate ECS."""
    if not cfg[n.WRITE_PLOTS]:
        return

    # Plot data
    fig, axx = plt.subplots()

    set_colors = [
        'cornflowerblue', 'orange', 'silver', 'limegreen', 'rosybrown',
        'orchid'
    ]
    bar_width = 1.0 / float(len(available_vars))

    for iii, iexp in enumerate(available_exp):
        axx.bar(np.arange(len(available_vars)) + bar_width * float(iii),
                data_var_sum[iexp],
                bar_width,
                color=set_colors[iii],
                label=iexp)

    axx.set_xlabel(' ')
    axx.set_ylabel(r'Model mean (W m$^{-2}$)')
    axx.set_title(' ')
    axx.set_xticks(np.arange(len(available_vars)) + bar_width)
    axx.set_xticklabels(available_vars)
    axx.legend(loc=1)

    fig.tight_layout()
    fig.savefig(get_plot_filename('bar_all', cfg), dpi=300)
    plt.close()

    caption = 'Global average multi-model mean comparing different ' + \
              'model experiments and flux variables.'

    provenance_record = get_provenance_record(
        _get_sel_files_var(cfg, available_vars), caption, ['mean'], ['global'])

    diagnostic_file = get_diagnostic_filename('bar_all', cfg)

    logger.info("Saving analysis results to %s", diagnostic_file)

    list_dict = {}
    list_dict["data"] = []
    list_dict["name"] = []
    for iexp in available_exp:
        list_dict["data"].append(data_var_sum[iexp])
        list_dict["name"].append({
            'var_name': iexp + '_all',
            'long_name': 'Fluxes for ' + iexp + ' experiment',
            'units': 'W m-2'
        })

    iris.save(cube_to_save_vars(list_dict), target=diagnostic_file)

    logger.info("Recording provenance of %s:\n%s", diagnostic_file,
                pformat(provenance_record))
    with ProvenanceLogger(cfg) as provenance_logger:
        provenance_logger.log(diagnostic_file, provenance_record)
Example #6
def _create_feedback_plot(tas_cube, cube, dataset_name, cfg, description=None):
    """Plot feedback parameter vs. remaining dimensions."""
    var = cube.var_name
    logger.debug("Plotting '%s' vs. %s for '%s'", SHORTER_NAMES.get(var, var),
                 COORDS['rad'], dataset_name)
    x_data = _get_data_time_last(tas_cube)
    y_data = _get_data_time_last(cube)
    coords = [(coord, idx - 1)
              for (idx, coord) in enumerate(cube.coords(dim_coords=True))
              if coord.name() != 'time']
    feedback_cube = iris.cube.Cube(_get_slope(x_data, y_data),
                                   var_name=var,
                                   dim_coords_and_dims=coords,
                                   units='W m-2 K-1')

    # Plot
    if feedback_cube.ndim == 1:
        iplt.plot(feedback_cube)
        plt.xlabel(f"{COORDS['rad'][0]} / "
                   f"{cube.coord(COORDS['rad'][0]).units.origin}")
        plt.ylabel(f"{NICE_SYMBOLS.get(var, var)} / "
                   f"{NICE_UNITS.get(feedback_cube.units.origin, 'unknown')}")
        colorbar = None
    elif feedback_cube.ndim == 2:
        iplt.contourf(feedback_cube, cmap='bwr', levels=_get_levels())
        colorbar = plt.colorbar(orientation='horizontal')
        colorbar.set_label(
            f"{NICE_SYMBOLS.get(var, var)} / "
            f"{NICE_UNITS.get(feedback_cube.units.origin, 'unknown')}")
        ticks = [-8.0, -6.0, -4.0, -2.0, 0.0, 2.0, 4.0, 6.0, 8.0]
        colorbar.set_ticks(ticks)
        colorbar.set_ticklabels([str(tick) for tick in ticks])
        if COORDS['rad'] == ['latitude', 'longitude']:
            plt.gca().coastlines()
        else:
            plt.xlabel(f"{COORDS['rad'][0]} / "
                       f"{cube.coord(COORDS['rad'][0]).units.origin}")
            plt.ylabel(f"{COORDS['rad'][1]} / "
                       f"{cube.coord(COORDS['rad'][1]).units.origin}")
    else:
        raise ValueError(f"Cube dimension {feedback_cube.ndim} not supported")

    # Appearance
    title = f'{SHORTER_NAMES.get(var, var)} for {dataset_name}'
    filename = ('{}_vs_{}_{}'.format(VAR_NAMES.get(var, var),
                                     '-'.join(COORDS['rad']), dataset_name))
    if description is not None:
        title += f' ({description})'
        filename += f"_{description.replace(' ', '_')}"
    plt.title(title)
    plot_path = get_plot_filename(filename, cfg)
    plt.savefig(plot_path,
                bbox_inches='tight',
                orientation='landscape',
                additional_artists=[colorbar])
    logger.info("Wrote %s", plot_path)
    plt.close()

    return (plot_path, feedback_cube)
Example #7
def su(grouped_data, cfg):
    """Su et al. (2014) constraint."""
    metric = cfg['metric']
    logger.info("Found metric '%s' for Su et al. (2014) constraint", metric)

    # Extract cubes
    (var_name, reference_datasets) = _get_su_variable(grouped_data)
    cube_dict = _get_su_cube_dict(grouped_data, var_name, reference_datasets)
    diag_data = {}
    ref_cube = cube_dict[reference_datasets]

    # Variable attributes
    var_attrs = {
        'short_name': 'alpha' if metric == 'regression_slope' else 'rho',
        'long_name': f"Error in vertically-resolved tropospheric "
                     f"zonal-average {ref_cube.long_name} between 40°N and "
                     f"45°S expressed as {metric.replace('_', ' ')} between "
                     f"model data and observations",
        'units': '1',
    }
    attrs = {
        'plot_xlabel': f'Model performance in {ref_cube.long_name} [1]',
        'plot_title': 'Su et al. (2014) constraint',
        'provenance_authors': ['schlund_manuel'],
        'provenance_domains': ['trop', 'midlat'],
        'provenance_realms': ['atmos'],
        'provenance_references': ['su14jgr'],
        'provenance_statistics': ['corr'],
        'provenance_themes': ['EC'],
    }

    # Calculate constraint
    for (dataset_name, cube) in cube_dict.items():
        logger.info("Processing dataset '%s'", dataset_name)

        # Plot cube
        if cube.ndim == 2:
            iris.quickplot.contourf(cube)
            filename = f"su_{dataset_name.replace('|', '_')}"
            plot_path = get_plot_filename(filename, cfg)
            plt.savefig(plot_path, **cfg['savefig_kwargs'])
            logger.info("Wrote %s", plot_path)
            plt.close()

            # Provenance
            netcdf_path = get_diagnostic_filename(filename, cfg)
            io.iris_save(cube, netcdf_path)
            ancestors = cube.attributes['ancestors'].split('|')
            provenance_record = ec.get_provenance_record(
                {'su': attrs}, ['su'],
                caption=f'{cube.long_name} for {dataset_name}.',
                plot_type='zonal', plot_file=plot_path, ancestors=ancestors)
            with ProvenanceLogger(cfg) as provenance_logger:
                provenance_logger.log(netcdf_path, provenance_record)

        # Similarity metric
        diag_data[dataset_name] = _similarity_metric(cube, ref_cube, metric)

    return (diag_data, var_attrs, attrs)
Example #8
def test_get_plot_filename():
    """Test the output path built by get_plot_filename."""
    cfg = {
        'plot_dir': '/some/path',
        'output_file_type': 'png',
    }
    filename = shared.get_plot_filename('test', cfg)
    assert filename == '/some/path/test.png'
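The shared helpers build data filenames the same way. Below is a minimal companion sketch, not taken from the test suite, which assumes that get_diagnostic_filename resolves names against cfg['work_dir'] and defaults to the 'nc' extension, as the calls in the examples above imply.

def test_get_diagnostic_filename():
    """Sketch: analogous check for get_diagnostic_filename."""
    cfg = {
        'work_dir': '/some/path',  # assumed configuration key
    }
    # Assumed default extension 'nc', matching the netCDF calls above
    filename = shared.get_diagnostic_filename('test', cfg)
    assert filename == '/some/path/test.nc'
    # An explicit extension can also be requested, as in the 'csv' call above
    filename = shared.get_diagnostic_filename('test', cfg, extension='csv')
    assert filename == '/some/path/test.csv'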
Example #9
def mapplot(dataarray, cfg, title_pattern, filename_part, ancestors,
            **colormesh_args):
    """Visualize weighted temperature."""
    period = '{start_year}-{end_year}'.format(**read_metadata(cfg)['tas'][0])
    if 'tas_reference' in read_metadata(cfg).keys():
        meta = read_metadata(cfg)['tas_reference']
        period = 'change: {} minus {start_year}-{end_year}'.format(
            period, **meta[0])
    metric = cfg['model_aggregation']
    if isinstance(metric, int):
        metric = f'{metric}perc'
    proj = ccrs.PlateCarree(central_longitude=0)
    figure, axes = plt.subplots(subplot_kw={'projection': proj})

    dataarray = set_antimeridian(dataarray, cfg.get('antimeridian', 'pacific'))
    dataarray = dataarray.dropna('lon', how='all').dropna('lat', how='all')

    dataarray.plot.pcolormesh(
        ax=axes,
        transform=ccrs.PlateCarree(),
        levels=9,
        robust=True,
        extend='both',
        **colormesh_args
        # colorbar size often does not fit nicely
        # https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
        # cbar_kwargs={'fraction': .021}
    )

    lons = dataarray.lon.values
    lats = dataarray.lat.values
    longitude_formatter = LongitudeFormatter()
    latitude_formatter = LatitudeFormatter()
    default_xticks = np.arange(np.floor(lons.min()), np.ceil(lons.max()), 10)
    default_yticks = np.arange(np.floor(lats.min()), np.ceil(lats.max()), 10)

    axes.coastlines()
    axes.set_xticks(cfg.get('xticks', default_xticks), crs=proj)
    axes.set_yticks(cfg.get('yticks', default_yticks), crs=proj)
    axes.xaxis.set_ticks_position('both')
    axes.yaxis.set_ticks_position('both')
    axes.xaxis.set_major_formatter(longitude_formatter)
    axes.yaxis.set_major_formatter(latitude_formatter)
    axes.set_xlabel('')
    axes.set_ylabel('')

    title = title_pattern.format(metric=metric, period=period)
    axes.set_title(title)

    filename_plot = get_plot_filename(filename_part, cfg)
    figure.savefig(filename_plot, dpi=300, bbox_inches='tight')
    plt.close(figure)

    filename_data = get_diagnostic_filename(filename_part, cfg, extension='nc')
    dataarray.to_netcdf(filename_data)

    log_provenance(title, filename_plot, cfg, ancestors)
    log_provenance(title, filename_data, cfg, ancestors)
Example #10
def make_plot(metadata, scenarios, cfg, provenance):
    """Make figure 3, left graph.

    Multimodel values as line, reference value in black square,
    steering variables in dark dots.
    """
    fig, axes = plt.subplots()
    for member in select_metadata(metadata, variable_group='tas_cmip'):
        filename = member['filename']
        dataset = xr.open_dataset(filename)
        if 'MultiModel' not in filename:
            axes.plot(dataset.time.dt.year,
                      dataset.tas.values,
                      c='grey',
                      alpha=0.3,
                      lw=.5,
                      label='CMIP members')
        else:
            # Only display stats for the future period:
            dataset = dataset.sel(time=slice('2010', None, None))
            axes.plot(dataset.time.dt.year,
                      dataset.tas.values,
                      color='k',
                      linewidth=2,
                      label='CMIP ' + Path(filename).stem.split('_')[0][10:])

    for member in select_metadata(metadata, variable_group='tas_target'):
        filename = member['filename']
        dataset = xr.open_dataset(filename)
        if 'MultiModel' not in filename:
            axes.plot(dataset.time.dt.year,
                      dataset.tas.values,
                      color='blue',
                      linewidth=1,
                      label=member['dataset'])

    # Add the scenarios with dots at the cmip dt and bars for the periods
    for i, scenario in enumerate(scenarios):
        axes.scatter(scenario['year'],
                     scenario['cmip_dt'],
                     s=50,
                     zorder=10,
                     color='r',
                     label=r"Scenarios' steering $\Delta T_{CMIP}$")
        _timeline(axes, i, scenario['period_bounds'])

    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = dict(zip(labels, handles))  # dict removes dupes
    axes.legend(by_label.values(), by_label.keys())
    axes.set_xlabel('Year')
    axes.set_ylabel(r'Global mean $\Delta T$ (K) w.r.t. reference period')

    # Save figure
    filename = get_plot_filename('global_matching', cfg)
    fig.savefig(filename, bbox_inches='tight', dpi=300)
    with ProvenanceLogger(cfg) as provenance_logger:
        provenance_logger.log(filename, provenance)
Example #11
def plot_time_series_spei(cfg, cube, filename, add_to_filename=''):
    """Plot time series."""
    # SPEI vector to plot
    spei = cube.data
    # Get time from cube
    print("cube.coord(time)")
    print(cube.coord('time'))
    time = cube.coord('time').points
    # Adjust (ncdf) time to the format matplotlib expects
    add_m_delta = mda.datestr2num('1850-01-01 00:00:00')
    time = time + add_m_delta

    # Get data set name from cube
    try:
        dataset_name = cube.metadata.attributes['model_id']
    except KeyError:
        try:
            dataset_name = cube.metadata.attributes['source_id']
        except KeyError:
            dataset_name = 'Observations'

    data_dict = {'data': spei,
                 'time': time,
                 'var': cfg['indexname'],
                 'dataset_name': dataset_name,
                 'unit': '1',
                 'filename': filename,
                 'area': add_to_filename}

    fig, axx = plt.subplots(figsize=(16, 4))
    axx.plot_date(time, spei, '-', tz=None, xdate=True, ydate=False,
                  color='r', linewidth=4., linestyle='-', alpha=1.,
                  marker='x')
    axx.axhline(y=-2, color='k')

    # Plot labels and title
    axx.set_xlabel('Time')
    axx.set_ylabel(cfg['indexname'])
    axx.set_title('Mean ' + cfg['indexname'] + ' ' +
                  data_dict['dataset_name'] + ' '
                  + data_dict['area'])

    # Set limits for y-axis
    axx.set_ylim(-4.0, 4.0)

    # Often improves the layout
    fig.tight_layout()
    # Save plot to file
    fig.savefig(get_plot_filename(cfg['indexname'] +
                                  '_time_series_' +
                                  data_dict['area'] +
                                  '_' +
                                  data_dict['dataset_name'], cfg), dpi=300)
    plt.close()

    _provenance_time_series_spei(cfg, data_dict)
Example #12
def _get_plot_filename(var_meta, cfg):
    """Return an output filename for RWR map plots."""
    basename = "_".join([var_meta["project"],
                         var_meta["dataset"],
                         var_meta["exp"],
                         var_meta["ensemble"],
                         "rwr"])

    filename = get_plot_filename(basename, cfg)
    return filename
Example #13
def plot_table(dataframe, ancestors, cfg):
    """Render pandas table as a matplotlib figure."""
    fig, axes = plt.subplots()
    pd.plotting.table(axes, dataframe.reset_index().round(2))
    axes.set_axis_off()

    filename = get_plot_filename('table', cfg)
    fig.savefig(filename, bbox_inches='tight')

    caption = "Bias and change for each variable"
    log_provenance(filename, ancestors, caption, cfg)
Example #14
def main(diag_config):
    """
    Diagnostic function to compare the zonal turnover time.

    Argument:
    --------
        diag_config - nested dictionary of metadata
    """
    model_data_dict = group_metadata(diag_config['input_data'].values(),
                                     'dataset')

    fig_config = _get_fig_config(diag_config)
    zonal_tau_mod = {}
    for model_name, model_dataset in model_data_dict.items():
        zonal_tau_mod[model_name] = {}
        ctotal = _load_variable(model_dataset, 'ctotal')
        gpp = _load_variable(model_dataset, 'gpp')
        zonal_tau_mod[model_name] = _calc_zonal_tau(gpp, ctotal, fig_config)

    zonal_tau_obs = _get_obs_data_zonal(diag_config)

    obs_var = diag_config.get('obs_variable')[0]
    tau_obs = zonal_tau_obs[obs_var]
    base_name = '{title}_{source_label}_{grid_label}z'.format(
        title=tau_obs.long_name,
        source_label=diag_config['obs_info']['source_label'],
        grid_label=diag_config['obs_info']['grid_label'])

    provenance_record = _get_provenance_record(
        "Comparison of latitudinal (zonal) variations of observation-based and"
        " modelled ecosystem carbon turnover time. The zonal turnover time is"
        " calculated as the ratio of zonal `ctotal` and `gpp`. Reproduces "
        " figure 2a and 2b in Carvalhais et al. (2014).", ['mean', 'perc'],
        ['zonal'], _get_ancestor_files(diag_config, obs_var))

    if diag_config['write_netcdf']:
        model_cubes = [
            c for c in zonal_tau_mod.values() if isinstance(c, iris.cube.Cube)
        ]
        obs_cubes = [
            c for c in zonal_tau_obs.values() if isinstance(c, iris.cube.Cube)
        ]
        netcdf_path = get_diagnostic_filename(base_name, diag_config)
        save_cubes = iris.cube.CubeList(model_cubes + obs_cubes)
        iris.save(save_cubes, netcdf_path)
        with ProvenanceLogger(diag_config) as provenance_logger:
            provenance_logger.log(netcdf_path, provenance_record)

    if diag_config['write_plots']:
        plot_path = get_plot_filename(base_name, diag_config)
        _plot_zonal_tau(plot_path, zonal_tau_mod, zonal_tau_obs, diag_config)
        with ProvenanceLogger(diag_config) as provenance_logger:
            provenance_logger.log(plot_path, provenance_record)
Example #15
def _plot(cfg, cube, dataset_name, tcr):
    """Create scatterplot of temperature anomaly vs. time."""
    if not cfg.get('plot', True):
        return (None, None)
    logger.debug("Plotting temperature anomaly vs. time for '%s'",
                 dataset_name)
    (_, axes) = plt.subplots()

    # Plot data
    x_data = np.arange(cube.shape[0])
    y_data = cube.data
    axes.scatter(x_data, y_data, color='b', marker='o')

    # Plot lines
    line_kwargs = {'color': 'k', 'linewidth': 1.0, 'linestyle': '--'}
    axes.axhline(tcr, **line_kwargs)
    axes.axvline(START_YEAR_IDX, **line_kwargs)
    axes.axvline(END_YEAR_IDX, **line_kwargs)

    # Appearance
    units_str = (cube.units.symbol
                 if cube.units.origin is None else cube.units.origin)
    axes.set_title(dataset_name)
    axes.set_xlabel('Years after experiment start')
    axes.set_ylabel(f'Temperature anomaly / {units_str}')
    axes.set_xlim([x_data[0] - 1, x_data[-1] + 1])
    axes.set_ylim([-1.0, 7.0])
    axes.text(0.0, tcr + 0.1, 'TCR = {:.1f} {}'.format(tcr, units_str))

    # Save cube
    netcdf_path = get_diagnostic_filename(dataset_name, cfg)
    io.iris_save(cube, netcdf_path)

    # Save plot
    plot_path = get_plot_filename(dataset_name, cfg)
    plt.savefig(plot_path, **cfg['savefig_kwargs'])
    logger.info("Wrote %s", plot_path)
    plt.close()

    # Provenance
    provenance_record = get_provenance_record(
        f"Time series of the global mean surface air temperature anomaly "
        f"(relative to the linear fit of the pre-industrial control run) of "
        f"{dataset_name} for the 1% CO2 increase per year experiment. The "
        f"horizontal dashed line indicates the transient climate response "
        f"(TCR) defined as the 20 year average temperature anomaly centered "
        f"at the time of CO2 doubling (vertical dashed lines).")
    provenance_record.update({
        'plot_file': plot_path,
        'plot_types': ['times'],
    })

    return (netcdf_path, provenance_record)
Example #16
def plot_map_rel_biases(cfg, cube_dict):
    """Plot global maps showing relative biases of datasets."""
    logger.info("Creating relative bias map plots")
    for (key_1, key_2) in itertools.permutations(cube_dict, 2):
        logger.debug("Plotting relative bias ('%s' - '%s') / '%s'", key_1,
                     key_2, key_2)
        cube_1 = cube_dict[key_1]
        cube_2 = cube_dict[key_2]
        attrs_1 = cube_1.attributes
        attrs_2 = cube_2.attributes
        alias_1 = _get_alias(cfg, key_1)
        alias_2 = _get_alias(cfg, key_2)

        # Mask cube to avoid division by zero
        cube_2 = _mask_cube(cube_2)

        # Plot
        bias_cube = cube_1.copy()
        bias_cube.data = (cube_1.data - cube_2.data) / cube_2.data
        plot_kwargs = {
            'cbar_label': f"relative change in {attrs_1['tag']} / 1",
            'cmap': 'bwr',
        }
        plot_kwargs.update(get_plot_kwargs(cfg, 'plot_map_rel_biases'))
        _get_map_plot_func(cfg)(bias_cube, **plot_kwargs)

        # Plot appearance
        title = _get_title(cfg,
                           alias_1,
                           attrs_1,
                           alias_2,
                           attrs_2,
                           op_type='rel_bias')
        plt.title(title)
        process_pyplot_kwargs(cfg, 'plot_map_rel_biases')

        # Write minimum and maximum
        logger.debug("Minimum of '%s': %.2f", title, bias_cube.data.min())
        logger.debug("Maximum of '%s': %.2f", title, bias_cube.data.max())

        # Save plot
        plot_path = get_plot_filename(f'map_rel_bias_{key_1}-{key_2}', cfg)
        plt.savefig(plot_path, **get_savefig_kwargs(cfg))
        logger.info("Wrote %s", plot_path)
        plt.close()

        # Provenance
        _write_map_provenance(cfg, bias_cube, plot_path, title, attrs_1,
                              attrs_2)

        # Add to global DataFrame
        _add_correlation_information(cfg, title, bias_cube)
Example #17
def main(cfg):
    """Load the pre-processed anomalies and plot them."""
    # First read them in, popping into dictionaries keyed by model name,
    # grouped by project (CMIP5, CMIP6, UKCP).
    projects = group_metadata(cfg["input_data"].values(), "project")

    results = {}
    for p in projects:
        results[p] = {}
        if p == "UKCP18":
            # loop over ensembles
            models = group_metadata(projects[p], "ensemble")
        else:
            # loop over datasets
            models = group_metadata(projects[p], "dataset")

        for m in models:
            if len(models[m]) > 1:
                raise ValueError("Too many bits of data")
            fname = models[m][0]["filename"]
            data = iris.load_cube(fname)
            results[p][m] = data.data.item()

    # plot and save the results
    for p in projects:
        # use pandas to create data for a csv file
        results_df = pd.DataFrame.from_dict(results[p], orient='index')
        # save data as csv
        results_df.to_csv(get_diagnostic_filename(f"{p}_global_tas_anom", cfg,
                                                  "csv"),
                          header=False)

        # get list of models
        models = results[p].keys()
        # and corresponding values
        vals = [results[p][m] for m in models]

        fig, ax = plt.subplots(figsize=(12.8, 9.6))

        # plot bar chart
        y_pos = np.arange(len(models))
        colors = np.empty(len(models, ), dtype=str)
        colors[::2] = 'r'
        colors[1::2] = 'b'
        ax.barh(y_pos, vals, color=colors)
        ax.set_yticks(y_pos, labels=models)

        plot_fname = get_plot_filename(f'{p}_global_anomaly', cfg)
        fig.tight_layout()
        fig.savefig(plot_fname)
        plt.close(fig)
Example #18
def plot_data(cfg, all_data, metadata):
    """Create barplot."""
    if not cfg['write_plots']:
        return None
    logger.debug("Plotting barplot")
    (_, axes) = plt.subplots(figsize=(8, 4))

    # Plot
    all_pos = []
    x_labels = []
    offset = 0.0
    for (label, xy_data) in all_data.items():
        if cfg.get('sort_ascending'):
            sort_idx = np.argsort(xy_data[1])
        elif cfg.get('sort_descending'):
            sort_idx = np.argsort(xy_data[1])[::-1]
        else:
            sort_idx = np.arange(len(xy_data[1]))
        xy_data = (xy_data[0][sort_idx], xy_data[1][sort_idx])
        pos = np.arange(len(xy_data[0])) + offset + 0.5
        axes.bar(pos, xy_data[1], align='center', label=label)
        all_pos.extend(pos)
        x_labels.extend(xy_data[0])
        offset += len(pos) + 1.0

    # Plot appearance
    axes.set_title(metadata['long_name'])
    axes.set_xticks(all_pos)
    axes.set_xticklabels(x_labels, rotation=45.0, ha='right', size=7.0)
    axes.set_ylabel(f"{metadata['var_name']} / {metadata['units']}")
    axes.set_ylim(cfg.get('y_range'))
    if 'label_attribute' in cfg:
        axes.legend(loc='upper right')
    if cfg.get('value_labels'):
        for rect in axes.patches:
            axes.text(rect.get_x() + rect.get_width() / 2.0,
                      rect.get_height(),
                      "{:.1f}".format(rect.get_height()),
                      ha='center',
                      va='bottom',
                      size=5.0)

    # Save plot
    plot_path = get_plot_filename(metadata['var_name'], cfg)
    plt.savefig(plot_path,
                orientation='landscape',
                bbox_inches='tight',
                dpi=300)
    logger.info("Wrote %s", plot_path)
    plt.close()
    return plot_path
Example #19
def _save_fig(cfg, basename, legend=None):
    """Save matplotlib figure."""
    path = get_plot_filename(basename, cfg)
    if legend is None:
        legend = []
    else:
        legend = [legend]
    FIG.savefig(path,
                additional_artists=legend,
                bbox_inches='tight',
                orientation='landscape')
    logger.info("Wrote %s", path)
    AXES.cla()
    return path
Example #20
def _get_plot_filename(var_meta, cfg, label):
    """Return an output filename for plots."""
    basename = "_".join([
        var_meta["project"],
        var_meta["dataset"],
        var_meta["exp"],
        var_meta["ensemble"],
        label,
        str(var_meta["start_year"]),
        str(var_meta["end_year"]),
    ])

    filename = get_plot_filename(basename, cfg)
    return filename
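A hypothetical usage sketch (all metadata values below are made up) showing the path this wrapper would return, given the behaviour of get_plot_filename demonstrated in Example #8:

var_meta = {
    'project': 'CMIP6',
    'dataset': 'EXAMPLE-MODEL',
    'exp': 'historical',
    'ensemble': 'r1i1p1f1',
    'start_year': 1980,
    'end_year': 2010,
}
cfg = {'plot_dir': '/some/path', 'output_file_type': 'png'}
filename = _get_plot_filename(var_meta, cfg, 'rwr')
# Expected (hypothetical) result:
# '/some/path/CMIP6_EXAMPLE-MODEL_historical_r1i1p1f1_rwr_1980_2010.png'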
Example #21
def plot_data(cfg, hist_cubes, pi_cubes, ecs_cube):
    """Plot data."""
    if not cfg['write_plots']:
        return None
    x_data = []
    y_data = []
    dataset_names = []
    plot_kwargs = []

    # Collect data
    for dataset in hist_cubes:
        ecs = ecs_cube.extract(iris.Constraint(dataset=dataset))
        if ecs is None:
            raise ValueError(f"No ECS data for '{dataset}' available")

        # Historical data
        x_data.append(ecs.data)
        y_data.append(hist_cubes[dataset].data)
        dataset_names.append(dataset)
        plot_kwargs.append({
            'label': dataset,
            'linestyle': 'none',
            'markersize': 10,
        })

        # PiControl data
        x_data.append(ecs.data)
        y_data.append(pi_cubes[dataset].data)
        dataset_names.append(dataset)
        plot_kwargs.append({
            'label': '_' + dataset,
            'linestyle': 'none',
            'markersize': 6,
        })

    # Plot data
    path = get_plot_filename('ch09_fig09_42a', cfg)
    plot.multi_dataset_scatterplot(
        x_data,
        y_data,
        dataset_names,
        path,
        plot_kwargs=plot_kwargs,
        save_kwargs=cfg.get('save', {}),
        axes_functions=cfg.get('axes_functions', {}),
        dataset_style_file=cfg.get('dataset_style'),
        mpl_style_file=cfg.get('matplotlib_style'),
    )
    return path
Example #22
def visualize_and_save_weights(weights: 'xr.DataArray', cfg: dict,
                               ancestors: list):
    """Visualize weights."""
    label = 'Weights'

    filename_plot = get_plot_filename('weights', cfg)

    barplot(weights, label, filename_plot)

    filename_data = get_diagnostic_filename('weights', cfg, extension='nc')
    weights.to_netcdf(filename_data)

    caption = 'Weights'
    log_provenance(caption, filename_plot, cfg, ancestors)
    log_provenance(caption, filename_data, cfg, ancestors)
Example #23
def plot_data(cfg, cube):
    """Create scatterplot for cube."""
    if not cfg['write_plots']:
        return None
    logger.debug("Plotting scatterplot for cube %s",
                 cube.summary(shorten=True))
    (_, axes) = plt.subplots()
    project = cube.attributes.get('project')

    # Plot
    for (idx, dataset_name) in enumerate(cube.coord('dataset').points):
        style = plot.get_dataset_style(dataset_name, cfg.get('dataset_style'))
        y_data = cube.extract(iris.Constraint(dataset=dataset_name)).data
        axes.plot(idx + 1,
                  y_data,
                  marker=style['mark'],
                  linestyle='none',
                  markeredgecolor=style['color'],
                  markerfacecolor=style['facecolor'],
                  label=dataset_name)

    # Plot appearance
    title = cube.long_name
    if project is not None:
        title += f' for {project}'
    axes.set_title(title)
    axes.tick_params(axis='x',
                     which='both',
                     bottom=False,
                     top=False,
                     labelbottom=False)
    axes.set_ylabel(f"{cube.var_name.upper()} / {cube.units}")
    axes.set_ylim(cfg.get('y_range'))
    legend = axes.legend(loc='center left',
                         bbox_to_anchor=[1.05, 0.5],
                         borderaxespad=0.0,
                         ncol=2)

    # Save plot
    plot_path = get_plot_filename(cube.var_name, cfg)
    plt.savefig(plot_path,
                orientation='landscape',
                bbox_inches='tight',
                additional_artists=[legend])
    logger.info("Wrote %s", plot_path)
    plt.close()
    return plot_path
Example #24
def plot_data(cfg, all_data, metadata):
    """Create barplot."""
    logger.debug("Plotting barplot")
    (_, axes) = plt.subplots(**cfg.get('subplots_kwargs', {}))

    # Plot
    all_pos = []
    x_labels = []
    offset = 0.0
    all_data = _get_ordered_dict(cfg, all_data)
    for (label, xy_data) in all_data.items():
        xy_data = (xy_data[0], xy_data[1])
        pos = np.arange(len(xy_data[0])) + offset + 0.5
        bars = axes.bar(pos, xy_data[1], align='center', label=label)
        all_pos.extend(pos)
        x_labels.extend(xy_data[0])
        offset += len(pos) + 1.0
        if 'Mean' in xy_data[0]:
            mean_idx = np.nonzero(xy_data[0] == 'Mean')[0][0]
            bars[mean_idx].set_facecolor(
                _adjust_lightness(bars[mean_idx].get_facecolor()[:3]))

    # Plot appearance
    axes.set_title(metadata['long_name'])
    axes.set_xticks(all_pos)
    axes.set_xticklabels(x_labels, rotation=45.0, ha='right', size=4.0)
    axes.tick_params(axis='x', which='major', pad=-5.0)
    axes.set_ylabel(f"{metadata['var_name']} / {metadata['units']}")
    axes.set_ylim(cfg.get('y_range'))
    if 'label_attribute' in cfg:
        axes.legend(loc='upper right')
    if cfg.get('value_labels'):
        for rect in axes.patches:
            axes.text(rect.get_x() + rect.get_width() / 2.0,
                      rect.get_height() + 0.05,
                      "{:.2f}".format(rect.get_height()),
                      rotation=90.0,
                      ha='center',
                      va='bottom',
                      size=5.0)

    # Save plot
    plot_path = get_plot_filename(metadata['var_name'], cfg)
    plt.savefig(plot_path, **cfg['savefig_kwargs'])
    logger.info("Wrote %s", plot_path)
    plt.close()
    return plot_path
Example #25
def plot_scatter(tidy_df, ancestors, cfg):
    """Plot bias on one axis and change on the other."""
    grid = sns.relplot(
        data=tidy_df,
        x="Bias (RMSD of all gridpoints)",
        y="Mean change (Future - Reference)",
        hue="dataset",
        col="variable",
        facet_kws=dict(sharex=False, sharey=False),
        kind='scatter',
    )

    filename = get_plot_filename('bias_vs_change', cfg)
    grid.fig.savefig(filename, bbox_inches='tight')

    caption = "Bias and change for each variable"
    log_provenance(filename, ancestors, caption, cfg)
Example #26
def make_plots(cfg, scenario_tables):
    """Reproduce figure 5 from the paper."""
    # Note that quantile is applied twice! Once to get the pdf's of seasonal
    # tas/pr and once to get the multimodel pdf of the quantile changes
    metadata = cfg['input_data'].values()

    climates = {}
    for name, info in cfg['scenarios'].items():
        climatology = _get_climatology(cfg, name, table=scenario_tables[name])
        climates[name] = climatology

    for year in [2050, 2085]:
        fig, subplots = plt.subplots(2, 2, figsize=(12, 8))

        for row, variable in zip(subplots, ['pr', 'tas']):
            cmip, prov = _cmip_envelope(metadata, variable, year)

            for axes, season in zip(row, ['DJF', 'JJA']):
                percentiles = cmip.percentile.values
                xlocs = np.arange(len(percentiles))

                # Plot the cmip envelope
                seasondata = cmip.sel(season=season)
                for high, low in [[0.9, 0.1], [0.75, 0.25]]:
                    upper = seasondata.quantile(high, dim='multimodel')
                    lower = seasondata.quantile(low, dim='multimodel')
                    axes.fill_between(xlocs, upper, lower, color='k', alpha=.3)
                    axes.set_title(f'{variable} / {season}')

                # Plot the recombined scenarios
                for name, info in cfg['scenarios'].items():
                    if year == info['scenario_year']:
                        climate = climates[name].sel(season=season)[variable]
                        axes.plot(xlocs, climate, lw=3, label=name)

                axes.set_xticks(xlocs)
                axes.set_xticklabels([f'P{100*x:02.0f}' for x in percentiles])
        subplots[0, 0].set_ylabel('change (%)')
        subplots[1, 0].set_ylabel('change (K)')
        subplots[1, 1].legend()
        filename = get_plot_filename(f'local_validation_{year}', cfg)
        fig.suptitle(f'Year: {year}')
        fig.savefig(filename, bbox_inches='tight', dpi=300)
        LOGGER.info("Envelope figure stored as %s", filename)
        with ProvenanceLogger(cfg) as provenance_logger:
            provenance_logger.log(filename, prov)
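A toy sketch (not part of the diagnostic) of the double quantile use noted in the comment at the top of make_plots: percentiles are first taken per model over time, then a multimodel envelope is taken of those percentiles. The array shape and dimension names here are assumptions for illustration only.

import numpy as np
import xarray as xr

rng = np.random.default_rng(0)
# Hypothetical seasonal data: 5 models, 100 time steps
tas = xr.DataArray(rng.normal(size=(5, 100)), dims=('multimodel', 'time'))
# First quantile: per-model percentiles over time
percentiles = tas.quantile([0.1, 0.5, 0.9],
                           dim='time').rename({'quantile': 'percentile'})
# Second quantile: multimodel envelope of those percentile values
envelope = percentiles.quantile([0.25, 0.75], dim='multimodel')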
Example #27
def visualize_and_save_performance(performance: 'xr.DataArray', cfg: dict,
                                   ancestors: list):
    """Visualize performance."""
    label = 'RMS error'

    variable_group = performance.variable_group
    filename_plot = get_plot_filename(f'performance_{variable_group}', cfg)

    barplot(performance, label, filename_plot)

    filename_data = get_diagnostic_filename(f'performance_{variable_group}',
                                            cfg,
                                            extension='nc')
    performance.to_netcdf(filename_data)

    caption = f'Performance metric {label} for variable group {variable_group}'
    log_provenance(caption, filename_plot, cfg, ancestors)
    log_provenance(caption, filename_data, cfg, ancestors)
Example #28
def produce_plots(config, data):
    """Produce all elements of the full plot."""
    ref_line_style = {'linestyle': '-', 'linewidth': 2.}
    fig, axes = setup_figure()
    lines, labels = plot_zonal_mean_errors_ensemble(axes[0, 0],
                                                    data['zonal_mean_errors'],
                                                    ref_line_style)
    plot_equatorial_errors(axes[0, 1], data['equatorial_errors'],
                           ref_line_style)
    plot_zonal_mean_errors_project(axes[1, 0], data['zonal_mean_errors'],
                                   ref_line_style)
    ref_ls, ref_labels = plot_equatorials(axes[1, 1], data['equatorial_ref'],
                                          data['equatorials'], ref_line_style)
    all_lines = ref_ls + lines
    all_labels = ref_labels + labels
    legend = draw_legend(fig, all_lines, all_labels)
    path = get_plot_filename('fig-9-14', config)
    fig.savefig(path, additional_artists=[legend], tight_layout=True)
    return path
Example #29
def save_results(cfg, cube, basename, ancestor_files):
    """Create a provenance record describing the diagnostic data and plot."""
    basename = basename + '_' + cube.var_name
    provenance = {
        'caption': cube.long_name.replace('\n', ' '),
        'statistics': ['other'],
        'domains': ['global'],
        'authors': ['berg_peter'],
        'references': ['acknow_project'],
        'ancestors': ancestor_files,
    }
    if cfg['write_plots'] and cfg.get('quickplot'):
        plot_file = get_plot_filename(basename, cfg)
        quickplot(cube, plot_file, **cfg['quickplot'])
        provenance['plot_file'] = plot_file
    if cfg['write_netcdf']:
        netcdf_file = get_diagnostic_filename(basename, cfg)
        iris.save(cube, target=netcdf_file)
        with ProvenanceLogger(cfg) as provenance_logger:
            provenance_logger.log(netcdf_file, provenance)
Example #30
def plot_map(cfg, cube_dict):
    """Plot global maps showing datasets."""
    logger.info("Creating map plots")
    for (key, cube) in cube_dict.items():
        logger.debug("Plotting '%s'", key)
        attrs = cube.attributes

        # Plot
        plot_kwargs = {
            'cbar_label': f"{attrs['tag']} / {cube.units}",
            'cmap': 'YlGn',
        }
        plot_kwargs.update(get_plot_kwargs(cfg, 'plot_map', key=key))
        _get_map_plot_func(cfg)(cube, **plot_kwargs)

        # Plot appearance
        alias = _get_alias(cfg, key)
        title = _get_title(cfg, alias, attrs)
        plt.title(title)
        process_pyplot_kwargs(cfg, 'plot_map')

        # Write minimum and maximum
        logger.debug("Minimum of '%s': %.2f", title, cube.data.min())
        logger.debug("Maximum of '%s': %.2f", title, cube.data.max())

        # Save plot
        plot_path = get_plot_filename(f'map_{key}', cfg)
        plt.savefig(plot_path, **get_savefig_kwargs(cfg))
        logger.info("Wrote %s", plot_path)
        plt.close()

        # Provenance
        _write_map_provenance(cfg, cube, plot_path, title, attrs)

        # Add to global DataFrame
        _add_correlation_information(cfg, title, cube)