Code example #1
def plot_temperature_anomaly(cfg, tas_cubes, lambda_cube, obs_name):
    """Plot temperature anomaly versus time."""
    # Compute anomalies relative to the 1961-1990 baseline mean
    for cube in tas_cubes.values():
        cube.data -= np.mean(
            cube.extract(
                iris.Constraint(year=lambda cell: 1961 <= cell <= 1990)).data)

    # Save netcdf file and provenance
    filename = 'temperature_anomaly_{}'.format(obs_name)
    netcdf_path = get_diagnostic_filename(filename, cfg)
    io.save_1d_data(tas_cubes, netcdf_path, 'year', TASA_ATTRS)
    project = _get_project(cfg)
    provenance_record = get_provenance_record(
        "Simulated change in global temperature from {} models (coloured "
        "lines), compared to the global temperature anomaly from the {} "
        "dataset (black dots). The anomalies are relative to a baseline "
        "period of 1961-1990.".format(project, obs_name), ['anomaly'],
        ['times'], _get_ancestor_files(cfg, obs_name))

    # Plot
    if cfg['write_plots']:
        models = lambda_cube.coord('dataset').points

        # Plot lines
        for model in models:
            cube = tas_cubes[model]
            AXES.plot(cube.coord('year').points,
                      cube.data,
                      color=_get_model_color(model, lambda_cube))
        obs_style = plot.get_dataset_style('OBS', 'cox18nature')
        obs_cube = tas_cubes[obs_name]
        AXES.plot(obs_cube.coord('year').points,
                  obs_cube.data,
                  linestyle='none',
                  marker='o',
                  markeredgecolor=obs_style['color'],
                  markerfacecolor=obs_style['color'])

        # Plot appearance
        AXES.set_title('Simulation of global warming record')
        AXES.set_xlabel('Year')
        AXES.set_ylabel('Temperature anomaly / K')
        legend = _get_line_plot_legend()

        # Save plot
        provenance_record['plot_file'] = _save_fig(cfg, filename, legend)

    # Write provenance
    with ProvenanceLogger(cfg) as provenance_logger:
        provenance_logger.log(netcdf_path, provenance_record)
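The baseline step at the top of plot_temperature_anomaly can be tried in isolation. Below is a minimal, self-contained sketch with toy values (the cube contents and names here are illustrative assumptions, not data from the diagnostic):

import iris
import iris.coords
import iris.cube
import numpy as np

# Toy cube with a 'year' dimension coordinate.
years = np.arange(1950, 2001)
cube = iris.cube.Cube(
    np.linspace(285.0, 287.0, years.size),
    var_name='tas',
    units='K',
    dim_coords_and_dims=[(iris.coords.DimCoord(years, long_name='year'), 0)],
)

# Same pattern as in plot_temperature_anomaly: restrict the cube to the
# baseline period and subtract that period's mean so the data become
# anomalies relative to 1961-1990.
baseline = cube.extract(
    iris.Constraint(year=lambda cell: 1961 <= cell <= 1990))
cube.data -= np.mean(baseline.data)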
Code example #2
def plot_psi(cfg, psi_cubes, lambda_cube, obs_name):
    """Plot temperature variability metric psi versus time."""
    filename = 'temperature_variability_metric_{}'.format(obs_name)
    netcdf_path = get_diagnostic_filename(filename, cfg)
    io.save_1d_data(psi_cubes, netcdf_path, 'year', PSI_ATTRS)
    project = _get_project(cfg)
    provenance_record = get_provenance_record(
        "Psi metric of variability versus time, from the {0} models "
        "(coloured lines), and the {1} observational data (black circles). "
        "The psi values are calculated for windows of width {2} yr, after "
        "linear de-trending in each window. These {2}-yr windows are shown "
        "for different end times.".format(project, obs_name,
                                          cfg.get('window_length', 55)),
        ['corr', 'var'], ['times'], _get_ancestor_files(cfg, obs_name))

    # Plot
    if cfg['write_plots']:
        models = lambda_cube.coord('dataset').points

        # Plot lines
        for model in models:
            cube = psi_cubes[model]
            AXES.plot(cube.coord('year').points,
                      cube.data,
                      color=_get_model_color(model, lambda_cube))
        obs_style = plot.get_dataset_style('OBS', 'cox18nature')
        obs_cube = psi_cubes[obs_name]
        AXES.plot(obs_cube.coord('year').points,
                  obs_cube.data,
                  linestyle='none',
                  marker='o',
                  markeredgecolor=obs_style['color'],
                  markerfacecolor=obs_style['color'])

        # Plot appearance
        AXES.set_title('Metric of variability versus time')
        AXES.set_xlabel('Year')
        AXES.set_ylabel(r'$\Psi$ / K')
        legend = _get_line_plot_legend()

        # Save plot
        provenance_record['plot_file'] = _save_fig(cfg, filename, legend)

    # Write provenance
    with ProvenanceLogger(cfg) as provenance_logger:
        provenance_logger.log(netcdf_path, provenance_record)
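The provenance caption mentions linear de-trending inside each window before psi is computed. The actual psi calculation is not part of this snippet; the following is only a hedged numpy sketch, with made-up values, of what de-trending one such window looks like:

import numpy as np

window = np.array([0.10, 0.18, 0.05, 0.22, 0.30, 0.21, 0.35])  # toy anomalies
years = np.arange(window.size)

# Fit and remove a least-squares linear trend from the window.
slope, intercept = np.polyfit(years, window, 1)
detrended = window - (slope * years + intercept)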
Code example #3
def _write_scalar_data(data, ancestor_files, cfg, description=None):
    """Write scalar data for multiple datasets."""
    # Attributes for the two output variables (order must match `data`)
    var_attrs = [
        {
            'short_name': 'ecs',
            'long_name': 'Equilibrium Climate Sensitivity (Gregory method)',
            'units': cf_units.Unit('K'),
        },
        {
            'short_name': 'lambda',
            'long_name': 'Climate Feedback Parameter',
            'units': cf_units.Unit('W m-2 K-1'),
        },
    ]
    global_attrs = {'project': list(cfg['input_data'].values())[0]['project']}
    if RTMT_DATASETS:
        global_attrs['net_toa_radiation'] = RTMT_TEXT.format(RTMT_DATASETS)
    for (idx, var_attr) in enumerate(var_attrs):
        caption = '{long_name} for multiple climate models'.format(**var_attr)
        if description is not None:
            filename = '{}_{}'.format(var_attr['short_name'],
                                      description.replace(' ', '_'))
            attributes = {'Description': description}
            caption += f' for {description}.'
        else:
            filename = var_attr['short_name']
            attributes = {}
            caption += '.'
        attributes.update(global_attrs)
        path = get_diagnostic_filename(filename, cfg)
        if not data[idx]:
            raise ValueError(f"Cannot write file {path}, no data for variable "
                             f"'{var_attr['short_name']}' given")

        # Scalar data
        if NDIMS['rad'] == 1:
            io.save_scalar_data({d: data[idx][d].data
                                 for d in data[idx]},
                                path,
                                var_attr,
                                attributes=attributes)

        # 1D data
        elif NDIMS['rad'] == 2:
            io.save_1d_data(data[idx],
                            path,
                            COORDS['rad'][0],
                            var_attr,
                            attributes=attributes)

        # Higher dimensions
        else:
            logger.info(
                "Writing netcdf summary file including ECS and feedback "
                "parameters for all datasets is not supported for %iD data "
                "yet", NDIMS['rad'])
            return

        # Provenance
        provenance_record = _get_provenance_record(caption)
        provenance_record['ancestors'] = ancestor_files
        with ProvenanceLogger(cfg) as provenance_logger:
            provenance_logger.log(path, provenance_record)
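The filename/caption construction above can be exercised on its own. This sketch uses an assumed description string purely for illustration (cf_units is the real package the snippet relies on):

import cf_units

var_attr = {
    'short_name': 'lambda',
    'long_name': 'Climate Feedback Parameter',
    'units': cf_units.Unit('W m-2 K-1'),
}
description = 'historical period'  # hypothetical description string

caption = '{long_name} for multiple climate models'.format(**var_attr)
filename = '{}_{}'.format(var_attr['short_name'],
                          description.replace(' ', '_'))
caption += f' for {description}.'

assert filename == 'lambda_historical_period'
assert caption.startswith('Climate Feedback Parameter')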
Code example #4
def test_save_1d_data(mock_logger, mock_save, var_attrs, attrs):
    """Test saving of 1 dimensional data."""
    coord_name = 'inclination'
    data = [
        np.ma.masked_invalid([1.0, np.nan, -1.0]),
        np.arange(2.0) + 100.0,
        np.ma.masked_invalid([33.0, 22.0, np.nan, np.nan, -77.0]),
    ]
    coords = [
        iris.coords.DimCoord(np.arange(3.0) - 3.0, long_name=coord_name),
        iris.coords.DimCoord(np.arange(2.0) + 2.0, long_name=coord_name),
        iris.coords.DimCoord(np.array([-7.0, -3.0, -2.71, 3.0, 314.15]),
                             long_name=coord_name),
    ]
    cubes = OrderedDict([
        ('model1',
         iris.cube.Cube(data[0],
                        var_name='xy',
                        units='kg',
                        attributes={'hi': '!'},
                        dim_coords_and_dims=[(coords[0], 0)])),
        ('model2',
         iris.cube.Cube(data[1],
                        var_name='zr',
                        units='1',
                        attributes={},
                        dim_coords_and_dims=[(coords[1], 0)])),
        ('model3',
         iris.cube.Cube(data[2],
                        var_name='wa',
                        units='unknown',
                        attributes={'very': 'long cube'},
                        dim_coords_and_dims=[(coords[2], 0)])),
    ])
    dataset_dim = iris.coords.AuxCoord(list(cubes.keys()), long_name='dataset')
    dim_1 = coords[0].copy([-7.0, -3.0, -2.71, -2.0, -1.0, 2.0, 3.0, 314.15])
    output_data = np.ma.masked_invalid(
        [[np.nan, 1.0, np.nan, np.nan, -1.0, np.nan, np.nan, np.nan],
         [np.nan, np.nan, np.nan, np.nan, np.nan, 100.0, 101.0, np.nan],
         [33.0, 22.0, np.nan, np.nan, np.nan, np.nan, np.nan, -77.0]])
    output_dims = [(dataset_dim, 0), (dim_1, 1)]

    # Without cubes
    with pytest.raises(ValueError):
        io.save_1d_data({}, PATH, coord_name, var_attrs, attrs)
    mock_logger.error.assert_not_called()
    assert not mock_save.called
    mock_logger.reset_mock()
    mock_save.reset_mock()

    # With cubes
    if 'units' not in var_attrs:
        with pytest.raises(ValueError):
            io.save_1d_data(cubes, PATH, coord_name, var_attrs, attrs)
        mock_logger.error.assert_called_once()
        assert not mock_save.called
        return
    io.save_1d_data(cubes, PATH, coord_name, var_attrs, attrs)
    iris_var_attrs = deepcopy(var_attrs)
    iris_var_attrs['var_name'] = iris_var_attrs.pop('short_name')
    new_cube = iris.cube.Cube(output_data,
                              aux_coords_and_dims=output_dims,
                              attributes=attrs,
                              **iris_var_attrs)
    mock_logger.error.assert_not_called()
    assert mock_save.call_args_list == [mock.call(new_cube, PATH)]
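The expected output cube implies that io.save_1d_data aligns all input cubes onto the union of their coordinate points and masks the gaps. A small numpy reconstruction of just that coordinate-union step (a toy check, not the library code):

import numpy as np

points = [
    np.arange(3.0) - 3.0,                        # model1: [-3, -2, -1]
    np.arange(2.0) + 2.0,                        # model2: [2, 3]
    np.array([-7.0, -3.0, -2.71, 3.0, 314.15]),  # model3
]
union = np.unique(np.concatenate(points))
# union == [-7.0, -3.0, -2.71, -2.0, -1.0, 2.0, 3.0, 314.15], i.e. exactly
# the points given to dim_1 in the test above.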