Example #1
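The snippets below are excerpts from ESMValTool diagnostic scripts and their unit tests, so their imports are not shown. A minimal, assumed import block that would back the diagnostic examples (module paths follow ESMValTool's esmvaltool.diag_scripts.shared utilities and may differ between versions):

import logging
import os

import iris
import pandas as pd
import seaborn as sns

# Assumed shared ESMValTool helpers used throughout the examples.
from esmvaltool.diag_scripts.shared import (
    ProvenanceLogger,
    group_metadata,
    io,
    plot,
    run_diagnostic,
    variables_available,
)
from esmvaltool.diag_scripts.shared import iris_helpers

ih = iris_helpers  # Example #4 refers to this module by the shorter alias 'ih'
logger = logging.getLogger(os.path.basename(__file__))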
def main(cfg):
    """Run the diagnostic."""
    sns.set(**cfg.get('seaborn_settings', {}))
    ecs_file = io.get_ancestor_file(cfg, 'ecs.nc')
    tcr_file = io.get_ancestor_file(cfg, 'tcr.nc')
    ecs_cube = iris.load_cube(ecs_file)
    tcr_cube = iris.load_cube(tcr_file)

    # Project
    if (ecs_cube.attributes.get('project', 'a') != tcr_cube.attributes.get(
            'project', 'b')):
        raise ValueError(
            "ECS and TCR input files have either no 'project' attribute or "
            "differ in it")
    project = ecs_cube.attributes['project']

    # Remove missing data and use equal coordinate
    ecs_cube, tcr_cube = iris_helpers.intersect_dataset_coordinates(
        [ecs_cube, tcr_cube])

    # Create plot
    plot_path = plot_data(cfg, ecs_cube, tcr_cube)

    # Write netcdf file
    netcdf_path = write_data(cfg, ecs_cube, tcr_cube)

    # Provenance
    ancestor_files = [ecs_file, tcr_file]
    provenance_record = get_provenance_record(project, ancestor_files)
    provenance_record.update({
        'plot_file': plot_path,
        'plot_types': ['scatter'],
    })
    with ProvenanceLogger(cfg) as provenance_logger:
        provenance_logger.log(netcdf_path, provenance_record)
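An ESMValTool diagnostic like the one above is normally wired up through the shared run_diagnostic context manager. A minimal entry point, assuming the import sketch given at the top of Example #1:

if __name__ == '__main__':
    # run_diagnostic() yields the configuration dictionary that is passed to main().
    with run_diagnostic() as config:
        main(config)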
Example #2
def test_get_ancestor_file(mock_get_all_ancestors, files, output):
    """Test retrieving of single ancestor file."""
    mock_get_all_ancestors.return_value = files
    if isinstance(output, type):
        with pytest.raises(output):
            io.get_ancestor_file(CFG, pattern='*')
        return
    returned_file = io.get_ancestor_file(CFG, pattern='*')
    assert returned_file == output
Example #3
def test_get_ancestor_file(mock_logger, mock_get_all_ancestors, files, output,
                           logger):
    """Test retrieving of single ancestor file."""
    mock_get_all_ancestors.return_value = files
    returned_file = io.get_ancestor_file(CFG, pattern='*')
    assert returned_file == output
    if logger:
        mock_logger.warning.assert_called()
    else:
        mock_logger.warning.assert_not_called()
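Examples #2 and #3 omit the pytest parametrization and mock patches that supply the files, output and logger arguments. A sketch of what the setup for Example #2 could look like; the patch target, the CFG fixture and the parameter cases are assumptions for illustration, not the actual test code:

from unittest import mock

import pytest

CFG = {'input_files': []}  # hypothetical minimal diagnostic configuration


@pytest.mark.parametrize('files,output', [
    (['/work/ancestor.nc'], '/work/ancestor.nc'),  # a single match is returned as-is
    ([], ValueError),                              # hypothetical failure case (an exception class)
])
@mock.patch.object(io, '_get_all_ancestor_files', autospec=True)  # assumed name of the patched helper
def test_get_ancestor_file(mock_get_all_ancestors, files, output):
    """Sketch only; see Example #2 for the real test body."""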
Example #4
def get_external_cubes(cfg):
    """Get external cubes for psi, ECS and lambda."""
    cubes = iris.cube.CubeList()
    for filename in ('psi.nc', 'ecs.nc', 'lambda.nc'):
        filepath = io.get_ancestor_file(cfg, filename)
        cube = iris.load_cube(filepath)
        cube = cube.extract(
            ih.iris_project_constraint(['OBS'], cfg, negate=True))
        cubes.append(cube)
    cubes = ih.intersect_dataset_coordinates(cubes)
    return (cubes[0], cubes[1], cubes[2])
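The returned tuple preserves the order of the file names in the loop, so a caller would unpack it accordingly:

# Order follows the ('psi.nc', 'ecs.nc', 'lambda.nc') loop above.
psi_cube, ecs_cube, lambda_cube = get_external_cubes(cfg)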
Example #5
def main(cfg):
    """Run the diagnostic."""
    sns.set(**cfg.get('seaborn_settings', {}))
    input_data = cfg['input_data'].values()
    project = list(group_metadata(input_data, 'project').keys())
    project = [p for p in project if 'obs' not in p.lower()]
    if len(project) == 1:
        project = project[0]

    # Check if tas is available
    if not variables_available(cfg, ['tas']):
        raise ValueError("This diagnostic needs 'tas' variable")

    # Get ECS data
    ecs_filepath = io.get_ancestor_file(cfg, 'ecs.nc')
    ecs_cube = iris.load_cube(ecs_filepath)

    # Create iris cubes for each dataset
    hist_cubes = {}
    pi_cubes = {}
    for data in input_data:
        name = data['dataset']
        logger.info("Processing %s", name)
        cube = iris.load_cube(data['filename'])

        # Preprocess cubes
        cube.convert_units(cfg.get('tas_units', 'celsius'))
        cube = cube.collapsed(['time'], iris.analysis.MEAN)

        # Save cubes
        if data.get('exp') == 'historical':
            hist_cubes[name] = cube
        elif data.get('exp') == 'piControl':
            pi_cubes[name] = cube
        else:
            pass  # datasets from other experiments are ignored

    # Plot data
    plot_path = plot_data(cfg, hist_cubes, pi_cubes, ecs_cube)

    # Write netcdf file
    netcdf_path = write_data(cfg, hist_cubes, pi_cubes, ecs_cube)

    # Provenance
    ancestor_files = [d['filename'] for d in input_data]
    ancestor_files.append(ecs_filepath)
    provenance_record = get_provenance_record(project, ancestor_files)
    if plot_path is not None:
        provenance_record.update({
            'plot_file': plot_path,
            'plot_types': ['scatter'],
        })
    with ProvenanceLogger(cfg) as provenance_logger:
        provenance_logger.log(netcdf_path, provenance_record)
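Both main() examples delegate to a get_provenance_record helper defined elsewhere in their scripts. ESMValTool provenance records typically carry caption, authors, references and ancestors entries; the sketch below is illustrative and its concrete values are made up:

def get_provenance_record(project, ancestor_files):
    """Return a provenance record for the output file (illustrative sketch)."""
    return {
        'caption': f"Scatter plot of ECS-related quantities for {project} data.",  # assumed caption
        'authors': ['author_name'],      # hypothetical
        'references': ['reference_id'],  # hypothetical
        'ancestors': ancestor_files,
    }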
Example #6
def _get_style(dataset_name, cfg):
    """Get style for individual data points."""
    style = plot.get_dataset_style(dataset_name, cfg.get('dataset_style'))
    if not cfg.get('marker_file'):
        return style
    marker_file = os.path.expanduser(cfg['marker_file'])
    if not os.path.isabs(marker_file):
        marker_file = io.get_ancestor_file(cfg, marker_file)
    data_frame = pd.read_csv(marker_file)
    marker_column = cfg['marker_column']
    for column in ('dataset', marker_column):
        if column not in data_frame.columns:
            raise ValueError(
                f"Marker file '{marker_file}' does not contain necessary "
                f"column '{column}'")
    marker = data_frame[marker_column][data_frame['dataset'] == dataset_name]
    if len(marker) != 1:
        raise ValueError(
            f"Expected exactly one entry for marker of '{dataset_name}' in "
            f"file '{marker_file}', got {len(marker):d}")
    style['mark'] = marker.values[0]
    return style
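_get_style expects the marker file to be a CSV containing a 'dataset' column and the column named by cfg['marker_column'], with exactly one row per dataset. A hypothetical configuration and file contents that would satisfy it (all names are made up):

# Hypothetical options as they might appear in the diagnostic's cfg.
cfg = {
    'dataset_style': 'cmip5',      # assumed style-set name for plot.get_dataset_style
    'marker_file': 'markers.csv',  # relative paths are resolved via io.get_ancestor_file
    'marker_column': 'marker',
}

# markers.csv (illustrative contents):
#   dataset,marker
#   MODEL-A,o
#   MODEL-B,s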