Example #1
def test_run_diagnostic_log_level(tmp_path, monkeypatch, flag):
    """Test if setting the log level from the command line works."""
    settings = create_settings(tmp_path)
    settings_file = write_settings(settings)

    monkeypatch.setattr(sys, 'argv', ['', flag, 'error', settings_file])

    with shared.run_diagnostic():
        assert shared._base.logger.getEffectiveLevel() == logging.ERROR
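
The `flag` argument is not defined in this excerpt; it presumably comes from a pytest parametrization over the short and long spellings of the log-level option. A minimal sketch, in which the exact flag spellings are an assumption:

import pytest

# Assumed parametrization for the excerpt above; the flag spellings
# used by the real test suite may differ.
@pytest.mark.parametrize('flag', ['-l', '--log-level'])
def test_run_diagnostic_log_level(tmp_path, monkeypatch, flag):
    ...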
Example #2
def test_rerun_diagnostic_raises(tmp_path, monkeypatch):
    """Test if re-running the diagnostic script fails when output exists."""
    settings = create_settings(tmp_path)
    settings_file = write_settings(settings)

    create_run_content(settings)

    monkeypatch.setattr(sys, 'argv', ['', settings_file])

    with pytest.raises(FileExistsError):
        with shared.run_diagnostic():
            pass
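
These tests lean on `create_settings`, `write_settings`, and `create_run_content` helpers that the excerpts do not show. Below is a minimal sketch of plausible implementations, inferred from how the tests use them; everything beyond the keys and file names referenced in the tests is an assumption:

from pathlib import Path

import yaml


def create_settings(tmp_path):
    # Only 'run_dir', 'work_dir', 'plot_dir', and 'example_setting'
    # are grounded in the tests; the layout is illustrative.
    run_dir = tmp_path / 'run_dir'
    run_dir.mkdir()
    return {
        'run_dir': str(run_dir),
        'work_dir': str(tmp_path / 'work_dir'),
        'plot_dir': str(tmp_path / 'plot_dir'),
        'example_setting': 'example_value',
    }


def write_settings(settings):
    # Dump the settings to YAML and return the path, so the tests can
    # pass it to the diagnostic as a command-line argument.
    settings_file = Path(settings['run_dir']) / 'settings.yml'
    settings_file.write_text(yaml.safe_dump(settings))
    return str(settings_file)


def create_run_content(settings):
    # Simulate a previous run by touching the files that Example #4
    # checks for; directories are created on demand.
    for path in (
            Path(settings['run_dir']) / 'diagnostic_provenance.yml',
            Path(settings['run_dir']) / 'tmp.nc',
            Path(settings['work_dir']) / 'example_output.txt',
            Path(settings['plot_dir']) / 'example_output.txt',
    ):
        path.parent.mkdir(parents=True, exist_ok=True)
        path.touch()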
Example #3
def test_run_diagnostic(tmp_path, monkeypatch):
    """Test if running the diagnostic script works."""
    settings = create_settings(tmp_path)
    settings_file = write_settings(settings)

    monkeypatch.setattr(sys, 'argv', ['', settings_file])

    # Create the files that ESMValCore would normally create in the run dir
    for filename in ('log.txt', 'profile.bin', 'resource_usage.txt'):
        file = Path(settings['run_dir']) / filename
        file.touch()

    with shared.run_diagnostic() as cfg:
        assert 'example_setting' in cfg
Example #4
def test_rerun_diagnostic_flag(tmp_path, monkeypatch, flag):
    """Test if re-running the diagnostic script works."""
    exist = flag in {'-i', '--ignore'}

    settings = create_settings(tmp_path)
    settings_file = write_settings(settings)

    create_run_content(settings)

    monkeypatch.setattr(sys, 'argv', ['', flag, settings_file])

    with shared.run_diagnostic():
        assert not (Path(settings['run_dir']) /
                    'diagnostic_provenance.yml').exists()
        for file in (
                Path(settings['run_dir']) / 'tmp.nc',
                Path(settings['work_dir']) / 'example_output.txt',
                Path(settings['plot_dir']) / 'example_output.txt',
        ):
            assert file.exists() == exist
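
As in Example #1, `flag` comes from an assumed parametrization. The '-i'/'--ignore' spellings appear in the test body; a force-style counterpart that removes the old output instead of keeping it is a plausible companion:

import pytest

# '-i'/'--ignore' are taken from the test body; '-f'/'--force' are an
# assumed counterpart that deletes old output rather than keeping it.
@pytest.mark.parametrize('flag', ['-f', '--force', '-i', '--ignore'])
def test_rerun_diagnostic_flag(tmp_path, monkeypatch, flag):
    ...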
Example #5
            # run the metric
            metrics = metric_function(run_obj)
            # check duplication
            duplicate_metrics = list(
                set(all_metrics.keys()) & set(metrics.keys()))
            if duplicate_metrics:
                raise AssertionError('Duplicate Metrics ' +
                                     str(duplicate_metrics))
            all_metrics.update(metrics)

        # write metrics to file
        with open(os.path.join(run_obj['dump_output'], 'metrics.csv'),
                  'w') as file_handle:
            writer = csv.writer(file_handle)
            for metric in all_metrics.items():
                writer.writerow(metric)

    # multimodel functions
    if hasattr(area_package, 'multi_functions'):
        for multi_function in area_package.multi_functions:
            multi_function(run_obj)
    else:
        logger.info('# Area has no multi functions.')


if __name__ == '__main__':

    with run_diagnostic() as config:
        run_area(config)
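
The duplicate check and the CSV writer above imply that each `metric_function` returns a dict mapping metric names to values, so each CSV row is a (name, value) pair. A purely illustrative conforming metric function:

def example_metric_function(run_obj):
    # Illustrative only: the name and value are made up, but the
    # return type matches what the duplicate check and csv.writer
    # in the snippet above expect.
    return {'example_global_mean': 287.5}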
Example #6
from esmvaltool.diag_scripts.shared import (
    run_diagnostic,
    group_metadata,
)

import os
import logging

logger = logging.getLogger(os.path.basename(__file__))


def main(cfg):
    # The config object is a dict of all the metadata from the pre-processor
    logger.debug(cfg)

    projects = group_metadata(cfg["input_data"].values(), "project")

    for project, datasets in projects.items():
        models = set()
        for ds in datasets:
            if project == "CORDEX":
                ds_str = f"{ds['driver']} - {ds['dataset']}"
            else:
                ds_str = ds["dataset"]
            models.add(ds_str)
        print(f"{project} - {len(models)} models:")
        print(models)


if __name__ == "__main__":
    with run_diagnostic() as cfg:
        main(cfg)
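
For reference, `group_metadata` returns a mapping from each distinct value of the given key ('project' here) to the list of matching metadata dicts, so the loop above counts unique models per project. A schematic of that shape with invented names:

# Illustrative result of group_metadata(cfg['input_data'].values(),
# 'project'); dataset and driver names are made up for this sketch.
projects = {
    'CMIP5': [{'project': 'CMIP5', 'dataset': 'MPI-ESM-LR'}],
    'CORDEX': [{'project': 'CORDEX',
                'driver': 'ICHEC-EC-EARTH',
                'dataset': 'RACMO22E'}],
}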
Example #7
def main():
    """Run Eady Growth Rate diagnostic."""
    with run_diagnostic() as config:
        EadyGrowthRate(config).compute()
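
The excerpt stops before the module entry point; by analogy with the other examples it is presumably just:

if __name__ == '__main__':
    main()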
Example #8
        if model == datainfo.get('reference_dataset', None):
            update_reference(my_catch, model, rivervalues, var)
        else:
            update_plotdata(identifier, plotdata, rivervalues, var)

        # Append to cubelist for temporary output
        if model not in allcubes:
            allcubes[model] = []
        allcubes[model].append(cube)

    # Write regridded and temporally aggregated netCDF data files (one per model)
    # to do: update attributes, something fishy with unlimited dimension
    for model, mcube in allcubes.items():
        filepath = os.path.join(cfg[diag.names.WORK_DIR],
                                '_'.join(['postproc', model]) + '.nc')
        if cfg[diag.names.WRITE_NETCDF]:
            iris.save(mcube, filepath)
            logger.info("Writing %s", filepath)

    # Write plotdata as ascii files for user information
    write_plotdata(cfg, plotdata, my_catch)

    # Plot catchment data
    make_catchment_plots(cfg, plotdata, my_catch)


if __name__ == '__main__':

    with diag.run_diagnostic() as config:
        main(config)
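
This fragment starts mid-function, so its imports are missing. Judging from the names it uses (os.path.join, iris.save, logger, diag.names, diag.run_diagnostic), the full script plausibly opens with something like the following; the exact alias for the shared module is an assumption:

import logging
import os

import iris

# Assumed alias: the fragment reaches the shared helpers through 'diag'
# (diag.names.WORK_DIR, diag.names.WRITE_NETCDF, diag.run_diagnostic).
import esmvaltool.diag_scripts.shared as diag

logger = logging.getLogger(os.path.basename(__file__))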