Example #1
def main(argv=None):
    """ Calculate the UV index using the data
    in the input cubes."""
    parser = ArgParser(description="Calculates the UV index.")
    parser.add_argument("radiation_flux_upward",
                        metavar="RADIATION_FLUX_UPWARD",
                        help="Path to a NetCDF file of radiation flux "
                        "in uv upward at surface.")
    parser.add_argument("radiation_flux_downward",
                        metavar="RADIATION_FLUX_DOWNWARD",
                        help="Path to a NetCDF file of radiation flux "
                        "in uv downward at surface.")
    parser.add_argument("output_filepath",
                        metavar="OUTPUT_FILE",
                        help="The output path for the processed NetCDF")

    args = parser.parse_args(args=argv)

    # Load Cube
    rad_uv_up = load_cube(args.radiation_flux_upward)
    rad_uv_down = load_cube(args.radiation_flux_downward)

    # Process Cube
    result = process(rad_uv_up, rad_uv_down)
    # Save Cube
    save_netcdf(result, args.output_filepath)
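
A minimal sketch of how this entry point might be invoked programmatically; the file names are hypothetical placeholders for the three positional arguments.

# Hypothetical invocation of the UV index CLI defined above; the NetCDF
# paths are placeholders, not real files.
main([
    "uv_flux_up.nc",       # RADIATION_FLUX_UPWARD
    "uv_flux_down.nc",     # RADIATION_FLUX_DOWNWARD
    "uv_index.nc",         # OUTPUT_FILE
])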
Example #2
def main(argv=None):
    r"""
    Load arguments and run ProbabilitiesFromPercentiles plugin.

    Plugin generates probabilities at a fixed threshold (height) from a set of
    (height) percentiles.

    Example:

        Snow-fall level::

            Reference field: Percentiled snow fall level (m ASL)
            Other field: Orography (m ASL)

            300m ----------------- 30th Percentile snow fall level
            200m ----_------------ 20th Percentile snow fall level
            100m ---/-\----------- 10th Percentile snow fall level
            000m --/---\----------  0th Percentile snow fall level
            ______/     \_________ Orography

        The orography heights are compared against the heights that correspond
        with percentile values to find the band in which they fall, then
        interpolated linearly to obtain a probability of snow level at / below
        the ground surface.
    """
    parser = ArgParser(
        description="Calculate probability from a percentiled field at a "
        "2D threshold level.  Eg for 2D percentile levels at different "
        "heights, calculate probability that height is at ground level, where"
        " the threshold file contains a 2D topography field.")
    parser.add_argument("percentiles_filepath",
                        metavar="PERCENTILES_FILE",
                        help="A path to an input NetCDF file containing a "
                        "percentiled field")
    parser.add_argument("threshold_filepath",
                        metavar="THRESHOLD_FILE",
                        help="A path to an input NetCDF file containing a "
                        "threshold value at which probabilities should be "
                        "calculated.")
    parser.add_argument("output_filepath",
                        metavar="OUTPUT_FILE",
                        help="The output path for the processed NetCDF")
    parser.add_argument("output_diagnostic_name",
                        metavar="OUTPUT_DIAGNOSTIC_NAME",
                        type=str,
                        help="Name for data in output file e.g. "
                        "probability_of_snow_falling_level_below_ground_level")
    args = parser.parse_args(args=argv)

    # Load Cubes
    threshold_cube = load_cube(args.threshold_filepath)
    percentiles_cube = load_cube(args.percentiles_filepath)

    # Process Cubes
    probability_cube = process(percentiles_cube, threshold_cube,
                               args.output_diagnostic_name)

    # Save Cubes
    save_netcdf(probability_cube, args.output_filepath)
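
The linear interpolation the docstring describes can be shown in isolation. This is a standalone sketch with made-up heights, not the ProbabilitiesFromPercentiles implementation itself.

import numpy as np

# Percentile values and their snow-fall-level heights (m ASL), mirroring
# the diagram in the docstring above; all numbers are illustrative.
percentiles = np.array([0.0, 10.0, 20.0, 30.0])
heights = np.array([0.0, 100.0, 200.0, 300.0])

# An orography point at 150 m sits between the 10th and 20th percentile
# levels; linear interpolation gives the probability of the snow level
# being at or below the ground surface.
orography_height = 150.0
probability = np.interp(orography_height, heights, percentiles) / 100.0
print(probability)  # 0.15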
Example #3
def main(argv=None):
    """Load in arguments and get going."""
    parser = ArgParser(
        description="Calculate the continuous falling snow level ")
    parser.add_argument("temperature", metavar="TEMPERATURE",
                        help="Path to a NetCDF file of air temperatures at"
                        " heights (m) at the points for which the continuous "
                        "falling snow level is being calculated.")
    parser.add_argument("relative_humidity", metavar="RELATIVE_HUMIDITY",
                        help="Path to a NetCDF file of relative_humidities at"
                        " heights (m) at the points for which the continuous "
                        "falling snow level is being calculated.")
    parser.add_argument("pressure", metavar="PRESSURE",
                        help="Path to a NetCDF file of air pressures at"
                        " heights (m) at the points for which the continuous "
                        "falling snow level is being calculated.")
    parser.add_argument("orography", metavar="OROGRAPHY",
                        help="Path to a NetCDF file containing "
                        "the orography height in m of the terrain "
                        "over which the continuous falling snow level is "
                        "being calculated.")
    parser.add_argument("land_sea_mask", metavar="LAND_SEA_MASK",
                        help="Path to a NetCDF file containing "
                        "the binary land-sea mask for the points "
                        "for which the continuous falling snow level is "
                        "being calculated. Land points are set to 1, sea "
                        "points are set to 0.")
    parser.add_argument("output_filepath", metavar="OUTPUT_FILE",
                        help="The output path for the processed NetCDF")
    parser.add_argument("--precision", metavar="NEWTON_PRECISION",
                        default=0.005, type=float,
                        help="Precision to which the wet bulb temperature "
                        "is required: This is used by the Newton iteration "
                        "default value is 0.005")
    parser.add_argument("--falling_level_threshold",
                        metavar="FALLING_LEVEL_THRESHOLD",
                        default=90.0, type=float,
                        help=("Cutoff threshold for the wet-bulb integral used"
                              " to calculate the falling snow level. This "
                              "threshold indicates the level at which falling "
                              "snow is deemed to have melted to become rain. "
                              "The default value is 90.0, an empirically "
                              "derived value."))
    args = parser.parse_args(args=argv)

    # Load Cubes
    temperature = load_cube(args.temperature, no_lazy_load=True)
    relative_humidity = load_cube(args.relative_humidity, no_lazy_load=True)
    pressure = load_cube(args.pressure, no_lazy_load=True)
    orog = load_cube(args.orography, no_lazy_load=True)
    land_sea = load_cube(args.land_sea_mask, no_lazy_load=True)

    # Process Cube
    result = process(temperature, relative_humidity, pressure, orog,
                     land_sea, args.precision, args.falling_level_threshold)

    # Save Cube
    save_netcdf(result, args.output_filepath)
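
A hypothetical invocation of this CLI, with placeholder paths, showing how the optional Newton precision and threshold flags are passed.

# Hypothetical invocation; all file paths are placeholders.
main([
    "temperature.nc", "relative_humidity.nc", "pressure.nc",
    "orography.nc", "land_sea_mask.nc", "snow_falling_level.nc",
    "--precision", "0.005",
    "--falling_level_threshold", "90.0",
])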
Example #4
    def test_argparser_compulsory_args_has_profile(self):
        """Test that creating an ArgParser instance with the compulsory
        arguments adds the profiling options."""

        expected_profile_options = ['profile', 'profile_file']
        parser = ArgParser(central_arguments=None, specific_arguments=None)
        args = parser.parse_args()
        args = vars(args).keys()
        self.assertCountEqual(args, expected_profile_options)
Example #5
    def test_adding_empty_argument_list_does_nothing(self):
        """Test that attempting to add an empty list of argspecs to the
        ArgParser does not add any new arguments."""

        args_to_add = []

        # add a specific (optional) argument - ensures that even if there are
        # no compulsory arguments, we have something...
        # adding arguments after calling parse_args/args will do nothing, so
        # instead create 2 instances:
        parser1 = ArgParser(central_arguments=None,
                            specific_arguments=[[['--optional'], {}]])

        parser2 = ArgParser(central_arguments=None,
                            specific_arguments=[[['--optional'], {}]])

        parser2.add_arguments(args_to_add)
        self.assertEqual(parser1.parse_args(), parser2.parse_args())
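
The argspec convention these tests rely on, a (flags, kwargs) pair per argument, maps directly onto standard argparse calls. A minimal sketch of that mapping, assuming (but not reproducing) the ArgParser.add_arguments behaviour:

import argparse

def add_arguments(parser, argspecs):
    # Each argspec is a (flags, kwargs) pair that is unpacked into a
    # plain argparse.ArgumentParser.add_argument call.
    for flags, kwargs in argspecs:
        parser.add_argument(*flags, **kwargs)

parser = argparse.ArgumentParser()
add_arguments(parser, [(['--foo'], {'default': 1}), (['--bar', '-b'], {})])
print(parser.parse_args([]))  # Namespace(bar=None, foo=1)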
Example #6
def main(argv=None):
    """Parser to accept input data and an output destination before invoking
    the weather symbols plugin.
    """

    diagnostics = interrogate_decision_tree('high_resolution')
    n_files = len(diagnostics)
    dlist = (' - {}\n' * n_files)

    diagnostics_global = interrogate_decision_tree('global')
    n_files_global = len(diagnostics_global)
    dlist_global = (' - {}\n' * n_files_global)

    parser = ArgParser(
        description='Calculate gridded weather symbol codes.\nThis plugin '
        'requires a specific set of input diagnostics, where data\nmay be in '
        'any units to which the thresholds given below can\nbe converted:\n' +
        dlist.format(*diagnostics) + '\n\n or for global data\n\n' +
        dlist_global.format(*diagnostics_global),
        formatter_class=RawTextHelpFormatter)

    parser.add_argument(
        'input_filepaths',
        metavar='INPUT_FILES',
        nargs="+",
        help='Paths to files containing the required input diagnostics.')
    parser.add_argument('output_filepath',
                        metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')
    parser.add_argument("--wxtree",
                        metavar="WXTREE",
                        default="high_resolution",
                        choices=["high_resolution", "global"],
                        help="Weather Code tree.\n"
                        "Choices are high_resolution or global.\n"
                        "Default=high_resolution.",
                        type=str)

    args = parser.parse_args(args=argv)

    # Load Cube
    cubes = load_cubelist(args.input_filepaths, no_lazy_load=True)
    required_number_of_inputs = n_files
    if args.wxtree == 'global':
        required_number_of_inputs = n_files_global
    if len(cubes) != required_number_of_inputs:
        msg = ('Incorrect number of inputs: files {} gave {} cubes' +
               ', {} required').format(args.input_filepaths, len(cubes),
                                       required_number_of_inputs)
        raise argparse.ArgumentTypeError(msg)

    # Process Cube
    result = process(cubes, args.wxtree)

    # Save Cube
    save_netcdf(result, args.output_filepath)
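
The template-multiplication trick used to build dlist above is worth seeing on its own: multiplying the format string by the item count creates one ' - {}' slot per diagnostic.

# Standalone illustration of the dlist construction; the diagnostic
# names are placeholders.
diagnostics = ['air_temperature', 'rainfall_rate']
dlist = ' - {}\n' * len(diagnostics)
print(dlist.format(*diagnostics))
#  - air_temperature
#  - rainfall_rate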
Example #7
def main(argv=None):
    """Load in arguments for wind-gust diagnostic.
    Wind-gust and wind-speed data should be supplied along with the required
    percentile values. The wind-gust diagnostic will be the max of the
    specified percentile data.
    Currently:
        * Typical gust is
          MAX(wind-gust(50th percentile), wind-speed(95th percentile))
        * Extreme gust is
          MAX(wind-gust(95th percentile), wind-speed(100th percentile))
    If no percentile values are supplied, the code defaults
    to values for typical gusts.
    """
    parser = ArgParser(
        description="Calculate revised wind-gust data using a specified "
        "percentile of wind-gust data and a specified percentile "
        "of wind-speed data through the WindGustDiagnostic plugin. "
        "The wind-gust diagnostic will be the Max of the specified "
        "percentile data."
        "Currently Typical gusts is "
        "MAX(wind-gust(50th percentile),wind-speed(95th percentile))"
        "and Extreme gust is "
        "MAX(wind-gust(95th percentile),wind-speed(100th percentile)). "
        "If no percentile values are supplied the code defaults "
        "to values for Typical gusts.")
    parser.add_argument("input_filegust",
                        metavar="INPUT_FILE_GUST",
                        help="A path to an input Wind Gust Percentile"
                        " NetCDF file")
    parser.add_argument("input_filews",
                        metavar="INPUT_FILE_WINDSPEED",
                        help="A path to an input Wind Speed Percentile"
                        " NetCDF file")
    parser.add_argument("output_filepath",
                        metavar="OUTPUT_FILE",
                        help="The output path for the processed NetCDF")
    parser.add_argument("--percentile_gust",
                        metavar="PERCENTILE_GUST",
                        default="50.0",
                        help="Percentile of wind-gust required."
                        " Default=50.0",
                        type=float)
    parser.add_argument("--percentile_ws",
                        metavar="PERCENTILE_WIND_SPEED",
                        default="95.0",
                        help="Percentile of wind-speed required."
                        " Default=95.0",
                        type=float)

    args = parser.parse_args(args=argv)
    cube_wg = load_cube(args.input_filegust)
    cube_ws = load_cube(args.input_filews)
    result = (WindGustDiagnostic(args.percentile_gust,
                                 args.percentile_ws).process(cube_wg, cube_ws))
    save_netcdf(result, args.output_filepath)
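
The diagnostic itself reduces to a pointwise maximum of the two percentile fields. A standalone sketch with illustrative arrays standing in for the gust and wind-speed cubes:

import numpy as np

gust_50th = np.array([12.0, 15.0, 9.0])    # wind gust, 50th percentile
speed_95th = np.array([10.0, 17.0, 11.0])  # wind speed, 95th percentile

# Typical gust: MAX(wind-gust(50th percentile), wind-speed(95th percentile))
typical_gust = np.maximum(gust_50th, speed_95th)
print(typical_gust)  # [12. 17. 11.]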
Example #8
def main(argv=None):
    """Invoke data extraction."""

    parser = ArgParser(description='Extracts subset of data from a single '
                       'input file, subject to equality-based constraints.')
    parser.add_argument('input_file',
                        metavar='INPUT_FILE',
                        help="File containing a dataset to extract from.")
    parser.add_argument('output_file',
                        metavar='OUTPUT_FILE',
                        help="File to write the extracted dataset to.")
    parser.add_argument('constraints',
                        metavar='CONSTRAINTS',
                        nargs='+',
                        help='The constraint(s) to be applied. These must be'
                        ' of the form "key=value", e.g. "threshold=1". '
                        'Scalar, boolean and string values are supported. '
                        'Comma-separated lists (e.g. "key=[value1,value2]") '
                        'are supported. These comma-separated lists can '
                        'either extract all values specified in the list or '
                        'all values specified within a range, e.g. '
                        '"key=[value1:value2]". When a range is specified, '
                        'it is inclusive of the endpoints of the range.')
    parser.add_argument('--units',
                        metavar='UNITS',
                        nargs='+',
                        default=None,
                        help='Optional: units of coordinate constraint(s) to '
                        'be applied, for use when the input coordinate '
                        'units are not ideal (eg for float equality). If '
                        'used, this list must match the CONSTRAINTS list in '
                        'order and length (with null values set to None).')
    parser.add_argument('--ignore-failure',
                        action='store_true',
                        default=False,
                        help='Option to ignore constraint match failure and '
                        'return the input cube.')
    args = parser.parse_args(args=argv)

    # Load Cube
    cube = load_cube(args.input_file)

    # Process Cube
    output_cube = process(cube, args.constraints, args.units)

    # Save Cube
    if output_cube is None and args.ignore_failure:
        save_netcdf(cube, args.output_file)
    elif output_cube is None:
        msg = "Constraint(s) could not be matched in input cube"
        raise ValueError(msg)
    else:
        save_netcdf(output_cube, args.output_file)
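
Hypothetical invocations showing the three constraint forms the help text describes; the file names and constraint keys are placeholders.

main(['input.nc', 'subset.nc', 'threshold=1'])           # single value
main(['input.nc', 'subset.nc', 'realization=[0,1,2]'])   # list of values
main(['input.nc', 'subset.nc', 'percentile=[25:75]'])    # inclusive range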
Example #9
    def test_adding_argument_with_defined_kwargs_dict_has_default(self):
        """Test that we can successfully add an argument to the ArgParser,
        when the argspec contained kwargs, and that the default value is
        captured."""

        args_to_add = [(['--one'], {'default': 1})]

        parser = ArgParser(central_arguments=None, specific_arguments=None)

        parser.add_arguments(args_to_add)
        result_args = parser.parse_args()
        # `--one` was not passed in, so we pick up the default - let's check
        # they agree...
        self.assertEqual(1, result_args.one)
Example #10
    def test_profile_is_not_called_when_disabled(self):
        """Test that calling parse_args does not enable profiling when the
        --profile option is not added."""

        # temporarily patch compulsory args so that profiling is disabled by
        # default
        compulsory_arguments = {
            'profile': (['--profile'], {
                'default': False
            }),
            'profile_file': (['--profile-file'], {
                'default': None
            })
        }

        with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
                   compulsory_arguments):
            with patch('improver.argparser.profile_hook_enable') as \
                    mock_profile:
                parser = ArgParser(central_arguments=None,
                                   specific_arguments=None)
                parser.parse_args()
                self.assertEqual(mock_profile.call_count, 0)
Example #11
    def test_adding_argument_with_defined_kwargs_dict(self):
        """Test that we can successfully add an argument to the ArgParser,
        when the argspec contained kwargs."""

        # length of argspec is 2...
        args_to_add = [(['--foo'], {'default': 1})]
        expected_arg = 'foo'

        parser = ArgParser(central_arguments=None, specific_arguments=None)

        parser.add_arguments(args_to_add)
        result_args = parser.parse_args()
        result_args = vars(result_args).keys()
        self.assertIn(expected_arg, result_args)
Example #12
    def test_create_argparser_with_no_arguments(self):
        """Test that creating an ArgParser with no arguments has no
        arguments."""

        compulsory_arguments = {}

        # it doesn't matter what the centralized arguments are, because we
        # select None of them - we only need to patch the COMPULSORY_ARGUMENTS
        # to ensure there are none of them
        with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
                   compulsory_arguments):
            parser = ArgParser(central_arguments=None, specific_arguments=None)
            args = parser.parse_args()
            args = vars(args).keys()
            self.assertEqual(len(args), 0)
Example #13
    def test_create_argparser_only_compulsory_arguments(self):
        """Test that creating an ArgParser with only compulsory arguments
        adds only the compulsory arguments."""

        compulsory_arguments = {'foo': (['--foo'], {})}

        # it doesn't matter what the centralized arguments are, because we
        # select None of them - only patch COMPULSORY_ARGUMENTS so we know
        # what to expect
        with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
                   compulsory_arguments):
            parser = ArgParser(central_arguments=None, specific_arguments=None)
            args = parser.parse_args()
            args = vars(args).keys()
            self.assertCountEqual(args, ['foo'])
Example #14
    def test_create_argparser_only_specific_arguments(self):
        """Test that creating an ArgParser with only specific arguments
        adds only the specific arguments."""

        compulsory_arguments = {}
        specific_arguments = [(['--foo'], {})]

        # it doesn't matter what the centralized arguments are, because we
        # select None of them - patch the COMPULSORY_ARGUMENTS to be an empty
        # dict so that we don't add any of them
        with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
                   compulsory_arguments):
            parser = ArgParser(central_arguments=None,
                               specific_arguments=specific_arguments)
            args = parser.parse_args()
            args = vars(args).keys()
            self.assertCountEqual(args, ['foo'])
Example #15
    def test_create_argparser_compulsory_and_specific_arguments(self):
        """Test that creating an ArgParser with compulsory and specific
        arguments adds both of these and no others."""

        compulsory_arguments = {'foo': (['--foo'], {})}
        specific_arguments = [(['--bar'], {})]

        # it doesn't matter what the centralized arguments are, because we
        # select None of them - patch only the COMPULSORY_ARGUMENTS so we know
        # that `foo` is added from here
        with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
                   compulsory_arguments):
            parser = ArgParser(central_arguments=None,
                               specific_arguments=specific_arguments)
            args = parser.parse_args()
            args = vars(args).keys()
            self.assertCountEqual(args, ['foo', 'bar'])
Example #16
def main(argv=None):
    """Parser to accept input data and an output destination before invoking
    the wet bulb temperature plugin. Also accepted is an optional
    convergence_condition argument that can be used to specify the tolerance of
    the Newton iterator used to calculate the wet bulb temperatures."""

    parser = ArgParser(
        description='Calculate a field of wet bulb temperatures.')
    parser.add_argument('temperature',
                        metavar='TEMPERATURE',
                        help='Path to a NetCDF file of air temperatures at '
                        'the points for which the wet bulb temperatures are '
                        'being calculated.')
    parser.add_argument('relative_humidity',
                        metavar='RELATIVE_HUMIDITY',
                        help='Path to a NetCDF file of relative humidities at '
                        'the points for which the wet bulb temperatures '
                        'are being calculated.')
    parser.add_argument('pressure',
                        metavar='PRESSURE',
                        help='Path to a NetCDF file of air pressures at the '
                        'points for which the wet bulb temperatures are being'
                        ' calculated.')
    parser.add_argument('output_filepath',
                        metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')
    parser.add_argument('--convergence_condition',
                        metavar='CONVERGENCE_CONDITION',
                        type=float,
                        default=0.05,
                        help='The convergence condition for the Newton '
                        'iterator in K. When the wet bulb temperature '
                        'stops changing by more than this amount between'
                        ' iterations, the solution is accepted.')

    args = parser.parse_args(args=argv)
    # Load Cubes
    temperature = load_cube(args.temperature)
    relative_humidity = load_cube(args.relative_humidity)
    pressure = load_cube(args.pressure)
    # Process Cube
    result = process(temperature, relative_humidity, pressure,
                     args.convergence_condition)
    # Save Cube
    save_netcdf(result, args.output_filepath)
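
The convergence condition is a stopping rule for Newton's method: iterate until successive estimates differ by less than the tolerance. A generic scalar sketch of that rule (not the IMPROVER wet-bulb solver, which operates on whole fields):

def newton_solve(f, dfdx, x0, convergence_condition=0.05, max_iter=100):
    """Find a root of f, stopping once the update between iterations is
    smaller than the convergence condition."""
    x = x0
    for _ in range(max_iter):
        x_new = x - f(x) / dfdx(x)
        if abs(x_new - x) < convergence_condition:
            return x_new
        x = x_new
    return x

# Example: solve x**2 - 2 = 0, accepting the solution within 0.05.
print(newton_solve(lambda x: x * x - 2, lambda x: 2 * x, 1.0))  # ~1.4142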
Example #17
    def test_create_argparser_compulsory_and_centralized_arguments(self):
        """Test that creating an ArgParser with compulsory and centralized
        arguments adds both of these and no others."""

        compulsory_arguments = {'foo': (['--foo'], {})}
        centralized_arguments = {'bar': (['--bar'], {})}

        # patch the COMPULSORY_ARGUMENTS so we know that `foo` exists
        # and the CENTRALIZED_ARGUMENTS so we know that `bar` exists.
        with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
                   compulsory_arguments):
            with patch('improver.argparser.ArgParser.CENTRALIZED_ARGUMENTS',
                       centralized_arguments):
                parser = ArgParser(central_arguments=['bar'],
                                   specific_arguments=None)
                args = parser.parse_args()
                args = vars(args).keys()
                self.assertCountEqual(args, ['foo', 'bar'])
Example #18
    def test_create_argparser_all_arguments(self):
        """Test that creating an ArgParser with compulsory, centralized and
        specific arguments adds the arguments from all 3 collections."""

        compulsory_arguments = {'foo': (['--foo'], {})}
        centralized_arguments = {'bar': (['--bar'], {})}
        specific_arguments = [(['--baz'], {})]

        # patch both the COMPULSORY_ARGUMENTS and CENTRALIZED_ARGUMENTS, so
        # that `foo` and `bar` are added from these (respectively)
        with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
                   compulsory_arguments):
            with patch('improver.argparser.ArgParser.CENTRALIZED_ARGUMENTS',
                       centralized_arguments):
                parser = ArgParser(central_arguments=['bar'],
                                   specific_arguments=specific_arguments)
                args = parser.parse_args()
                args = vars(args).keys()
                self.assertCountEqual(args, ['foo', 'bar', 'baz'])
Example #19
    def test_create_argparser_only_centralized_arguments(self):
        """Test that creating an ArgParser with only centralized arguments
        adds only the selected centralized arguments."""

        compulsory_arguments = {}
        centralized_arguments = {'foo': (['--foo'], {})}

        # patch the COMPULSORY_ARGUMENTS to an empty dict (so there are none)
        # and patch CENTRALIZED_ARGUMENTS so we know that `foo` can be selected
        # from it
        with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
                   compulsory_arguments):
            with patch('improver.argparser.ArgParser.CENTRALIZED_ARGUMENTS',
                       centralized_arguments):
                parser = ArgParser(central_arguments=['foo'],
                                   specific_arguments=None)
                args = parser.parse_args()
                args = vars(args).keys()
                self.assertCountEqual(args, ['foo'])
Example #20
def main(argv=None):
    """Extend radar mask based on coverage data."""
    parser = ArgParser(description="Extend radar mask based on coverage "
                       "data.")
    parser.add_argument("radar_data_filepath",
                        metavar="RADAR_DATA_FILEPATH",
                        type=str,
                        help="Full path to input NetCDF file "
                        "containing the radar variable to remask.")
    parser.add_argument("coverage_filepath",
                        metavar="COVERAGE_FILEPATH",
                        type=str,
                        help="Full path to input NetCDF file "
                        "containing radar coverage data.")
    parser.add_argument("output_filepath",
                        metavar="OUTPUT_FILEPATH",
                        type=str,
                        help="Full path to save remasked radar data "
                        "NetCDF file.")
    parser.add_argument("--fix_float64",
                        action='store_true',
                        default=False,
                        help="Check and fix cube for float64 data. Without "
                        "this option an exception will be raised if "
                        "float64 data is found but no fix applied.")

    args = parser.parse_args(args=argv)

    # load data
    radar_data = load_cube(args.radar_data_filepath)
    coverage = load_cube(args.coverage_filepath)

    # extend mask
    remasked_data = ExtendRadarMask().process(radar_data, coverage)

    # Check and fix for float64 data only option:
    check_cube_not_float64(remasked_data, fix=args.fix_float64)

    # save output file
    save_netcdf(remasked_data, args.output_filepath)
Example #21
def main(argv=None):
    """Load in the arguments and ensure they are set correctly. Then run
    the time-lagged ensembles on the input cubes."""
    parser = ArgParser(
        description='This combines the realizations from different forecast '
                    'cycles into one cube. It does this by taking an input '
                    'CubeList containing forecasts from different cycles and '
                    'merging them into a single cube, removing any metadata '
                    'that does not match.')
    parser.add_argument('input_filenames', metavar='INPUT_FILENAMES',
                        nargs="+", type=str,
                        help='Paths to input NetCDF files for the time-lagged '
                        'ensemble to combine the realizations.')
    parser.add_argument('output_file', metavar='OUTPUT_FILE',
                        help='The output file for the processed NetCDF.')
    args = parser.parse_args(args=argv)

    # Load the cubes
    cubes = iris.cube.CubeList([])
    for filename in args.input_filenames:
        new_cube = load_cube(filename)
        cubes.append(new_cube)

    # Warns if a single file is input
    if len(cubes) == 1:
        warnings.warn('Only a single cube input, so time lagging will have '
                      'no effect.')
        save_netcdf(cubes[0], args.output_file)
    # Raises an error if the validity times do not match
    else:
        for i, this_cube in enumerate(cubes):
            for later_cube in cubes[i+1:]:
                if this_cube.coord('time') != later_cube.coord('time'):
                    msg = ("Cubes with mismatched validity times are not "
                           "compatible.")
                    raise ValueError(msg)
        result = GenerateTimeLaggedEnsemble().process(cubes)
        save_netcdf(result, args.output_file)
Example #22
def main(argv=None):
    """Generate target grid with a halo around the source file grid."""

    parser = ArgParser(description='Generate grid with halo from a source '
                       'domain input file. The grid is populated with zeroes.')
    parser.add_argument('input_file', metavar='INPUT_FILE', help="NetCDF file "
                        "containing data on a source grid.")
    parser.add_argument('output_file', metavar='OUTPUT_FILE', help="NetCDF "
                        "file defining the target grid with additional halo.")
    parser.add_argument('--halo_radius', metavar='HALO_RADIUS', default=162000,
                        type=float, help="Size of halo (in m) with which to "
                        "pad the input grid.  Default is 162 000 m.")
    args = parser.parse_args(args=argv)

    # Load Cube
    cube = load_cube(args.input_file)

    # Process Cube
    result = process(cube, args.halo_radius)

    # Save Cube
    save_netcdf(result, args.output_file)
Example #23
def main(argv=None):
    """Apply lapse rates to temperature data."""
    parser = ArgParser(description='Apply downscaling temperature adjustment '
                       'using calculated lapse rate.')

    parser.add_argument('temperature_filepath',
                        metavar='TEMPERATURE_FILEPATH',
                        help='Full path to input temperature NetCDF file')
    parser.add_argument('lapse_rate_filepath',
                        metavar='LAPSE_RATE_FILEPATH',
                        help='Full path to input lapse rate NetCDF file')
    parser.add_argument('source_orography',
                        metavar='SOURCE_OROG_FILE',
                        help='Full path to NetCDF file containing the source '
                        'model orography')
    parser.add_argument('target_orography',
                        metavar='TARGET_OROG_FILE',
                        help='Full path to target orography NetCDF file '
                        '(to which temperature will be downscaled)')
    parser.add_argument('output_file',
                        metavar='OUTPUT_FILE',
                        help='File name '
                        'to write lapse rate adjusted temperature data')

    args = parser.parse_args(args=argv)

    # read cubes
    temperature = load_cube(args.temperature_filepath)
    lapse_rate = load_cube(args.lapse_rate_filepath)
    source_orog = load_cube(args.source_orography)
    target_orog = load_cube(args.target_orography)

    # apply lapse rate to temperature data
    adjusted_temperature = apply_gridded_lapse_rate(temperature, lapse_rate,
                                                    source_orog, target_orog)

    # save to output file
    save_netcdf(adjusted_temperature, args.output_file)
Example #24
def main(argv=None):
    """ Load in the arguments for feels like temperature and ensure they are
    set correctly. Then calculate the feels like temperature using the data
    in the input cubes."""
    parser = ArgParser(
        description="This calculates the feels like temperature using a "
        "combination of the wind chill index and Steadman's "
        "apparent temperature equation.")
    parser.add_argument("temperature",
                        metavar="TEMPERATURE",
                        help="Path to a NetCDF file of air temperatures at "
                        "screen level.")
    parser.add_argument("wind_speed",
                        metavar="WIND_SPEED",
                        help="Path to the NetCDF file of wind speed at 10m.")
    parser.add_argument("relative_humidity",
                        metavar="RELATIVE_HUMIDITY",
                        help="Path to the NetCDF file of relative humidity "
                        "at screen level.")
    parser.add_argument("pressure",
                        metavar="PRESSURE",
                        help="Path to a NetCDF file of mean sea level "
                        "pressure.")
    parser.add_argument("output_filepath",
                        metavar="OUTPUT_FILE",
                        help="The output path for the processed NetCDF")

    args = parser.parse_args(args=argv)

    temperature = load_cube(args.temperature)
    wind_speed = load_cube(args.wind_speed)
    relative_humidity = load_cube(args.relative_humidity)
    pressure = load_cube(args.pressure)

    result = calculate_feels_like_temperature(temperature, wind_speed,
                                              relative_humidity, pressure)
    save_netcdf(result, args.output_filepath)
Example #25
    def test_adding_multiple_arguments(self):
        """Test that we can successfully add multiple arguments to the
        ArgParser."""

        # we will not actually pass anything in, so the Namespace will receive
        # the defaults (if any) - only check the keys of the Namespace derived
        # dictionary
        args_to_add = [(['--foo'], {}), (['--bar', '--b'], {})]

        expected_namespace_keys = ['foo', 'bar']  # + compulsory...

        # explicitly pass nothing in - will only have compulsory arguments
        # and the ones we added...
        parser = ArgParser(central_arguments=None, specific_arguments=None)

        parser.add_arguments(args_to_add)
        result_args = parser.parse_args()
        result_args = vars(result_args).keys()
        # we could also add compulsory arguments to expected_namespace_keys
        # and then assertCountEqual - (order unimportant), but this
        # is unnecessary - just use loop:
        # (or we could patch compulsory arguments to be an empty dictionary)
        for expected_arg in expected_namespace_keys:
            self.assertIn(expected_arg, result_args)
Example #26
def main(argv=None):
    """Load in arguments and get going."""
    parser = ArgParser(
        description=('Reads input orography and landmask fields. Creates '
                     'a series of topographic zone weights to indicate '
                     'where an orography point sits within the defined '
                     'topographic bands. If the orography point is in the '
                     'centre of a topographic band, then a single band will '
                     'have a weight of 1.0. If the orography point is at the '
                     'edge of a topographic band, then the upper band will '
                     'have a 0.5 weight whilst the lower band will also have '
                     'a 0.5 weight. Otherwise, the weight will vary linearly '
                     'between the centre of a topographic band and the edge.'))
    parser.add_argument('input_filepath_standard_orography',
                        metavar='INPUT_FILE_STANDARD_OROGRAPHY',
                        help=('A path to an input NetCDF orography file to '
                              'be processed'))
    parser.add_argument('output_filepath', metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')
    parser.add_argument('--input_filepath_landmask', metavar='INPUT_FILE_LAND',
                        help=('A path to an input NetCDF land mask file to be '
                              'processed. If provided, sea points will be '
                              'masked and set to the default fill value. If '
                              'no land mask is provided, weights will be '
                              'generated for sea points as well as land, '
                              'included in the appropriate topographic band.'))

    parser.add_argument('--force', dest='force', default=False,
                        action='store_true',
                        help=('If keyword is set (i.e. True), ancillaries '
                              'will be generated even if doing so will '
                              'overwrite existing files'))
    parser.add_argument('--thresholds_filepath',
                        metavar='THRESHOLDS_FILEPATH',
                        default=None,
                        help=("The path to a json file which can be used "
                              "to set the number and size of topographic "
                              "bounds. If unset a default bounds dictionary"
                              " will be used:"
                              "{'bounds': [[-500., 50.], [50., 100.], "
                              "[100., 150.],[150., 200.], [200., 250.], "
                              "[250., 300.], [300., 400.], [400., 500.], "
                              "[500., 650.],[650., 800.], [800., 950.], "
                              "[950., 6000.]], 'units': 'm'}"))
    args = parser.parse_args(args=argv)

    thresholds_dict = load_json_or_none(args.thresholds_filepath)

    if not os.path.exists(args.output_filepath) or args.force:
        orography = load_cube(args.input_filepath_standard_orography)
        landmask = None
        if args.input_filepath_landmask:
            try:
                landmask = load_cube(args.input_filepath_landmask)
            except IOError as err:
                msg = ("Loading land mask has been unsuccessful: {}. "
                       "This may be because the land mask could not be "
                       "located in {}; run "
                       'improver-generate-landmask-ancillary first.').format(
                           err, args.input_filepath_landmask)
                raise IOError(msg)

        result = process(landmask, orography, thresholds_dict)
        # Save Cube
        save_netcdf(result, args.output_filepath)
    else:
        print('File already exists here: ', args.output_filepath)
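
A standalone sketch consistent with the weighting rule in the description: weight 1.0 at a band centre, 0.5 at a band edge, varying linearly in between. The function and its taper outside the band are illustrative assumptions, not the plugin's exact scheme.

def band_weight(height, lower, upper):
    # Linear taper from 1.0 at the band centre to 0.5 at the band edges,
    # reaching 0.0 half a band-width beyond them.
    centre = 0.5 * (lower + upper)
    half_width = 0.5 * (upper - lower)
    return max(0.0, 1.0 - 0.5 * abs(height - centre) / half_width)

print(band_weight(75.0, 50.0, 100.0))   # band centre -> 1.0
print(band_weight(100.0, 50.0, 100.0))  # band edge   -> 0.5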
Example #27
def main(argv=None):
    """Calculate orographic enhancement of precipitation from model pressure,
    temperature, relative humidity and wind input files."""

    parser = ArgParser(description='Calculate orographic enhancement using the'
                       ' ResolveWindComponents() and OrographicEnhancement() '
                       'plugins. Outputs data on the high resolution orography'
                       ' grid and regridded to the coarser resolution of the '
                       'input diagnostic variables.')

    parser.add_argument('temperature_filepath',
                        metavar='TEMPERATURE_FILEPATH',
                        help='Full path to input NetCDF file of temperature on'
                        ' height levels')
    parser.add_argument('humidity_filepath',
                        metavar='HUMIDITY_FILEPATH',
                        help='Full path to input NetCDF file of relative '
                        'humidity on height levels')
    parser.add_argument('pressure_filepath',
                        metavar='PRESSURE_FILEPATH',
                        help='Full path to input NetCDF file of pressure on '
                        'height levels')
    parser.add_argument('windspeed_filepath',
                        metavar='WINDSPEED_FILEPATH',
                        help='Full path to input NetCDF file of wind speed on '
                        'height levels')
    parser.add_argument('winddir_filepath',
                        metavar='WINDDIR_FILEPATH',
                        help='Full path to input NetCDF file of wind direction'
                        ' on height levels')
    parser.add_argument('orography_filepath',
                        metavar='OROGRAPHY_FILEPATH',
                        help='Full path to input NetCDF high resolution '
                        'orography ancillary. This should be on the same or a '
                        'finer resolution grid than the input variables, and '
                        'defines the grid on which the orographic enhancement '
                        'will be calculated.')
    parser.add_argument('output_dir',
                        metavar='OUTPUT_DIR',
                        help='Directory '
                        'to write output orographic enhancement files')
    parser.add_argument('--boundary_height',
                        type=float,
                        default=1000.,
                        help='Model height level to extract variables for '
                        'calculating orographic enhancement, as a proxy for '
                        'the boundary layer.')
    parser.add_argument('--boundary_height_units',
                        type=str,
                        default='m',
                        help='Units of the boundary height specified for '
                        'extracting model levels.')

    args = parser.parse_args(args=argv)

    constraint_info = (args.boundary_height, args.boundary_height_units)

    temperature = load_and_extract(args.temperature_filepath, *constraint_info)
    humidity = load_and_extract(args.humidity_filepath, *constraint_info)
    pressure = load_and_extract(args.pressure_filepath, *constraint_info)
    wind_speed = load_and_extract(args.windspeed_filepath, *constraint_info)
    wind_dir = load_and_extract(args.winddir_filepath, *constraint_info)

    # load high resolution orography
    orography = load_cube(args.orography_filepath)

    orogenh_high_res, orogenh_standard = process(temperature, humidity,
                                                 pressure, wind_speed,
                                                 wind_dir, orography)

    # generate file names
    fname_standard = os.path.join(args.output_dir,
                                  generate_file_name(orogenh_standard))
    fname_high_res = os.path.join(
        args.output_dir,
        generate_file_name(orogenh_high_res,
                           parameter="orographic_enhancement_high_resolution"))

    # save output files
    save_netcdf(orogenh_standard, fname_standard)
    save_netcdf(orogenh_high_res, fname_high_res)
Example #28
def main(argv=None):
    """Load in arguments for estimating coefficients for Ensemble Model Output
       Statistics (EMOS), otherwise known as Non-homogeneous Gaussian
       Regression (NGR). Two sources of input data must be provided:
       historical
       forecasts and historical truth data (to use in calibration). The
       estimated coefficients are written to a netCDF file.
    """
    parser = ArgParser(
        description='Estimate coefficients for Ensemble Model Output '
                    'Statistics (EMOS), otherwise known as Non-homogeneous '
                    'Gaussian Regression (NGR). There are two methods for '
                    'inputting data into this CLI, either by providing the '
                    'historic forecasts and truth separately, or by providing '
                    'a combined list of historic forecasts and truths along '
                    'with historic_forecast_identifier and truth_identifier '
                    'arguments to provide metadata that distinguishes between '
                    'them.')
    parser.add_argument('distribution', metavar='DISTRIBUTION',
                        choices=['gaussian', 'truncated_gaussian'],
                        help='The distribution that will be used for '
                             'calibration. This will be dependent upon the '
                             'input phenomenon. This has to be supported by '
                             'the minimisation functions in '
                             'ContinuousRankedProbabilityScoreMinimisers.')
    parser.add_argument('cycletime', metavar='CYCLETIME', type=str,
                        help='This denotes the cycle at which forecasts '
                             'will be calibrated using the calculated '
                             'EMOS coefficients. The validity time in the '
                             'output coefficients cube will be calculated '
                             'relative to this cycletime. '
                             'This cycletime is in the format '
                             'YYYYMMDDTHHMMZ.')

    # Historic forecast and truth filepaths
    parser.add_argument(
        '--historic_filepath', metavar='HISTORIC_FILEPATH', nargs='+',
        help='Paths to the input NetCDF files containing the '
             'historic forecast(s) used for calibration. '
             'This must be supplied with an associated truth filepath. '
             'Specification of either the combined_filepath, '
             'historic_forecast_identifier or the truth_identifier is '
             'invalid with this argument.')
    parser.add_argument(
        '--truth_filepath', metavar='TRUTH_FILEPATH', nargs='+',
        help='Paths to the input NetCDF files containing the '
             'historic truth analyses used for calibration. '
             'This must be supplied with an associated historic filepath. '
             'Specification of either the combined_filepath, '
             'historic_forecast_identifier or the truth_identifier is '
             'invalid with this argument.')

    # Input filepaths
    parser.add_argument(
        '--combined_filepath', metavar='COMBINED_FILEPATH', nargs='+',
        help='Paths to the input NetCDF files containing '
             'both the historic forecast(s) and truth '
             'analyses used for calibration. '
             'This must be supplied with both the '
             'historic_forecast_identifier and the truth_identifier. '
             'Specification of either the historic_filepath or the '
             'truth_filepath is invalid with this argument.')
    parser.add_argument(
        "--historic_forecast_identifier",
        metavar='HISTORIC_FORECAST_IDENTIFIER',
        help='The path to a json file containing metadata '
             'information that defines the historic forecast. '
             'This must be supplied with both the combined_filepath and the '
             'truth_identifier. Specification of either the historic_filepath '
             'or the truth_filepath is invalid with this argument. '
             'The intended contents are described in improver.'
             'ensemble_calibration.ensemble_calibration_utilities.'
             'SplitHistoricForecastAndTruth.')
    parser.add_argument(
        "--truth_identifier", metavar='TRUTH_IDENTIFIER',
        help='The path to a json file containing metadata '
             'information that defines the truth. '
             'This must be supplied with both the combined_filepath and the '
             'historic_forecast_identifier. Specification of either the '
             'historic_filepath or the truth_filepath is invalid with this '
             'argument. The intended contents are described in improver.'
             'ensemble_calibration.ensemble_calibration_utilities.'
             'SplitHistoricForecastAndTruth.')

    # Output filepath
    parser.add_argument('output_filepath', metavar='OUTPUT_FILEPATH',
                        help='The output path for the processed NetCDF')
    # Optional arguments.
    parser.add_argument('--units', metavar='UNITS',
                        help='The units that calibration should be undertaken '
                             'in. The historical forecast and truth will be '
                             'converted as required.')
    parser.add_argument('--predictor_of_mean', metavar='PREDICTOR_OF_MEAN',
                        choices=['mean', 'realizations'], default='mean',
                        help='String to specify the predictor used to '
                             'calibrate the forecast mean. Currently the '
                             'ensemble mean ("mean") and the ensemble '
                             'realizations ("realizations") are supported as '
                             'options. Default: "mean".')
    parser.add_argument('--max_iterations', metavar='MAX_ITERATIONS',
                        type=np.int32, default=1000,
                        help='The maximum number of iterations allowed '
                             'until the minimisation has converged to a '
                             'stable solution. If the maximum number '
                             'of iterations is reached, but the '
                             'minimisation has not yet converged to a '
                             'stable solution, then the available solution '
                              'is used anyway, and a warning is raised. '
                             'This may be modified for testing purposes '
                             'but otherwise kept fixed. If the '
                             'predictor_of_mean is "realizations", '
                             'then the number of iterations may require '
                             'increasing, as there will be more coefficients '
                             'to solve for.')
    args = parser.parse_args(args=argv)

    # Load Cubes
    historic_forecast = load_cube(args.historic_filepath, allow_none=True)
    truth = load_cube(args.truth_filepath, allow_none=True)

    combined = (load_cubelist(args.combined_filepath)
                if args.combined_filepath else None)
    historic_forecast_dict = (
        load_json_or_none(args.historic_forecast_identifier))
    truth_dict = load_json_or_none(args.truth_identifier)

    # Process Cube
    coefficients = process(historic_forecast, truth, combined,
                           historic_forecast_dict, truth_dict,
                           args.distribution, args.cycletime, args.units,
                           args.predictor_of_mean, args.max_iterations)
    # Save Cube
    # Check whether a coefficients cube has been created. If the historic
    # forecasts and truths provided did not match in validity time, then
    # no coefficients would have been calculated.
    if coefficients:
        save_netcdf(coefficients, args.output_filepath)
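
Hypothetical invocations showing the two mutually exclusive input modes the description lists; every path is a placeholder.

# Mode 1: separate historic forecast and truth files.
main(['gaussian', '20200101T0000Z', 'coefficients.nc',
      '--historic_filepath', 'historic_forecasts.nc',
      '--truth_filepath', 'truths.nc'])

# Mode 2: a combined file list plus identifier metadata.
main(['gaussian', '20200101T0000Z', 'coefficients.nc',
      '--combined_filepath', 'forecasts_and_truths.nc',
      '--historic_forecast_identifier', 'forecast_metadata.json',
      '--truth_identifier', 'truth_metadata.json'])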
Example #29
def main(argv=None):
    """Load in arguments and get going."""
    parser = ArgParser(
        description="Calculate percentiled data over a given coordinate by "
        "collapsing that coordinate. Typically used to convert realization "
        "data into percentiled data, but may calculate over any "
        "dimension coordinate. Alternatively, calling this CLI with a dataset"
        " containing probabilities will convert those to percentiles using "
        "the ensemble copula coupling plugin. If no particular percentiles "
        "are given at which to calculate values and no 'number of percentiles'"
        " to calculate are specified, the following defaults will be used: "
        "[0, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 100]")
    parser.add_argument("input_filepath",
                        metavar="INPUT_FILE",
                        help="A path to an input NetCDF file to be processed")
    parser.add_argument("output_filepath",
                        metavar="OUTPUT_FILE",
                        help="The output path for the processed NetCDF")
    parser.add_argument("--coordinates",
                        metavar="COORDINATES_TO_COLLAPSE",
                        nargs="+",
                        help="Coordinate or coordinates over which to collapse"
                        " data and calculate percentiles; e.g. "
                        "'realization' or 'latitude longitude'. This argument "
                        "must be provided when collapsing a coordinate or "
                        "coordinates to create percentiles, but is redundant "
                        "when converting probabilities to percentiles and may "
                        "be omitted. This coordinate(s) will be removed "
                        "and replaced by a percentile coordinate.")
    parser.add_argument('--ecc_bounds_warning',
                        default=False,
                        action='store_true',
                        help='If True, where calculated percentiles are '
                        'outside the ECC bounds range, raise a warning '
                        'rather than an exception.')
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument("--percentiles",
                       metavar="PERCENTILES",
                       nargs="+",
                       default=None,
                       type=float,
                       help="Optional definition of percentiles at which to "
                       "calculate data, e.g. --percentiles 0 33.3 66.6 100")
    group.add_argument('--no-of-percentiles',
                       default=None,
                       type=int,
                       metavar='NUMBER_OF_PERCENTILES',
                       help="Optional definition of the number of percentiles "
                       "to be generated, these distributed regularly with the "
                       "aim of dividing into blocks of equal probability.")

    args = parser.parse_args(args=argv)

    # Load Cube
    cube = load_cube(args.input_filepath)

    # Process Cube
    result = process(cube, args.coordinates, args.ecc_bounds_warning,
                     args.percentiles, args.no_of_percentiles)

    # Save Cube
    save_netcdf(result, args.output_filepath)
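
Hypothetical invocations with placeholder paths; --percentiles and --no-of-percentiles are mutually exclusive, and --coordinates is only needed when collapsing a coordinate.

# Collapse the realization coordinate at explicit percentiles.
main(['realizations.nc', 'percentiles.nc',
      '--coordinates', 'realization',
      '--percentiles', '25', '50', '75'])

# Convert probabilities to five evenly distributed percentiles.
main(['probabilities.nc', 'percentiles.nc', '--no-of-percentiles', '5'])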
Example #30
def main(argv=None):
    """Load in arguments for applying neighbourhood processing when using a
    mask."""
    parser = ArgParser(
        description='Neighbourhood the input dataset over two distinct regions'
        ' of land and sea. If performed as a single level neighbourhood, a '
        'land-sea mask should be provided. If instead topographic_zone '
        'neighbourhooding is being employed, the mask should be one of '
        'topographic zones. In the latter case a weights array is also needed'
        ' to collapse the topographic_zone coordinate. These weights are '
        'created with the improver generate-topography-bands-weights CLI and '
        'should be made using a land-sea mask, which will then be employed '
        'within this code to draw the distinction between the two surface '
        'types.')

    parser.add_argument('input_filepath',
                        metavar='INPUT_FILE',
                        help='A path to an input NetCDF file to be processed.')
    parser.add_argument('input_mask_filepath',
                        metavar='INPUT_MASK',
                        help=('A path to an input NetCDF file containing '
                              'either a mask of topographic zones over land '
                              'or a land-sea mask.'))
    parser.add_argument('output_filepath',
                        metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')

    mask_group = parser.add_argument_group(
        'Collapse weights - required if using a topographic zones mask')
    mask_group.add_argument('--weights_for_collapsing_dim',
                            metavar='WEIGHTS',
                            default=None,
                            help='A path to a weights NetCDF file containing '
                            'the weights which are used for collapsing the '
                            'dimension gained through masking. These weights '
                            'must have been created using a land-sea mask.')

    radius_group = parser.add_argument_group(
        'Neighbourhooding Radius - Set only one of the options')
    group = radius_group.add_mutually_exclusive_group()
    group.add_argument('--radius',
                       metavar='RADIUS',
                       type=float,
                       help='The radius (in m) for neighbourhood processing.')
    group.add_argument('--radii-by-lead-time',
                       metavar=('RADII_BY_LEAD_TIME', 'LEAD_TIME_IN_HOURS'),
                       nargs=2,
                       help='The radii for neighbourhood processing '
                       'and the associated lead times at which the radii are '
                       'valid. The radii are in metres whilst the lead time '
                       'has units of hours. The radii and lead times are '
                       'expected as individual comma-separated lists with '
                       'the list of radii given first followed by a list of '
                       'lead times to indicate at what lead time each radius '
                       'should be used. For example: 10000,12000,14000 1,2,3 '
                       'where a lead time of 1 hour uses a radius of 10000m, '
                       'a lead time of 2 hours uses a radius of 12000m, etc.')
    parser.add_argument('--sum_or_fraction',
                        default="fraction",
                        choices=["sum", "fraction"],
                        help='The neighbourhood output can either be in the '
                        'form of a sum of the neighbourhood, or a '
                        'fraction calculated by dividing the sum of the '
                        'neighbourhood by the neighbourhood area. '
                        '"fraction" is the default option.')
    parser.add_argument('--intermediate_filepath',
                        default=None,
                        help='Intermediate filepath for results following '
                        'topographic masked neighbourhood processing of '
                        'land points and prior to collapsing the '
                        'topographic_zone coordinate. Intermediate files '
                        'will not be produced if no topographic masked '
                        'neighbourhood processing occurs.')

    args = parser.parse_args(args=argv)

    cube = load_cube(args.input_filepath)
    mask = load_cube(args.input_mask_filepath, no_lazy_load=True)
    weights = None
    if any('topographic_zone' in coord.name()
           for coord in mask.coords(dim_coords=True)):

        if mask.attributes['topographic_zones_include_seapoints'] == 'True':
            raise ValueError('The topographic zones mask cube must have been '
                             'masked to exclude sea points, but '
                             'topographic_zones_include_seapoints = True')

        if not args.weights_for_collapsing_dim:
            raise IOError('A weights cube must be provided if using a mask '
                          'of topographic zones to collapse the resulting '
                          'vertical dimension.')

        weights = load_cube(args.weights_for_collapsing_dim, no_lazy_load=True)

    result, intermediate_cube = process(cube, mask, args.radius,
                                        args.radii_by_lead_time, weights,
                                        args.sum_or_fraction,
                                        args.intermediate_filepath)

    save_netcdf(result, args.output_filepath)
    if args.intermediate_filepath:
        save_netcdf(intermediate_cube, args.intermediate_filepath)
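
A sketch of how the two comma-separated lists accepted by --radii-by-lead-time might be unpacked into a lead-time-to-radius lookup; the parsing shown here is an illustrative assumption, as the real handling lives inside process.

radii_arg, lead_times_arg = '10000,12000,14000', '1,2,3'
radii = [float(radius) for radius in radii_arg.split(',')]
lead_times = [int(hours) for hours in lead_times_arg.split(',')]
print(dict(zip(lead_times, radii)))  # {1: 10000.0, 2: 12000.0, 3: 14000.0}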