Example No. 1
 def test_wildcard_files_with_constraint(self):
     """Test that the loading works correctly, if a wildcarded filepath is
     provided and a constraint is provided that is only valid for a subset
     of the available files."""
     low_cloud_cube = self.cube.copy()
     low_cloud_cube.rename("low_type_cloud_area_fraction")
     low_cloud_cube.units = 1
     save_netcdf(low_cloud_cube, self.low_cloud_filepath)
     medium_cloud_cube = self.cube.copy()
     medium_cloud_cube.rename("medium_type_cloud_area_fraction")
     medium_cloud_cube.units = 1
     save_netcdf(medium_cloud_cube, self.med_cloud_filepath)
     constr = iris.Constraint("low_type_cloud_area_fraction")
     result = load_cubelist(
         [self.low_cloud_filepath, self.med_cloud_filepath],
         constraints=constr)
     self.assertEqual(len(result), 1)
     self.assertArrayAlmostEqual(result[0].coord("realization").points,
                                 self.realization_points)
     self.assertArrayAlmostEqual(result[0].coord("time").points,
                                 self.time_points)
     self.assertArrayAlmostEqual(result[0].coord("latitude").points,
                                 self.latitude_points)
     self.assertArrayAlmostEqual(result[0].coord("longitude").points,
                                 self.longitude_points)
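A minimal sketch of the constrained wildcard-loading pattern this test exercises, assuming load_cubelist is importable from the IMPROVER load utilities; the file glob and diagnostic name below are hypothetical.

import iris
from improver.utilities.load import load_cubelist  # import path assumed

# Load every file matching the glob, but keep only the cube selected by the
# constraint; the glob and diagnostic name are illustrative only.
constraint = iris.Constraint("low_type_cloud_area_fraction")
cubes = load_cubelist("/path/to/cloud_files/*.nc", constraints=constraint)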
Example No. 2
def main(argv=None):
    """Parser to accept input data and an output destination before invoking
    the weather symbols plugin.
    """

    diagnostics = interrogate_decision_tree('high_resolution')
    n_files = len(diagnostics)
    dlist = (' - {}\n' * n_files)

    diagnostics_global = interrogate_decision_tree('global')
    n_files_global = len(diagnostics_global)
    dlist_global = (' - {}\n' * n_files_global)

    parser = ArgParser(
        description='Calculate gridded weather symbol codes.\nThis plugin '
        'requires a specific set of input diagnostics, where data\nmay be in '
        'any units to which the thresholds given below can\nbe converted:\n' +
        dlist.format(*diagnostics) + '\n\n or for global data\n\n' +
        dlist_global.format(*diagnostics_global),
        formatter_class=RawTextHelpFormatter)

    parser.add_argument(
        'input_filepaths',
        metavar='INPUT_FILES',
        nargs="+",
        help='Paths to files containing the required input diagnostics.')
    parser.add_argument('output_filepath',
                        metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')
    parser.add_argument("--wxtree",
                        metavar="WXTREE",
                        default="high_resolution",
                        choices=["high_resolution", "global"],
                        help="Weather Code tree.\n"
                        "Choices are high_resolution or global.\n"
                        "Default=high_resolution.",
                        type=str)

    args = parser.parse_args(args=argv)

    # Load Cube
    cubes = load_cubelist(args.input_filepaths, no_lazy_load=True)
    required_number_of_inputs = n_files
    if args.wxtree == 'global':
        required_number_of_inputs = n_files_global
    if len(cubes) != required_number_of_inputs:
        msg = ('Incorrect number of inputs: files {} gave {} cubes' +
               ', {} required').format(args.input_filepaths, len(cubes),
                                       required_number_of_inputs)
        raise argparse.ArgumentTypeError(msg)

    # Process Cube
    result = process(cubes, args.wxtree)

    # Save Cube
    save_netcdf(result, args.output_filepath)
Example No. 3
 def test_single_file(self):
     """Test that the loading works correctly, if only the filepath is
     provided."""
     result = load_cubelist(self.filepath)
     self.assertArrayAlmostEqual(result[0].coord("realization").points,
                                 self.realization_points)
     self.assertArrayAlmostEqual(result[0].coord("time").points,
                                 self.time_points)
     self.assertArrayAlmostEqual(result[0].coord("latitude").points,
                                 self.latitude_points)
     self.assertArrayAlmostEqual(result[0].coord("longitude").points,
                                 self.longitude_points)
Example No. 4
 def test_wildcard_files(self):
     """Test that the loading works correctly, if a wildcarded filepath is
     provided."""
     filepath = os.path.join(self.directory, "*.nc")
     result = load_cubelist(filepath)
     self.assertArrayAlmostEqual(result[0].coord("realization").points,
                                 self.realization_points)
     self.assertArrayAlmostEqual(result[0].coord("time").points,
                                 self.time_points)
     self.assertArrayAlmostEqual(result[0].coord("latitude").points,
                                 self.latitude_points)
     self.assertArrayAlmostEqual(result[0].coord("longitude").points,
                                 self.longitude_points)
Example No. 5
 def test_multiple_files(self):
     """Test that the loading works correctly, if a path to multiple files
     is provided."""
     result = load_cubelist([self.filepath, self.filepath])
     for cube in result:
         self.assertArrayAlmostEqual(
             cube.coord("realization").points, self.realization_points)
         self.assertArrayAlmostEqual(
             cube.coord("time").points, self.time_points)
         self.assertArrayAlmostEqual(
             cube.coord("latitude").points, self.latitude_points)
         self.assertArrayAlmostEqual(
             cube.coord("longitude").points, self.longitude_points)
Example No. 6
 def test_no_lazy_load(self):
     """Test that the loading works correctly with lazy load bypassing."""
     result = load_cubelist([self.filepath, self.filepath],
                            no_lazy_load=True)
     self.assertIsInstance(result, iris.cube.CubeList)
     self.assertArrayEqual([False, False],
                           [_.has_lazy_data() for _ in result])
     for cube in result:
         self.assertArrayAlmostEqual(
             cube.coord("realization").points, self.realization_points)
         self.assertArrayAlmostEqual(
             cube.coord("time").points, self.time_points)
         self.assertArrayAlmostEqual(
             cube.coord("latitude").points, self.latitude_points)
         self.assertArrayAlmostEqual(
             cube.coord("longitude").points, self.longitude_points)
Example No. 7
 def test_no_lazy_load(self):
     """Test that the cubelist returned upon loading does not contain
     lazy data."""
     result = load_cubelist([self.filepath, self.filepath],
                            no_lazy_load=True)
     self.assertArrayEqual([False, False],
                           [_.has_lazy_data() for _ in result])
     for cube in result:
         self.assertArrayAlmostEqual(
             cube.coord("realization").points, self.realization_points)
         self.assertArrayAlmostEqual(
             cube.coord("time").points, self.time_points)
         self.assertArrayAlmostEqual(
             cube.coord("latitude").points, self.latitude_points)
         self.assertArrayAlmostEqual(
             cube.coord("longitude").points, self.longitude_points)
Example No. 8
 def test_no_partial_merge_single_arg(self):
     """Test that we can load three files independently when a wildcarded
     filepath is provided, even if two of the cubes could be merged"""
     low_cloud_cube = self.cube.copy()
     low_cloud_cube.rename("low_type_cloud_area_fraction")
     low_cloud_cube.coord("time").points = (
         low_cloud_cube.coord("time").points + 3600)
     low_cloud_cube.coord("forecast_period").points = (
         low_cloud_cube.coord("forecast_period").points - 3600)
     save_netcdf(low_cloud_cube, self.low_cloud_filepath)
     medium_cloud_cube = self.cube.copy()
     medium_cloud_cube.rename("medium_type_cloud_area_fraction")
     save_netcdf(medium_cloud_cube, self.med_cloud_filepath)
     fileglob = os.path.join(self.directory, "*.nc")
     result = load_cubelist(fileglob)
     self.assertEqual(len(result), 3)
Example No. 9
def get_additional_diagnostics(diagnostic_name,
                               diagnostic_data_path,
                               time_extract=None):
    """
    Load additional diagnostics needed for particular spot data processes.

    Args:
        diagnostic_name (string):
            The name of the diagnostic to be loaded. Used to find
            the relevant file.

        time_extract (iris.Constraint):
            An iris constraint to extract and return only data from the desired
            time.

    Returns:
        cubes (iris.cube.CubeList):
            CubeList containing the desired diagnostic data, with a single
            entry if time_extract is provided.

    Raises:
        IOError : If files are not found.
        ValueError : If required diagnostics are not found in the read files.

    """
    # Search diagnostic data directory for all files relevant to current
    # diagnostic.
    files_to_read = [
        os.path.join(dirpath, filename)
        for dirpath, _, files in os.walk(diagnostic_data_path)
        for filename in files if diagnostic_name in filename
    ]

    if not files_to_read:
        raise IOError('The relevant data files for {}, which is required '
                      'as an additional diagnostic, are not available '
                      'in {}.'.format(diagnostic_name, diagnostic_data_path))
    cubes = load_cubelist(files_to_read)

    if time_extract is not None:
        with iris.FUTURE.context(cell_datetime_objects=True):
            cubes = cubes.extract(time_extract)
        if not cubes:
            raise ValueError('No diagnostics match {}'.format(time_extract))
    return cubes
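A hedged usage sketch of the helper above; the diagnostic name, data directory and validity time are illustrative, and the time constraint follows the cell-comparison style that the function's cell_datetime_objects context enables.

from datetime import datetime
import iris

# Hypothetical inputs: gather all files containing the named diagnostic under
# the given directory, then keep only the data valid at 12Z on 1 June 2019.
time_constraint = iris.Constraint(
    time=lambda cell: cell.point == datetime(2019, 6, 1, 12))
cubes = get_additional_diagnostics(
    "temperature_at_screen_level",
    "/path/to/spot_data/diagnostics",
    time_extract=time_constraint)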
Example No. 10
 def test_no_partial_merge_list_args(self):
     """Test that we can load three files independently when a wildcarded
     filepath is provided in a single-item list.  This is the form in which
     multi-item arguments ("nargs=+") are provided via "argparse" from the
     loading CLIs."""
     low_cloud_cube = self.cube.copy()
     low_cloud_cube.rename("low_type_cloud_area_fraction")
     low_cloud_cube.coord("time").points = (
         low_cloud_cube.coord("time").points + 3600)
     low_cloud_cube.coord("forecast_period").points = (
         low_cloud_cube.coord("forecast_period").points - 3600)
     save_netcdf(low_cloud_cube, self.low_cloud_filepath)
     medium_cloud_cube = self.cube.copy()
     medium_cloud_cube.rename("medium_type_cloud_area_fraction")
     save_netcdf(medium_cloud_cube, self.med_cloud_filepath)
     fileglob = os.path.join(self.directory, "*.nc")
     result = load_cubelist([fileglob])
     self.assertEqual(len(result), 3)
Example No. 11
def main(argv=None):
    """Load in arguments and ensure they are set correctly.
       Then run Triangular weighted blending across the given coordinate."""
    parser = ArgParser(
        description='Use the TriangularWeightedBlendAcrossAdjacentPoints to '
                    'blend across a particular coordinate. It does not '
                    'collapse the coordinate, but instead blends across '
                    'adjacent points and puts the blended values back in the '
                    'original coordinate, with adjusted bounds.')
    parser.add_argument('coordinate', type=str,
                        metavar='COORDINATE_TO_BLEND_OVER',
                        help='The coordinate over which the blending '
                             'will be applied.')
    parser.add_argument('central_point', metavar='CENTRAL_POINT', type=float,
                        help='Central point at which the output from the '
                             'triangular weighted blending will be '
                             'calculated. This should be in the units of the '
                             'units argument that is passed in. '
                             'This value should be a point on the '
                             'coordinate for blending over.')
    parser.add_argument('--units', metavar='UNIT_STRING', required=True,
                        help='Units of the central_point and width.')
    parser.add_argument('--calendar', metavar='CALENDAR',
                        default='gregorian',
                        help='Calendar for parameter_unit if required. '
                             'Default=gregorian')
    parser.add_argument('--width', metavar='TRIANGLE_WIDTH', type=float,
                        required=True,
                        help='Width of the triangular weighting function used '
                             'in the blending, in the units of the '
                             'units argument passed in.')
    parser.add_argument('--blend_time_using_forecast_period',
                        default=False, action='store_true', help='Flag that '
                        'we are blending over time but using the forecast '
                        'period coordinate as a proxy.  Note this should only '
                        'be used when time and forecast_period share a '
                        'dimension: ie when all files provided are from the '
                        'same forecast cycle.')
    parser.add_argument('input_filepaths', metavar='INPUT_FILES', nargs="+",
                        help='Paths to input NetCDF files including and '
                             'surrounding the central_point.')
    parser.add_argument('output_filepath', metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')

    args = parser.parse_args(args=argv)

    # TriangularWeightedBlendAcrossAdjacentPoints can't currently handle
    # blending over times where iris reads the coordinate points as datetime
    # objects.  Fail here to avoid unhelpful errors downstream.
    if "time" in args.coordinate:
        msg = ("Cannot blend over {} coordinate (points encoded as datetime "
               "objects)".format(args.coordinate))
        raise ValueError(msg)

    # This is left as a placeholder for when we have this capability
    if args.coordinate == 'time':
        units = Unit(args.units, args.calendar)
    else:
        units = args.units

    cubelist = load_cubelist(args.input_filepaths)

    if (args.blend_time_using_forecast_period and
            args.coordinate == 'forecast_period'):
        cube = MergeCubes().process(cubelist, check_time_bounds_ranges=True)
    elif args.blend_time_using_forecast_period:
        msg = ('"--blend_time_using_forecast_period" can only be used with '
               '"forecast_period" coordinate')
        raise ValueError(msg)
    else:
        cube = MergeCubes().process(cubelist)

    BlendingPlugin = TriangularWeightedBlendAcrossAdjacentPoints(
        args.coordinate, args.central_point, units, args.width)
    result = BlendingPlugin.process(cube)
    save_netcdf(result, args.output_filepath)
Example No. 12
 def test_lazy_load(self):
     """Test that the cubelist returned upon loading does contain
     lazy data."""
     result = load_cubelist([self.filepath, self.filepath])
     self.assertArrayEqual([True, True],
                           [_.has_lazy_data() for _ in result])
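Read alongside examples 6 and 7, the default lazy behaviour and the no_lazy_load option can be contrasted in a short sketch; the file path is hypothetical and the import path for load_cubelist is assumed.

from improver.utilities.load import load_cubelist  # import path assumed

# By default the returned cubes keep lazy (deferred) data arrays;
# no_lazy_load=True realises the data at load time instead.
lazy_cubes = load_cubelist(["forecast.nc"])
eager_cubes = load_cubelist(["forecast.nc"], no_lazy_load=True)
assert all(cube.has_lazy_data() for cube in lazy_cubes)
assert not any(cube.has_lazy_data() for cube in eager_cubes)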
Example No. 13
def main(argv=None):
    """Calculate optical flow advection velocities"""

    parser = ArgParser(
        description="Calculate optical flow components from input fields.")

    parser.add_argument("input_filepaths",
                        metavar="INPUT_FILEPATHS",
                        nargs=3,
                        type=str,
                        help="Paths to the input radar "
                        "files. There should be 3 input files at T, T-1 and "
                        "T-2 from which to calculate optical flow velocities. "
                        "The files require a 'time' coordinate on which they "
                        "are sorted, so the order of inputs does not matter.")
    parser.add_argument("output_filepath",
                        metavar="OUTPUT_FILEPATH",
                        help="The output path for the resulting NetCDF")

    parser.add_argument("--nowcast_filepaths",
                        nargs="+",
                        type=str,
                        default=None,
                        help="Optional list of full paths to "
                        "output nowcast files. Overrides OUTPUT_DIR. Ignored "
                        "unless '--extrapolate' is set.")
    parser.add_argument("--orographic_enhancement_filepaths",
                        nargs="+",
                        type=str,
                        default=None,
                        help="List or wildcarded "
                        "file specification to the input orographic "
                        "enhancement files. Orographic enhancement files are "
                        "compulsory for precipitation fields.")
    parser.add_argument("--json_file",
                        metavar="JSON_FILE",
                        default=None,
                        help="Filename for the json file containing "
                        "required changes to attributes. "
                        "Every output file will have the attributes_dict "
                        "applied. Defaults to None.",
                        type=str)

    # OpticalFlow plugin configurable parameters
    parser.add_argument("--ofc_box_size",
                        type=int,
                        default=30,
                        help="Size of "
                        "square 'box' (in grid squares) within which to solve "
                        "the optical flow equations.")
    parser.add_argument("--smart_smoothing_iterations",
                        type=int,
                        default=100,
                        help="Number of iterations to perform in enforcing "
                        "smoothness constraint for optical flow velocities.")

    args = parser.parse_args(args=argv)

    # Load Cubes and JSON
    attributes_dict = load_json_or_none(args.json_file)
    original_cube_list = load_cubelist(args.input_filepaths)
    oe_cube = load_cube(args.orographic_enhancement_filepaths, allow_none=True)

    # Process
    result = process(original_cube_list, oe_cube, attributes_dict,
                     args.ofc_box_size, args.smart_smoothing_iterations)

    # Save Cubes
    save_netcdf(result, args.output_filepath)
Example No. 14
def main(argv=None):
    """Load in arguments and ensure they are set correctly.
       Then run Triangular weighted blending across the given coordinate."""
    parser = ArgParser(
        description='Use the TriangularWeightedBlendAcrossAdjacentPoints to '
        'blend across a particular coordinate. It does not '
        'collapse the coordinate, but instead blends across '
        'adjacent points and puts the blended values back in the '
        'original coordinate, with adjusted bounds.')
    parser.add_argument('coordinate',
                        type=str,
                        metavar='COORDINATE_TO_BLEND_OVER',
                        help='The coordinate over which the blending '
                        'will be applied.')
    parser.add_argument('central_point',
                        metavar='CENTRAL_POINT',
                        type=float,
                        help='Central point at which the output from the '
                        'triangular weighted blending will be '
                        'calculated. This should be in the units of the '
                        'units argument that is passed in. '
                        'This value should be a point on the '
                        'coordinate for blending over.')
    parser.add_argument('--units',
                        metavar='UNIT_STRING',
                        required=True,
                        help='Units of the central_point and width.')
    parser.add_argument('--calendar',
                        metavar='CALENDAR',
                        default='gregorian',
                        help='Calendar for parameter_unit if required. '
                        'Default=gregorian')
    parser.add_argument('--width',
                        metavar='TRIANGLE_WIDTH',
                        type=float,
                        required=True,
                        help='Width of the triangular weighting function used '
                        'in the blending, in the units of the '
                        'units argument passed in.')
    parser.add_argument('--blend_time_using_forecast_period',
                        default=False,
                        action='store_true',
                        help='Flag that '
                        'we are blending over time but using the forecast '
                        'period coordinate as a proxy.  Note this should only '
                        'be used when time and forecast_period share a '
                        'dimension: ie when all files provided are from the '
                        'same forecast cycle.')
    parser.add_argument('input_filepaths',
                        metavar='INPUT_FILES',
                        nargs="+",
                        help='Paths to input NetCDF files including and '
                        'surrounding the central_point.')
    parser.add_argument('output_filepath',
                        metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')

    args = parser.parse_args(args=argv)

    # Load Cubelist
    cubelist = load_cubelist(args.input_filepaths)
    # Process Cube
    result = process(cubelist, args.coordinate, args.central_point, args.units,
                     args.width, args.calendar,
                     args.blend_time_using_forecast_period)
    # Save Cube
    save_netcdf(result, args.output_filepath)
Example No. 15
def main(argv=None):
    """Load in arguments and ensure they are set correctly.
       Then load in the data to blend and calculate default weights
       using the method chosen before carrying out the blending."""
    parser = ArgParser(
        description='Calculate the default weights to apply in weighted '
        'blending plugins using the ChooseDefaultWeightsLinear or '
        'ChooseDefaultWeightsNonLinear plugins. Then apply these '
        'weights to the dataset using the BasicWeightedAverage plugin.'
        ' Required for ChooseDefaultWeightsLinear: y0val and ynval.'
        ' Required for ChooseDefaultWeightsNonLinear: cval.'
        ' Required for ChooseWeightsLinear with dict: wts_dict.')

    parser.add_argument('--wts_calc_method',
                        metavar='WEIGHTS_CALCULATION_METHOD',
                        choices=['linear', 'nonlinear', 'dict'],
                        default='linear',
                        help='Method to use to calculate '
                        'weights used in blending. "linear" (default): '
                        'calculate linearly varying blending weights. '
                        '"nonlinear": calculate blending weights that decrease'
                        ' exponentially with increasing blending coordinate. '
                        '"dict": calculate weights using a dictionary passed '
                        'in as a command line argument.')

    parser.add_argument('coordinate',
                        type=str,
                        metavar='COORDINATE_TO_AVERAGE_OVER',
                        help='The coordinate over which the blending '
                        'will be applied.')
    parser.add_argument('--cycletime',
                        metavar='CYCLETIME',
                        type=str,
                        help='The forecast reference time to be used after '
                        'blending has been applied, in the format '
                        'YYYYMMDDTHHMMZ. If not provided, the blended file '
                        'will take the latest available forecast reference '
                        'time from the input cubes supplied.')
    parser.add_argument('--model_id_attr',
                        metavar='MODEL_ID_ATTR',
                        type=str,
                        default=None,
                        help='The name of the netCDF file attribute to be '
                        'used to identify the source model for '
                        'multi-model blends. Default is None. '
                        'Must be present on all input '
                        'files if blending over models.')
    parser.add_argument('--spatial_weights_from_mask',
                        action='store_true',
                        default=False,
                        help='If set this option will result in the generation'
                        ' of spatially varying weights based on the'
                        ' masks of the data we are blending. The'
                        ' one dimensional weights are first calculated '
                        ' using the chosen weights calculation method,'
                        ' but the weights will then be adjusted spatially'
                        ' based on where there is masked data in the data'
                        ' we are blending. The spatial weights are'
                        ' calculated using the'
                        ' SpatiallyVaryingWeightsFromMask plugin.')

    parser.add_argument('input_filepaths',
                        metavar='INPUT_FILES',
                        nargs="+",
                        help='Paths to input files to be blended.')
    parser.add_argument('output_filepath',
                        metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')

    spatial = parser.add_argument_group(
        'Spatial weights from mask options',
        'Options for calculating the spatial weights using the '
        'SpatiallyVaryingWeightsFromMask plugin.')
    spatial.add_argument('--fuzzy_length',
                         metavar='FUZZY_LENGTH',
                         type=float,
                         default=20000,
                         help='When calculating spatially varying weights we'
                         ' can smooth the weights so that areas close to'
                         ' areas that are masked have lower weights than'
                         ' those further away. This fuzzy length controls'
                         ' the scale over which the weights are smoothed.'
                         ' The fuzzy length is in terms of m, the'
                         ' default is 20km. This distance is then'
                         ' converted into a number of grid squares,'
                         ' which does not have to be an integer. Assumes'
                         ' the grid spacing is the same in the x and y'
                         ' directions, and raises an error if this is not'
                         ' true. See SpatiallyVaryingWeightsFromMask for'
                         ' more detail.')

    linear = parser.add_argument_group(
        'linear weights options', 'Options for the linear weights '
        'calculation in '
        'ChooseDefaultWeightsLinear')
    linear.add_argument('--y0val',
                        metavar='LINEAR_STARTING_POINT',
                        type=float,
                        help='The relative value of the weighting start point '
                        '(lowest value of blend coord) for choosing default '
                        'linear weights. This must be a positive float or 0.')
    linear.add_argument('--ynval',
                        metavar='LINEAR_END_POINT',
                        type=float,
                        help='The relative value of the weighting '
                        'end point (highest value of blend coord) for choosing'
                        ' default linear weights. This must be a positive '
                        'float or 0.  Note that if blending over forecast '
                        'reference time, ynval >= y0val would normally be '
                        'expected (to give greater weight to the more recent '
                        'forecast).')

    nonlinear = parser.add_argument_group(
        'nonlinear weights options', 'Options for the non-linear '
        'weights calculation in '
        'ChooseDefaultWeightsNonLinear')
    nonlinear.add_argument('--cval',
                           metavar='NON_LINEAR_FACTOR',
                           type=float,
                           help='Factor used to determine how skewed the '
                           'non linear weights will be. A value of 1 '
                           'implies equal weighting.')

    wts_dict = parser.add_argument_group(
        'dict weights options', 'Options for linear weights to be '
        'calculated based on parameters '
        'read from a json file dict')
    wts_dict.add_argument('--wts_dict',
                          metavar='WEIGHTS_DICTIONARY',
                          help='Path to json file containing dictionary from '
                          'which to calculate blending weights. Dictionary '
                          'format is as specified in the improver.blending.'
                          'weights.ChooseWeightsLinear plugin.')
    wts_dict.add_argument('--weighting_coord',
                          metavar='WEIGHTING_COORD',
                          default='forecast_period',
                          help='Name of '
                          'coordinate over which linear weights should be '
                          'scaled. This coordinate must be available in the '
                          'weights dictionary.')

    args = parser.parse_args(args=argv)

    # reject incorrect argument combinations
    if (args.wts_calc_method == "linear") and args.cval:
        parser.wrong_args_error('cval', 'linear')
    if ((args.wts_calc_method == "nonlinear")
            and np.any([args.y0val, args.ynval])):
        parser.wrong_args_error('y0val, ynval', 'non-linear')

    if (args.wts_calc_method == "dict") and not args.wts_dict:
        parser.error('Dictionary is required if --wts_calc_method="dict"')

    weights_dict = load_json_or_none(args.wts_dict)

    # Load cubes to be blended.
    cubelist = load_cubelist(args.input_filepaths)

    result = process(cubelist, args.wts_calc_method, args.coordinate,
                     args.cycletime, args.weighting_coord, weights_dict,
                     args.y0val, args.ynval, args.cval, args.model_id_attr,
                     args.spatial_weights_from_mask, args.fuzzy_length)

    save_netcdf(result, args.output_filepath)
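Because main accepts an argv list, the linear-weights mode this parser describes can be sketched without a shell; all paths and values below are hypothetical.

# Hypothetical invocation: blend over forecast_reference_time with linearly
# increasing weights (positional arguments are given first so argparse can
# split the nargs='+' inputs from the output path).
main(['forecast_reference_time',
      'forecast_cycle_1.nc', 'forecast_cycle_2.nc', 'blended.nc',
      '--y0val', '1.0', '--ynval', '4.0'])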
Example No. 16
def main(argv=None):
    """Calculate optical flow advection velocities and (optionally)
    extrapolate data."""

    parser = ArgParser(
        description="Calculate optical flow components from input fields "
        "and (optionally) extrapolate to required lead times.")

    parser.add_argument("input_filepaths",
                        metavar="INPUT_FILEPATHS",
                        nargs=3,
                        type=str,
                        help="Paths to the input radar "
                        "files. There should be 3 input files at T, T-1 and "
                        "T-2 from which to calculate optical flow velocities. "
                        "The files require a 'time' coordinate on which they "
                        "are sorted, so the order of inputs does not matter.")
    parser.add_argument("--output_dir",
                        metavar="OUTPUT_DIR",
                        type=str,
                        default='',
                        help="Directory to write all output files,"
                        " or only advection velocity components if "
                        "NOWCAST_FILEPATHS is specified.")
    parser.add_argument("--nowcast_filepaths",
                        nargs="+",
                        type=str,
                        default=None,
                        help="Optional list of full paths to "
                        "output nowcast files. Overrides OUTPUT_DIR. Ignored "
                        "unless '--extrapolate' is set.")
    parser.add_argument("--orographic_enhancement_filepaths",
                        nargs="+",
                        type=str,
                        default=None,
                        help="List or wildcarded "
                        "file specification to the input orographic "
                        "enhancement files. Orographic enhancement files are "
                        "compulsory for precipitation fields.")
    parser.add_argument("--json_file",
                        metavar="JSON_FILE",
                        default=None,
                        help="Filename for the json file containing "
                        "required changes to the metadata. Information "
                        "describing the intended contents of the json file "
                        "is available in "
                        "improver.utilities.cube_metadata.amend_metadata."
                        "Every output cube will have the metadata_dict "
                        "applied. Defaults to None.",
                        type=str)

    # OpticalFlow plugin configurable parameters
    parser.add_argument("--ofc_box_size",
                        type=int,
                        default=30,
                        help="Size of "
                        "square 'box' (in grid squares) within which to solve "
                        "the optical flow equations.")
    parser.add_argument("--smart_smoothing_iterations",
                        type=int,
                        default=100,
                        help="Number of iterations to perform in enforcing "
                        "smoothness constraint for optical flow velocities.")

    # AdvectField options
    parser.add_argument("--extrapolate",
                        action="store_true",
                        default=False,
                        help="Optional flag to advect current data forward to "
                        "specified lead times.")
    parser.add_argument("--max_lead_time",
                        type=int,
                        default=360,
                        help="Maximum lead time required (mins).  Ignored "
                        "unless '--extrapolate' is set.")
    parser.add_argument("--lead_time_interval",
                        type=int,
                        default=15,
                        help="Interval between required lead times (mins). "
                        "Ignored unless '--extrapolate' is set.")

    args = parser.parse_args(args=argv)

    # Load Cubes and JSON.
    metadata_dict = load_json_or_none(args.json_file)
    original_cube_list = load_cubelist(args.input_filepaths)
    oe_cube = load_cube(args.orographic_enhancement_filepaths, allow_none=True)

    # Process
    forecast_cubes, u_and_v_mean = process(original_cube_list, oe_cube,
                                           metadata_dict, args.ofc_box_size,
                                           args.smart_smoothing_iterations,
                                           args.extrapolate,
                                           args.max_lead_time,
                                           args.lead_time_interval)

    # Save Cubes
    for wind_cube in u_and_v_mean:
        file_name = generate_file_name(wind_cube)
        save_netcdf(wind_cube, os.path.join(args.output_dir, file_name))

    # advect latest input data to the required lead times
    if args.extrapolate:
        if args.nowcast_filepaths:
            if len(args.nowcast_filepaths) != len(forecast_cubes):
                raise ValueError("Require exactly one output file name for "
                                 "each forecast lead time")

        for i, cube in enumerate(forecast_cubes):
            # save to a suitably-named output file
            if args.nowcast_filepaths:
                file_name = args.nowcast_filepaths[i]
            else:
                file_name = os.path.join(args.output_dir,
                                         generate_file_name(cube))
            save_netcdf(cube, file_name)
Example No. 17
 def test_a_cubelist_is_loaded(self):
     """Test that a cubelist is loaded when a valid filepath is provided."""
     result = load_cubelist(self.filepath)
     self.assertIsInstance(result, iris.cube.CubeList)
Example No. 18
def main(argv=None):
    """Load in arguments for estimating coefficients for Ensemble Model Output
       Statistics (EMOS), otherwise known as Non-homogeneous Gaussian
       Regression (NGR). 2 sources of input data must be provided: historical
       forecasts and historical truth data (to use in calibration). The
       estimated coefficients are written to a netCDF file.
    """
    parser = ArgParser(
        description='Estimate coefficients for Ensemble Model Output '
                    'Statistics (EMOS), otherwise known as Non-homogeneous '
                    'Gaussian Regression (NGR). There are two methods for '
                    'inputting data into this CLI, either by providing the '
                    'historic forecasts and truth separately, or by providing '
                    'a combined list of historic forecasts and truths along '
                    'with historic_forecast_identifier and truth_identifier '
                    'arguments to provide metadata that distinguishes between '
                    'them.')
    parser.add_argument('distribution', metavar='DISTRIBUTION',
                        choices=['gaussian', 'truncated_gaussian'],
                        help='The distribution that will be used for '
                             'calibration. This will be dependent upon the '
                             'input phenomenon. This has to be supported by '
                             'the minimisation functions in '
                             'ContinuousRankedProbabilityScoreMinimisers.')
    parser.add_argument('cycletime', metavar='CYCLETIME', type=str,
                        help='This denotes the cycle at which forecasts '
                             'will be calibrated using the calculated '
                             'EMOS coefficients. The validity time in the '
                             'output coefficients cube will be calculated '
                             'relative to this cycletime. '
                             'This cycletime is in the format '
                             'YYYYMMDDTHHMMZ.')

    # Historic forecast and truth filepaths
    parser.add_argument(
        '--historic_filepath', metavar='HISTORIC_FILEPATH', nargs='+',
        help='Paths to the input NetCDF files containing the '
             'historic forecast(s) used for calibration. '
             'This must be supplied with an associated truth filepath. '
             'Specification of either the combined_filepath, '
             'historic_forecast_identifier or the truth_identifier is '
             'invalid with this argument.')
    parser.add_argument(
        '--truth_filepath', metavar='TRUTH_FILEPATH', nargs='+',
        help='Paths to the input NetCDF files containing the '
             'historic truth analyses used for calibration. '
             'This must be supplied with an associated historic filepath. '
             'Specification of either the combined_filepath, '
             'historic_forecast_identifier or the truth_identifier is '
             'invalid with this argument.')

    # Input filepaths
    parser.add_argument(
        '--combined_filepath', metavar='COMBINED_FILEPATH', nargs='+',
        help='Paths to the input NetCDF files containing '
             'both the historic forecast(s) and truth '
             'analyses used for calibration. '
             'This must be supplied with both the '
             'historic_forecast_identifier and the truth_identifier. '
             'Specification of either the historic_filepath or the '
             'truth_filepath is invalid with this argument.')
    parser.add_argument(
        "--historic_forecast_identifier",
        metavar='HISTORIC_FORECAST_IDENTIFIER',
        help='The path to a json file containing metadata '
             'information that defines the historic forecast. '
             'This must be supplied with both the combined_filepath and the '
             'truth_identifier. Specification of either the historic_filepath '
             'or the truth_filepath is invalid with this argument. '
             'The intended contents are described in improver.'
             'ensemble_calibration.ensemble_calibration_utilities.'
             'SplitHistoricForecastAndTruth.')
    parser.add_argument(
        "--truth_identifier", metavar='TRUTH_IDENTIFIER',
        help='The path to a json file containing metadata '
             'information that defines the truth. '
             'This must be supplied with both the combined_filepath and the '
             'historic_forecast_identifier. Specification of either the '
             'historic_filepath or the truth_filepath is invalid with this '
             'argument. The intended contents are described in improver.'
             'ensemble_calibration.ensemble_calibration_utilities.'
             'SplitHistoricForecastAndTruth.')

    # Output filepath
    parser.add_argument('output_filepath', metavar='OUTPUT_FILEPATH',
                        help='The output path for the processed NetCDF')
    # Optional arguments.
    parser.add_argument('--units', metavar='UNITS',
                        help='The units that calibration should be undertaken '
                             'in. The historical forecast and truth will be '
                             'converted as required.')
    parser.add_argument('--predictor_of_mean', metavar='PREDICTOR_OF_MEAN',
                        choices=['mean', 'realizations'], default='mean',
                        help='String to specify the predictor used to '
                             'calibrate the forecast mean. Currently the '
                             'ensemble mean ("mean") and the ensemble '
                             'realizations ("realizations") are supported as '
                             'options. Default: "mean".')
    parser.add_argument('--max_iterations', metavar='MAX_ITERATIONS',
                        type=np.int32, default=1000,
                        help='The maximum number of iterations allowed '
                             'until the minimisation has converged to a '
                             'stable solution. If the maximum number '
                             'of iterations is reached, but the '
                             'minimisation has not yet converged to a '
                             'stable solution, then the available solution '
                             'is used anyway, and a warning is raised. '
                             'This may be modified for testing purposes '
                             'but otherwise kept fixed. If the '
                             'predictor_of_mean is "realizations", '
                             'then the number of iterations may require '
                             'increasing, as there will be more coefficients '
                             'to solve for.')
    args = parser.parse_args(args=argv)

    # Load Cubes
    historic_forecast = load_cube(args.historic_filepath, allow_none=True)
    truth = load_cube(args.truth_filepath, allow_none=True)

    combined = (load_cubelist(args.combined_filepath)
                if args.combined_filepath else None)
    historic_forecast_dict = (
        load_json_or_none(args.historic_forecast_identifier))
    truth_dict = load_json_or_none(args.truth_identifier)

    # Process Cube
    coefficients = process(historic_forecast, truth, combined,
                           historic_forecast_dict, truth_dict,
                           args.distribution, args.cycletime, args.units,
                           args.predictor_of_mean, args.max_iterations)
    # Save Cube
    # Check whether a coefficients cube has been created. If the historic
    # forecasts and truths provided did not match in validity time, then
    # no coefficients would have been calculated.
    if coefficients:
        save_netcdf(coefficients, args.output_filepath)
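Since this main also takes an argv list, the separate historic/truth input mode described by the parser can be sketched directly; the cycletime and every path below are hypothetical.

# Hypothetical invocation: estimate coefficients for a Gaussian distribution
# from separate historic-forecast and truth files (positional arguments given
# before the nargs='+' options so the output path is not swallowed by them).
main(['gaussian', '20190601T0000Z', 'emos_coefficients.nc',
      '--historic_filepath', 'historic_forecasts.nc',
      '--truth_filepath', 'truths.nc'])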
Example No. 19
 def test_lazy_load(self):
     """Test that the loading works correctly with lazy loading."""
     result = load_cubelist([self.filepath, self.filepath])
     self.assertIsInstance(result, iris.cube.CubeList)
     self.assertArrayEqual([True, True],
                           [_.has_lazy_data() for _ in result])
Example No. 20
def main(argv=None):
    """Load in arguments and ensure they are set correctly.
       Then load in the data to blend and calculate default weights
       using the method chosen before carrying out the blending."""
    parser = ArgParser(
        description='Calculate the default weights to apply in weighted '
        'blending plugins using the ChooseDefaultWeightsLinear or '
        'ChooseDefaultWeightsNonLinear plugins. Then apply these '
        'weights to the dataset using the BasicWeightedAverage plugin.'
        ' Required for ChooseDefaultWeightsLinear: y0val and ynval.'
        ' Required for ChooseDefaultWeightsNonLinear: cval.'
        ' Required for ChooseWeightsLinear with dict: wts_dict.')

    parser.add_argument('--wts_calc_method',
                        metavar='WEIGHTS_CALCULATION_METHOD',
                        choices=['linear', 'nonlinear', 'dict'],
                        default='linear',
                        help='Method to use to calculate '
                        'weights used in blending. "linear" (default): '
                        'calculate linearly varying blending weights. '
                        '"nonlinear": calculate blending weights that decrease'
                        ' exponentially with increasing blending coordinate. '
                        '"dict": calculate weights using a dictionary passed '
                        'in as a command line argument.')

    parser.add_argument('coordinate',
                        type=str,
                        metavar='COORDINATE_TO_AVERAGE_OVER',
                        help='The coordinate over which the blending '
                        'will be applied.')
    parser.add_argument('--coordinate_unit',
                        metavar='UNIT_STRING',
                        default='hours since 1970-01-01 00:00:00',
                        help='Units for blending coordinate. Default= '
                        'hours since 1970-01-01 00:00:00')
    parser.add_argument('--calendar',
                        metavar='CALENDAR',
                        help='Calendar for time coordinate. Default=gregorian')
    parser.add_argument('--cycletime',
                        metavar='CYCLETIME',
                        type=str,
                        help='The forecast reference time to be used after '
                        'blending has been applied, in the format '
                        'YYYYMMDDTHHMMZ. If not provided, the blended file '
                        'will take the latest available forecast reference '
                        'time from the input cubes supplied.')
    parser.add_argument('--model_id_attr',
                        metavar='MODEL_ID_ATTR',
                        type=str,
                        default="mosg__model_configuration",
                        help='The name of the netCDF file attribute to be '
                        'used to identify the source model for '
                        'multi-model blends. Default assumes Met Office '
                        'model metadata. Must be present on all input '
                        'files if blending over models.')
    parser.add_argument('--spatial_weights_from_mask',
                        action='store_true',
                        default=False,
                        help='If set this option will result in the generation'
                        ' of spatially varying weights based on the'
                        ' masks of the data we are blending. The'
                        ' one dimensional weights are first calculated '
                        ' using the chosen weights calculation method,'
                        ' but the weights will then be adjusted spatially'
                        ' based on where there is masked data in the data'
                        ' we are blending. The spatial weights are'
                        ' calculated using the'
                        ' SpatiallyVaryingWeightsFromMask plugin.')
    parser.add_argument('weighting_mode',
                        metavar='WEIGHTED_BLEND_MODE',
                        choices=['weighted_mean', 'weighted_maximum'],
                        help='The method used in the weighted blend. '
                        '"weighted_mean": calculate a normal weighted'
                        ' mean across the coordinate. '
                        '"weighted_maximum": multiplies the values in the'
                        ' coordinate by the weights, and then takes the'
                        ' maximum.')

    parser.add_argument('input_filepaths',
                        metavar='INPUT_FILES',
                        nargs="+",
                        help='Paths to input files to be blended.')
    parser.add_argument('output_filepath',
                        metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')

    spatial = parser.add_argument_group(
        'Spatial weights from mask options',
        'Options for calculating the spatial weights using the '
        'SpatiallyVaryingWeightsFromMask plugin.')
    spatial.add_argument('--fuzzy_length',
                         metavar='FUZZY_LENGTH',
                         type=float,
                         default=20000,
                         help='When calculating spatially varying weights we'
                         ' can smooth the weights so that areas close to'
                         ' areas that are masked have lower weights than'
                         ' those further away. This fuzzy length controls'
                         ' the scale over which the weights are smoothed.'
                         ' The fuzzy length is in terms of m, the'
                         ' default is 20km. This distance is then'
                         ' converted into a number of grid squares,'
                         ' which does not have to be an integer. Assumes'
                         ' the grid spacing is the same in the x and y'
                         ' directions, and raises an error if this is not'
                         ' true. See SpatiallyVaryingWeightsFromMask for'
                         ' more detail.')

    linear = parser.add_argument_group(
        'linear weights options', 'Options for the linear weights '
        'calculation in '
        'ChooseDefaultWeightsLinear')
    linear.add_argument('--y0val',
                        metavar='LINEAR_STARTING_POINT',
                        type=float,
                        help='The relative value of the weighting start point '
                        '(lowest value of blend coord) for choosing default '
                        'linear weights. This must be a positive float or 0.')
    linear.add_argument('--ynval',
                        metavar='LINEAR_END_POINT',
                        type=float,
                        help='The relative value of the weighting '
                        'end point (highest value of blend coord) for choosing'
                        ' default linear weights. This must be a positive '
                        'float or 0.  Note that if blending over forecast '
                        'reference time, ynval >= y0val would normally be '
                        'expected (to give greater weight to the more recent '
                        'forecast).')

    nonlinear = parser.add_argument_group(
        'nonlinear weights options', 'Options for the non-linear '
        'weights calculation in '
        'ChooseDefaultWeightsNonLinear')
    nonlinear.add_argument('--cval',
                           metavar='NON_LINEAR_FACTOR',
                           type=float,
                           help='Factor used to determine how skewed the '
                           'non linear weights will be. '
                           'A value of 1 implies equal weighting. If not '
                           'set, a default value of cval=0.85 is set.')

    wts_dict = parser.add_argument_group(
        'dict weights options', 'Options for linear weights to be '
        'calculated based on parameters '
        'read from a json file dict')
    wts_dict.add_argument('--wts_dict',
                          metavar='WEIGHTS_DICTIONARY',
                          help='Path to json file containing dictionary from '
                          'which to calculate blending weights. Dictionary '
                          'format is as specified in the improver.blending.'
                          'weights.ChooseWeightsLinear plugin.')
    wts_dict.add_argument('--weighting_coord',
                          metavar='WEIGHTING_COORD',
                          default='forecast_period',
                          help='Name of '
                          'coordinate over which linear weights should be '
                          'scaled. This coordinate must be available in the '
                          'weights dictionary.')

    args = parser.parse_args(args=argv)

    # if the linear weights method is called with non-linear args or vice
    # versa, exit with error
    if (args.wts_calc_method == "linear") and args.cval:
        parser.wrong_args_error('cval', 'linear')
    if ((args.wts_calc_method == "nonlinear")
            and np.any([args.y0val, args.ynval])):
        parser.wrong_args_error('y0val, ynval', 'non-linear')
    if (args.wts_calc_method == "dict") and not args.wts_dict:
        parser.error('Dictionary is required if --wts_calc_method="dict"')

    # set blending coordinate units
    if "time" in args.coordinate:
        coord_unit = Unit(args.coordinate_unit, args.calendar)
    elif args.coordinate_unit != 'hours since 1970-01-01 00:00:00':
        coord_unit = args.coordinate_unit
    else:
        coord_unit = 'no_unit'

    # For blending across models, only blending across "model_id" is directly
    # supported. This is because the blending coordinate must be sortable, in
    # order to ensure that the data cube and the weights cube have coordinates
    # in the same order for blending. Whilst the model_configuration is
    # sortable itself, as it is associated with model_id, which is the
    # dimension coordinate, sorting the model_configuration coordinate can
    # result in the model_id coordinate becoming non-monotonic. As dimension
    # coordinates must be monotonic, this leads to the model_id coordinate
    # being demoted to an auxiliary coordinate. Therefore, for simplicity
    # model_id is used as the blending coordinate, instead of
    # model_configuration.
    # TODO: Support model_configuration as a blending coordinate directly.
    if args.coordinate == "model_configuration":
        blend_coord = "model_id"
        dict_coord = "model_configuration"
    else:
        blend_coord = args.coordinate
        dict_coord = args.coordinate

    # load cubes to be blended
    cubelist = load_cubelist(args.input_filepaths)

    # determine whether or not to equalise forecast periods for model
    # blending weights calculation
    weighting_coord = (args.weighting_coord
                       if args.weighting_coord else "forecast_period")

    # prepare cubes for weighted blending
    merger = MergeCubesForWeightedBlending(blend_coord,
                                           weighting_coord=weighting_coord,
                                           model_id_attr=args.model_id_attr)
    cube = merger.process(cubelist, cycletime=args.cycletime)

    # if the coord for blending does not exist or has only one value,
    # update metadata only
    coord_names = [coord.name() for coord in cube.coords()]
    if (blend_coord not in coord_names) or (len(
            cube.coord(blend_coord).points) == 1):
        result = cube.copy()
        conform_metadata(result, cube, blend_coord, cycletime=args.cycletime)
        # raise a warning if this happened because the blend coordinate
        # doesn't exist
        if blend_coord not in coord_names:
            warnings.warn('Blend coordinate {} is not present on input '
                          'data'.format(blend_coord))

    # otherwise, calculate weights and blend across specified dimension
    else:
        weights = calculate_blending_weights(
            cube,
            blend_coord,
            args.wts_calc_method,
            wts_dict=args.wts_dict,
            weighting_coord=args.weighting_coord,
            coord_unit=coord_unit,
            y0val=args.y0val,
            ynval=args.ynval,
            cval=args.cval,
            dict_coord=dict_coord)

        if args.spatial_weights_from_mask:
            check_if_grid_is_equal_area(cube)
            grid_cells_x, _ = convert_distance_into_number_of_grid_cells(
                cube, args.fuzzy_length, int_grid_cells=False)
            SpatialWeightsPlugin = SpatiallyVaryingWeightsFromMask(
                grid_cells_x)
            weights = SpatialWeightsPlugin.process(cube, weights, blend_coord)

        # blend across specified dimension
        BlendingPlugin = WeightedBlendAcrossWholeDimension(
            blend_coord, args.weighting_mode, cycletime=args.cycletime)
        result = BlendingPlugin.process(cube, weights=weights)

    save_netcdf(result, args.output_filepath)
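    # A minimal, hypothetical invocation of this blending CLI (the wrapper
    # script name and positional argument order are assumptions; only
    # arguments referenced above are used):
    #
    #   improver_weighted_blending.py forecast_a.nc forecast_b.nc blended.nc \
    #       --coordinate model_configuration --wts_calc_method dict \
    #       --wts_dict model_weights.json --weighting_coord forecast_period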
Exemplo n.º 21
0
def main(argv=None):
    """Calculate optical flow advection velocities and (optionally)
    extrapolate data."""

    parser = ArgParser(
        description="Calculate optical flow components from input fields "
        "and (optionally) extrapolate to required lead times.")

    parser.add_argument("input_filepaths", metavar="INPUT_FILEPATHS",
                        nargs=3, type=str, help="Paths to the input radar "
                        "files. There should be 3 input files at T, T-1 and "
                        "T-2 from which to calculate optical flow velocities. "
                        "The files require a 'time' coordinate on which they "
                        "are sorted, so the order of inputs does not matter.")
    parser.add_argument("--output_dir", metavar="OUTPUT_DIR", type=str,
                        default='', help="Directory to write all output files,"
                        " or only advection velocity components if "
                        "NOWCAST_FILEPATHS is specified.")
    parser.add_argument("--nowcast_filepaths", nargs="+", type=str,
                        default=None, help="Optional list of full paths to "
                        "output nowcast files. Overrides OUTPUT_DIR. Ignored "
                        "unless '--extrapolate' is set.")
    parser.add_argument("--orographic_enhancement_filepaths", nargs="+",
                        type=str, default=None, help="List of paths or a "
                        "wildcarded file specification for the input "
                        "orographic enhancement files. These are "
                        "compulsory for precipitation fields.")
    parser.add_argument("--json_file", metavar="JSON_FILE", default=None,
                        help="Filename for the json file containing "
                        "required changes to the metadata. Information "
                        "describing the intended contents of the json file "
                        "is available in "
                        "improver.utilities.cube_metadata.amend_metadata."
                        "Every output cube will have the metadata_dict "
                        "applied. Defaults to None.", type=str)

    # OpticalFlow plugin configurable parameters
    parser.add_argument("--ofc_box_size", type=int, default=30, help="Size of "
                        "square 'box' (in grid squares) within which to solve "
                        "the optical flow equations.")
    parser.add_argument("--smart_smoothing_iterations", type=int, default=100,
                        help="Number of iterations to perform in enforcing "
                        "smoothness constraint for optical flow velocities.")

    # AdvectField options
    parser.add_argument("--extrapolate", action="store_true", default=False,
                        help="Optional flag to advect current data forward to "
                        "specified lead times.")
    parser.add_argument("--max_lead_time", type=int, default=360,
                        help="Maximum lead time required (mins).  Ignored "
                        "unless '--extrapolate' is set.")
    parser.add_argument("--lead_time_interval", type=int, default=15,
                        help="Interval between required lead times (mins). "
                        "Ignored unless '--extrapolate' is set.")

    args = parser.parse_args(args=argv)

    # read input data
    original_cube_list = load_cubelist(args.input_filepaths)

    # oe_cube defaults to None; it is referenced again when extrapolating
    oe_cube = None
    if args.orographic_enhancement_filepaths:
        # Subtract orographic enhancement
        oe_cube = load_cube(args.orographic_enhancement_filepaths)
        cube_list = ApplyOrographicEnhancement("subtract").process(
            original_cube_list, oe_cube)
    else:
        cube_list = original_cube_list
        if any("precipitation_rate" in cube.name() for cube in cube_list):
            cube_names = [cube.name() for cube in cube_list]
            msg = ("For precipitation fields, orographic enhancement "
                   "filepaths must be supplied. The names of the cubes "
                   "supplied were: {}".format(cube_names))
            raise ValueError(msg)

    # order input files by validity time
    cube_list.sort(key=lambda x: x.coord("time").points[0])
    time_coord = cube_list[-1].coord("time")
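    # e.g. with the three required inputs this leaves cube_list ordered as
    # [T-2, T-1, T], so cube_list[-1] and time_coord refer to the most recent
    # validity time, T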

    metadata_dict = None
    if args.json_file:
        # Load JSON file for metadata amendments.
        with open(args.json_file, 'r') as input_file:
            metadata_dict = json.load(input_file)

    # calculate optical flow velocities from T-1 to T and T-2 to T-1
    ofc_plugin = OpticalFlow(iterations=args.smart_smoothing_iterations,
                             metadata_dict=metadata_dict)
    ucubes = iris.cube.CubeList([])
    vcubes = iris.cube.CubeList([])
    for older_cube, newer_cube in zip(cube_list[:-1], cube_list[1:]):
        ucube, vcube = ofc_plugin.process(older_cube, newer_cube,
                                          boxsize=args.ofc_box_size)
        ucubes.append(ucube)
        vcubes.append(vcube)
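    # e.g. the three sorted inputs yield two velocity estimates, for the
    # T-2 -> T-1 and T-1 -> T steps, which are averaged below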

    # average optical flow velocity components
    ucube = ucubes.merge_cube()
    umean = ucube.collapsed("time", iris.analysis.MEAN)
    umean.coord("time").points = time_coord.points
    umean.coord("time").units = time_coord.units

    vcube = vcubes.merge_cube()
    vmean = vcube.collapsed("time", iris.analysis.MEAN)
    vmean.coord("time").points = time_coord.points
    vmean.coord("time").units = time_coord.units

    # save mean optical flow components as netcdf files
    for wind_cube in [umean, vmean]:
        file_name = generate_file_name(wind_cube)
        save_netcdf(wind_cube, os.path.join(args.output_dir, file_name))

    # advect latest input data to the required lead times
    if args.extrapolate:

        # generate list of lead times in minutes
        lead_times = np.arange(0, args.max_lead_time+1,
                               args.lead_time_interval)
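        # e.g. with the defaults (max_lead_time=360, lead_time_interval=15)
        # this gives lead times of 0, 15, 30, ..., 360 minutes inclusive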

        if args.nowcast_filepaths:
            if len(args.nowcast_filepaths) != len(lead_times):
                raise ValueError("Require exactly one output file name for "
                                 "each forecast lead time")

        forecast_plugin = CreateExtrapolationForecast(
            original_cube_list[-1], umean, vmean,
            orographic_enhancement_cube=oe_cube, metadata_dict=metadata_dict)
        # extrapolate input data to required lead times
        for i, lead_time in enumerate(lead_times):
            forecast_cube = forecast_plugin.extrapolate(
                leadtime_minutes=lead_time)

            # save to a suitably-named output file
            if args.nowcast_filepaths:
                file_name = args.nowcast_filepaths[i]
            else:
                file_name = os.path.join(
                    args.output_dir, generate_file_name(forecast_cube))
            save_netcdf(forecast_cube, file_name)
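    # A minimal, hypothetical invocation of this optical flow CLI (the wrapper
    # script name is an assumption; only arguments defined above are used):
    #
    #   improver_nowcast_optical_flow.py radar_T-2.nc radar_T-1.nc radar_T.nc \
    #       --output_dir /path/to/output --extrapolate \
    #       --max_lead_time 90 --lead_time_interval 15 \
    #       --orographic_enhancement_filepaths oe_T-2.nc oe_T-1.nc oe_T.nc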