def test_no_bounds_exception(self):
    """A ValueError is expected when neither the forecast_period nor the
    time coordinate carries bounds from which a period could be deduced."""
    for coord_name in ("forecast_period", "time"):
        self.cube1h15m.coord(coord_name).bounds = None
    expected_msg = "Neither the forecast_period coordinate"
    with self.assertRaisesRegex(ValueError, expected_msg):
        generate_file_name(self.cube1h15m, include_period=True)
def test_funny_cube_name(self):
    """Cube names containing spaces, slashes, parentheses and capital
    letters are sanitised before being embedded in the file name."""
    self.cube.rename("Rainfall rate / (Composite)")
    result = generate_file_name(self.cube)
    expected = "20151119T0030Z-PT0000H15M-rainfall_rate_composite.nc"
    self.assertEqual(result, expected)
def test_time_period_in_minutes_from_time(self):
    """A sub-hour period deduced from the time coordinate bounds (the
    forecast_period bounds having been removed) is appended to the file
    name in minutes."""
    self.cube15m.coord("forecast_period").bounds = None
    result = generate_file_name(self.cube15m, include_period=True)
    self.assertIsInstance(result, str)
    self.assertEqual(
        result, "20151119T0030Z-PT0000H15M-air_temperature-PT15M.nc")
def test_time_period_in_hours_from_forecast_period(self):
    """A whole-hour period deduced from the forecast_period coordinate
    bounds (the time bounds having been removed) is appended to the file
    name in hours."""
    self.cube1h.coord("time").bounds = None
    result = generate_file_name(self.cube1h, include_period=True)
    self.assertIsInstance(result, str)
    self.assertEqual(
        result, "20151119T0115Z-PT0001H00M-air_temperature-PT01H.nc")
def main(argv=None):
    """Extrapolate data forward in time.

    Loads an input field plus advection velocities (either pre-computed
    u/v component files, or speed/direction files resolved via
    ResolveWindComponents), advects the field to a sequence of lead times
    with CreateExtrapolationForecast, saves each forecast to NetCDF and
    optionally also computes and saves accumulations.

    Args:
        argv (list or None):
            Command line arguments; when None argparse falls back to
            sys.argv.

    Raises:
        ValueError: if both (or neither) of the component and
            speed/direction velocity file pairs are supplied, if the
            pressure level cannot be extracted, if the number of output
            file paths does not match the number of lead times, or if
            lead_time_interval is not divisible by accumulation_fidelity.
    """
    parser = ArgParser(
        description="Extrapolate input data to required lead times.")
    parser.add_argument("input_filepath", metavar="INPUT_FILEPATH",
                        type=str, help="Path to input NetCDF file.")
    # Exactly one way of naming outputs: a directory or an explicit list.
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--output_dir", metavar="OUTPUT_DIR", type=str,
                       default="", help="Directory to write output files.")
    group.add_argument("--output_filepaths", nargs="+", type=str,
                       help="List of full paths to output nowcast files, in "
                       "order of increasing lead time.")
    optflw = parser.add_argument_group('Advect using files containing the x '
                                       ' and y components of the velocity')
    optflw.add_argument("--eastward_advection_filepath", type=str, help="Path"
                        " to input file containing Eastward advection "
                        "velocities.")
    optflw.add_argument("--northward_advection_filepath", type=str, help="Path"
                        " to input file containing Northward advection "
                        "velocities.")
    speed = parser.add_argument_group('Advect using files containing speed and'
                                      ' direction')
    speed.add_argument("--advection_speed_filepath", type=str, help="Path"
                       " to input file containing advection speeds,"
                       " usually wind speeds, on multiple pressure levels.")
    speed.add_argument("--advection_direction_filepath", type=str,
                       help="Path to input file containing the directions from"
                       " which advection speeds are coming (180 degrees from"
                       " the direction in which the speed is directed). The"
                       " directions should be on the same grid as the input"
                       " speeds, including the same vertical levels.")
    speed.add_argument("--pressure_level", type=int, default=75000, help="The"
                       " pressure level in Pa to extract from the multi-level"
                       " advection_speed and advection_direction files. The"
                       " velocities at this level are used for advection.")
    parser.add_argument("--orographic_enhancement_filepaths", nargs="+",
                        type=str, default=None, help="List or wildcarded "
                        "file specification to the input orographic "
                        "enhancement files. Orographic enhancement files are "
                        "compulsory for precipitation fields.")
    parser.add_argument("--json_file", metavar="JSON_FILE", default=None,
                        help="Filename for the json file containing "
                        "required changes to the metadata. Information "
                        "describing the intended contents of the json file "
                        "is available in "
                        "improver.utilities.cube_metadata.amend_metadata."
                        "Every output cube will have the metadata_dict "
                        "applied. Defaults to None.", type=str)
    parser.add_argument("--max_lead_time", type=int, default=360,
                        help="Maximum lead time required (mins).")
    parser.add_argument("--lead_time_interval", type=int, default=15,
                        help="Interval between required lead times (mins).")
    accumulation_args = parser.add_argument_group(
        'Calculate accumulations from advected fields')
    accumulation_args.add_argument(
        "--accumulation_fidelity", type=int, default=0,
        help="If set, this CLI will additionally return accumulations"
        " calculated from the advected fields. This fidelity specifies the"
        " time interval in minutes between advected fields that is used to"
        " calculate these accumulations. This interval must be a factor of"
        " the lead_time_interval.")
    accumulation_args.add_argument(
        "--accumulation_units", type=str, default='m',
        help="Desired units in which the accumulations should be expressed,"
        "e.g. mm")
    args = parser.parse_args(args=argv)

    upath, vpath = (args.eastward_advection_filepath,
                    args.northward_advection_filepath)
    spath, dpath = (args.advection_speed_filepath,
                    args.advection_direction_filepath)

    # load files and initialise advection plugin
    input_cube = load_cube(args.input_filepath)

    # Velocities come from exactly one of the two file-pair options; mixing
    # the pairs (or supplying neither complete pair) is rejected below.
    if (upath and vpath) and not (spath or dpath):
        ucube = load_cube(upath)
        vcube = load_cube(vpath)
    elif (spath and dpath) and not (upath or vpath):
        level_constraint = Constraint(pressure=args.pressure_level)
        try:
            scube = load_cube(spath, constraints=level_constraint)
            dcube = load_cube(dpath, constraints=level_constraint)
        except ValueError as err:
            raise ValueError(
                '{} Unable to extract specified pressure level from given '
                'speed and direction files.'.format(err))
        ucube, vcube = ResolveWindComponents().process(scube, dcube)
    else:
        raise ValueError('Cannot mix advection component velocities with speed'
                         ' and direction')

    # Orographic enhancement is optional; None is handed through to the
    # forecast plugin when no files are given.
    oe_cube = None
    if args.orographic_enhancement_filepaths:
        oe_cube = load_cube(args.orographic_enhancement_filepaths)

    metadata_dict = None
    if args.json_file:
        # Load JSON file for metadata amendments.
        with open(args.json_file, 'r') as input_file:
            metadata_dict = json.load(input_file)

    # generate list of lead times in minutes
    lead_times = np.arange(0, args.max_lead_time+1,
                           args.lead_time_interval)

    if args.output_filepaths:
        if len(args.output_filepaths) != len(lead_times):
            raise ValueError("Require exactly one output file name for each "
                             "forecast lead time")

    # determine whether accumulations are also to be returned.
    time_interval = args.lead_time_interval
    if args.accumulation_fidelity > 0:
        # Fidelity must divide the lead time interval exactly so that each
        # interval can be rebuilt from whole accumulation steps.
        fraction, _ = np.modf(args.lead_time_interval /
                              args.accumulation_fidelity)
        if fraction != 0:
            msg = ("The specified lead_time_interval ({}) is not cleanly "
                   "divisible by the specified accumulation_fidelity ({}). As "
                   "a result the lead_time_interval cannot be constructed from"
                   " accumulation cubes at this fidelity.".format(
                       args.lead_time_interval, args.accumulation_fidelity))
            raise ValueError(msg)

        # Forecasts are generated at the finer fidelity interval; only every
        # lead_time_filter-th one is saved as a rate output below.
        time_interval = args.accumulation_fidelity
        lead_times = np.arange(0, args.max_lead_time+1, time_interval)

    lead_time_filter = args.lead_time_interval // time_interval

    forecast_plugin = CreateExtrapolationForecast(
        input_cube, ucube, vcube, orographic_enhancement_cube=oe_cube,
        metadata_dict=metadata_dict)

    # extrapolate input data to required lead times
    forecast_cubes = iris.cube.CubeList()
    for i, lead_time in enumerate(lead_times):
        forecast_cubes.append(
            forecast_plugin.extrapolate(leadtime_minutes=lead_time))

    # return rate cubes
    for i, cube in enumerate(forecast_cubes[::lead_time_filter]):
        # save to a suitably-named output file
        if args.output_filepaths:
            file_name = args.output_filepaths[i]
        else:
            file_name = os.path.join(
                args.output_dir, generate_file_name(cube))
        save_netcdf(cube, file_name)

    # calculate accumulations if required
    if args.accumulation_fidelity > 0:
        # NOTE(review): accumulation_period appears to be in seconds here
        # (interval in minutes * 60) — confirm against the Accumulation
        # plugin's expected units.
        plugin = Accumulation(accumulation_units=args.accumulation_units,
                              accumulation_period=args.lead_time_interval * 60)
        accumulation_cubes = plugin.process(forecast_cubes)

        # return accumulation cubes
        for i, cube in enumerate(accumulation_cubes):
            file_name = os.path.join(args.output_dir,
                                     generate_file_name(cube))
            save_netcdf(cube, file_name)
def test_missing_time(self):
    """Removing the "time" coordinate makes file name generation raise
    CoordinateNotFoundError."""
    self.cube.remove_coord("time")
    with self.assertRaises(CoordinateNotFoundError):
        generate_file_name(self.cube)
def test_missing_lead_time(self):
    """A cube without a forecast_period coordinate falls back to a zero
    lead time in the generated name."""
    self.cube.remove_coord("forecast_period")
    result = generate_file_name(self.cube)
    self.assertEqual(
        result, "20151119T0030Z-PT0000H00M-air_temperature.nc")
def test_longer_lead_time(self):
    """Test with lead time > 1 hr.

    Assign a new points array rather than mutating in place: iris may
    return ``coord.points`` as a read-only copy, in which case
    ``points[0] += 60`` silently fails to update the coordinate. This
    also matches the assignment style used by the other lead-time test
    in this file.
    """
    fp_coord = self.cube.coord("forecast_period")
    fp_coord.points = fp_coord.points + 60
    name = generate_file_name(self.cube)
    self.assertEqual(name, "20151119T0030Z-PT0001H15M-air_temperature.nc")
def test_input_cube_unmodified(self):
    """Generating a file name must leave the input cube's data and
    metadata untouched."""
    snapshot = self.cube.copy()
    _ = generate_file_name(self.cube)
    self.assertArrayAlmostEqual(self.cube.data, snapshot.data)
    self.assertEqual(self.cube.metadata, snapshot.metadata)
def main(argv=None):
    """Extrapolate data forward in time.

    Refactored variant of the extrapolation CLI: argument parsing and
    file loading happen here, while the science is delegated to the
    module-level ``process`` function, which returns the accumulation
    cubes and the forecast cubes to be saved.

    Args:
        argv (list or None):
            Command line arguments; when None argparse falls back to
            sys.argv.

    Raises:
        ValueError: if both (or neither) of the component and
            speed/direction velocity file pairs are supplied, if the
            pressure level cannot be extracted, or if the number of
            output file paths does not match the number of forecasts.
    """
    parser = ArgParser(
        description="Extrapolate input data to required lead times.")
    parser.add_argument("input_filepath", metavar="INPUT_FILEPATH",
                        type=str, help="Path to input NetCDF file.")
    # Exactly one way of naming outputs: a directory or an explicit list.
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--output_dir", metavar="OUTPUT_DIR", type=str,
                       default="", help="Directory to write output files.")
    group.add_argument("--output_filepaths", nargs="+", type=str,
                       help="List of full paths to output nowcast files, in "
                       "order of increasing lead time.")
    optflw = parser.add_argument_group('Advect using files containing the x '
                                       ' and y components of the velocity')
    optflw.add_argument("--eastward_advection_filepath", type=str, help="Path"
                        " to input file containing Eastward advection "
                        "velocities.")
    optflw.add_argument("--northward_advection_filepath", type=str, help="Path"
                        " to input file containing Northward advection "
                        "velocities.")
    speed = parser.add_argument_group('Advect using files containing speed and'
                                      ' direction')
    speed.add_argument("--advection_speed_filepath", type=str, help="Path"
                       " to input file containing advection speeds,"
                       " usually wind speeds, on multiple pressure levels.")
    speed.add_argument("--advection_direction_filepath", type=str,
                       help="Path to input file containing the directions from"
                       " which advection speeds are coming (180 degrees from"
                       " the direction in which the speed is directed). The"
                       " directions should be on the same grid as the input"
                       " speeds, including the same vertical levels.")
    speed.add_argument("--pressure_level", type=int, default=75000, help="The"
                       " pressure level in Pa to extract from the multi-level"
                       " advection_speed and advection_direction files. The"
                       " velocities at this level are used for advection.")
    parser.add_argument("--orographic_enhancement_filepaths", nargs="+",
                        type=str, default=None, help="List or wildcarded "
                        "file specification to the input orographic "
                        "enhancement files. Orographic enhancement files are "
                        "compulsory for precipitation fields.")
    parser.add_argument("--json_file", metavar="JSON_FILE", default=None,
                        help="Filename for the json file containing "
                        "required changes to the metadata. Information "
                        "describing the intended contents of the json file "
                        "is available in "
                        "improver.utilities.cube_metadata.amend_metadata."
                        "Every output cube will have the metadata_dict "
                        "applied. Defaults to None.", type=str)
    parser.add_argument("--max_lead_time", type=int, default=360,
                        help="Maximum lead time required (mins).")
    parser.add_argument("--lead_time_interval", type=int, default=15,
                        help="Interval between required lead times (mins).")
    accumulation_args = parser.add_argument_group(
        'Calculate accumulations from advected fields')
    accumulation_args.add_argument(
        "--accumulation_fidelity", type=int, default=0,
        help="If set, this CLI will additionally return accumulations"
        " calculated from the advected fields. This fidelity specifies the"
        " time interval in minutes between advected fields that is used to"
        " calculate these accumulations. This interval must be a factor of"
        " the lead_time_interval.")
    accumulation_args.add_argument(
        "--accumulation_period", type=int, default=15,
        help="The period over which the accumulation is calculated (mins). "
        "Only full accumulation periods will be computed. At lead times "
        "that are shorter than the accumulation period, no accumulation "
        "output will be produced.")
    accumulation_args.add_argument(
        "--accumulation_units", type=str, default='m',
        help="Desired units in which the accumulations should be expressed,"
        "e.g. mm")

    # Load Cubes
    args = parser.parse_args(args=argv)

    metadata_dict = load_json_or_none(args.json_file)

    upath, vpath = (args.eastward_advection_filepath,
                    args.northward_advection_filepath)
    spath, dpath = (args.advection_speed_filepath,
                    args.advection_direction_filepath)

    # load files and initialise advection plugin
    input_cube = load_cube(args.input_filepath)
    # allow_none=True: returns None when no orographic enhancement files
    # were supplied, so downstream processing can treat it as optional.
    orographic_enhancement_cube = load_cube(
        args.orographic_enhancement_filepaths, allow_none=True)

    # Velocities come from exactly one of the two file-pair options; the
    # unused pair's cubes remain None and are resolved inside process().
    speed_cube = direction_cube = ucube = vcube = None
    if (upath and vpath) and not (spath or dpath):
        ucube = load_cube(upath)
        vcube = load_cube(vpath)
    elif (spath and dpath) and not (upath or vpath):
        level_constraint = Constraint(pressure=args.pressure_level)
        try:
            speed_cube = load_cube(spath, constraints=level_constraint)
            direction_cube = load_cube(dpath, constraints=level_constraint)
        except ValueError as err:
            raise ValueError(
                '{} Unable to extract specified pressure level from given '
                'speed and direction files.'.format(err))
    else:
        raise ValueError('Cannot mix advection component velocities with speed'
                         ' and direction')

    # Process Cubes
    accumulation_cubes, forecast_to_return = process(
        input_cube, ucube, vcube, speed_cube, direction_cube,
        orographic_enhancement_cube, metadata_dict, args.max_lead_time,
        args.lead_time_interval, args.accumulation_fidelity,
        args.accumulation_period, args.accumulation_units)

    # Save Cube
    if args.output_filepaths and \
            len(args.output_filepaths) != len(forecast_to_return):
        raise ValueError("Require exactly one output file name for each "
                         "forecast lead time")
    for i, cube in enumerate(forecast_to_return):
        # save to a suitably-named output file
        if args.output_filepaths:
            file_name = args.output_filepaths[i]
        else:
            file_name = os.path.join(args.output_dir,
                                     generate_file_name(cube))
        save_netcdf(cube, file_name)

    if args.accumulation_fidelity > 0:
        # return accumulation cubes
        for i, cube in enumerate(accumulation_cubes):
            file_name = os.path.join(args.output_dir,
                                     generate_file_name(cube))
            save_netcdf(cube, file_name)
def test_parameter_name(self):
    """An explicit ``parameter`` argument overrides the cube name in the
    generated file name."""
    result = generate_file_name(self.cube, parameter='another_temperature')
    expected = "20151119T0030Z-PT0000H15M-another_temperature.nc"
    self.assertEqual(result, expected)
def test_longer_lead_time(self):
    """A lead time greater than one hour is rendered as hours plus
    minutes in the file name."""
    # 75 minutes, expressed in seconds
    new_points = np.array([75 * 60], dtype=np.int32)
    self.cube15m.coord("forecast_period").points = new_points
    result = generate_file_name(self.cube15m)
    self.assertEqual(result, "20151119T0030Z-PT0001H15M-air_temperature.nc")
def test_hours_and_minutes_exception(self):
    """A bounds difference that exceeds one hour without being a whole
    number of hours raises a ValueError."""
    expected_msg = "If the difference between the bounds of the"
    with self.assertRaisesRegex(ValueError, expected_msg):
        generate_file_name(self.cube1h15m, include_period=True)
def main(argv=None):
    """Calculate optical flow advection velocities and (optionally)
    extrapolate data.

    Refactored variant of the optical-flow CLI: argument parsing and file
    loading happen here, while the science is delegated to the
    module-level ``process`` function, which returns the forecast cubes
    and the mean u/v advection components.

    Args:
        argv (list or None):
            Command line arguments; when None argparse falls back to
            sys.argv.

    Raises:
        ValueError: if the number of nowcast file paths does not match
            the number of forecast cubes.
    """
    parser = ArgParser(
        description="Calculate optical flow components from input fields "
        "and (optionally) extrapolate to required lead times.")
    parser.add_argument("input_filepaths", metavar="INPUT_FILEPATHS",
                        nargs=3, type=str, help="Paths to the input radar "
                        "files. There should be 3 input files at T, T-1 and "
                        "T-2 from which to calculate optical flow velocities. "
                        "The files require a 'time' coordinate on which they "
                        "are sorted, so the order of inputs does not matter.")
    parser.add_argument("--output_dir", metavar="OUTPUT_DIR", type=str,
                        default='', help="Directory to write all output files,"
                        " or only advection velocity components if "
                        "NOWCAST_FILEPATHS is specified.")
    parser.add_argument("--nowcast_filepaths", nargs="+", type=str,
                        default=None, help="Optional list of full paths to "
                        "output nowcast files. Overrides OUTPUT_DIR. Ignored "
                        "unless '--extrapolate' is set.")
    parser.add_argument("--orographic_enhancement_filepaths", nargs="+",
                        type=str, default=None, help="List or wildcarded "
                        "file specification to the input orographic "
                        "enhancement files. Orographic enhancement files are "
                        "compulsory for precipitation fields.")
    parser.add_argument("--json_file", metavar="JSON_FILE", default=None,
                        help="Filename for the json file containing "
                        "required changes to the metadata. Information "
                        "describing the intended contents of the json file "
                        "is available in "
                        "improver.utilities.cube_metadata.amend_metadata."
                        "Every output cube will have the metadata_dict "
                        "applied. Defaults to None.", type=str)

    # OpticalFlow plugin configurable parameters
    parser.add_argument("--ofc_box_size", type=int, default=30, help="Size of "
                        "square 'box' (in grid squares) within which to solve "
                        "the optical flow equations.")
    parser.add_argument("--smart_smoothing_iterations", type=int, default=100,
                        help="Number of iterations to perform in enforcing "
                        "smoothness constraint for optical flow velocities.")

    # AdvectField options
    parser.add_argument("--extrapolate", action="store_true", default=False,
                        help="Optional flag to advect current data forward to "
                        "specified lead times.")
    parser.add_argument("--max_lead_time", type=int, default=360,
                        help="Maximum lead time required (mins). Ignored "
                        "unless '--extrapolate' is set.")
    parser.add_argument("--lead_time_interval", type=int, default=15,
                        help="Interval between required lead times (mins). "
                        "Ignored unless '--extrapolate' is set.")
    args = parser.parse_args(args=argv)

    # Load Cubes and JSON.
    metadata_dict = load_json_or_none(args.json_file)
    original_cube_list = load_cubelist(args.input_filepaths)
    # allow_none=True: oe_cube is None when no orographic enhancement
    # files were supplied.
    oe_cube = load_cube(args.orographic_enhancement_filepaths,
                        allow_none=True)

    # Process
    forecast_cubes, u_and_v_mean = process(original_cube_list, oe_cube,
                                           metadata_dict, args.ofc_box_size,
                                           args.smart_smoothing_iterations,
                                           args.extrapolate,
                                           args.max_lead_time,
                                           args.lead_time_interval)

    # Save Cubes
    for wind_cube in u_and_v_mean:
        file_name = generate_file_name(wind_cube)
        save_netcdf(wind_cube, os.path.join(args.output_dir, file_name))

    # advect latest input data to the required lead times
    if args.extrapolate:
        if args.nowcast_filepaths:
            if len(args.nowcast_filepaths) != len(forecast_cubes):
                raise ValueError("Require exactly one output file name for "
                                 "each forecast lead time")
        for i, cube in enumerate(forecast_cubes):
            # save to a suitably-named output file
            if args.nowcast_filepaths:
                file_name = args.nowcast_filepaths[i]
            else:
                file_name = os.path.join(args.output_dir,
                                         generate_file_name(cube))
            save_netcdf(cube, file_name)
def test_basic(self):
    """The default output is a string of the form
    <validity time>-<lead time>-<cube name>.nc."""
    result = generate_file_name(self.cube)
    self.assertIsInstance(result, str)
    self.assertEqual(result, "20151119T0030Z-PT0000H15M-air_temperature.nc")
def main(argv=None):
    """Calculate orographic enhancement of precipitation from model pressure,
    temperature, relative humidity and wind input files.

    Extracts each diagnostic at the requested boundary height, loads the
    high resolution orography, delegates the calculation to the
    module-level ``process`` function, and saves both the high resolution
    and standard (regridded) outputs to the output directory.

    Args:
        argv (list or None):
            Command line arguments; when None argparse falls back to
            sys.argv.
    """
    parser = ArgParser(description='Calculate orographic enhancement using the'
                       ' ResolveWindComponents() and OrographicEnhancement() '
                       'plugins. Outputs data on the high resolution orography'
                       ' grid and regridded to the coarser resolution of the '
                       'input diagnostic variables.')
    parser.add_argument('temperature_filepath', metavar='TEMPERATURE_FILEPATH',
                        help='Full path to input NetCDF file of temperature on'
                        ' height levels')
    parser.add_argument('humidity_filepath', metavar='HUMIDITY_FILEPATH',
                        help='Full path to input NetCDF file of relative '
                        'humidity on height levels')
    parser.add_argument('pressure_filepath', metavar='PRESSURE_FILEPATH',
                        help='Full path to input NetCDF file of pressure on '
                        'height levels')
    parser.add_argument('windspeed_filepath', metavar='WINDSPEED_FILEPATH',
                        help='Full path to input NetCDF file of wind speed on '
                        'height levels')
    parser.add_argument('winddir_filepath', metavar='WINDDIR_FILEPATH',
                        help='Full path to input NetCDF file of wind direction'
                        ' on height levels')
    parser.add_argument('orography_filepath', metavar='OROGRAPHY_FILEPATH',
                        help='Full path to input NetCDF high resolution '
                        'orography ancillary. This should be on the same or a '
                        'finer resolution grid than the input variables, and '
                        'defines the grid on which the orographic enhancement '
                        'will be calculated.')
    parser.add_argument('output_dir', metavar='OUTPUT_DIR', help='Directory '
                        'to write output orographic enhancement files')
    parser.add_argument('--boundary_height', type=float, default=1000.,
                        help='Model height level to extract variables for '
                        'calculating orographic enhancement, as proxy for '
                        'the boundary layer.')
    parser.add_argument('--boundary_height_units', type=str, default='m',
                        help='Units of the boundary height specified for '
                        'extracting model levels.')
    args = parser.parse_args(args=argv)

    # Shared (height, units) constraint applied to every diagnostic load.
    constraint_info = (args.boundary_height, args.boundary_height_units)

    temperature = load_and_extract(args.temperature_filepath, *constraint_info)
    humidity = load_and_extract(args.humidity_filepath, *constraint_info)
    pressure = load_and_extract(args.pressure_filepath, *constraint_info)
    wind_speed = load_and_extract(args.windspeed_filepath, *constraint_info)
    wind_dir = load_and_extract(args.winddir_filepath, *constraint_info)

    # load high resolution orography
    orography = load_cube(args.orography_filepath)

    orogenh_high_res, orogenh_standard = process(temperature, humidity,
                                                 pressure, wind_speed,
                                                 wind_dir, orography)

    # generate file names
    fname_standard = os.path.join(args.output_dir,
                                  generate_file_name(orogenh_standard))
    fname_high_res = os.path.join(
        args.output_dir,
        generate_file_name(orogenh_high_res,
                           parameter="orographic_enhancement_high_resolution"))

    # save output files
    save_netcdf(orogenh_standard, fname_standard)
    save_netcdf(orogenh_high_res, fname_high_res)
def main(argv=None):
    """Calculate optical flow advection velocities and (optionally)
    extrapolate data.

    Loads three consecutive radar fields, optionally subtracts orographic
    enhancement, computes optical flow velocities between successive
    pairs, saves the time-mean u/v components, and optionally advects the
    latest field to a set of lead times.

    Args:
        argv (list or None):
            Command line arguments; when None argparse falls back to
            sys.argv.

    Raises:
        ValueError: if precipitation-rate cubes are supplied without
            orographic enhancement files, or if the number of nowcast
            file paths does not match the number of lead times.
    """
    parser = ArgParser(
        description="Calculate optical flow components from input fields "
        "and (optionally) extrapolate to required lead times.")
    parser.add_argument("input_filepaths", metavar="INPUT_FILEPATHS",
                        nargs=3, type=str, help="Paths to the input radar "
                        "files. There should be 3 input files at T, T-1 and "
                        "T-2 from which to calculate optical flow velocities. "
                        "The files require a 'time' coordinate on which they "
                        "are sorted, so the order of inputs does not matter.")
    parser.add_argument("--output_dir", metavar="OUTPUT_DIR", type=str,
                        default='', help="Directory to write all output files,"
                        " or only advection velocity components if "
                        "NOWCAST_FILEPATHS is specified.")
    parser.add_argument("--nowcast_filepaths", nargs="+", type=str,
                        default=None, help="Optional list of full paths to "
                        "output nowcast files. Overrides OUTPUT_DIR. Ignored "
                        "unless '--extrapolate' is set.")
    parser.add_argument("--orographic_enhancement_filepaths", nargs="+",
                        type=str, default=None, help="List or wildcarded "
                        "file specification to the input orographic "
                        "enhancement files. Orographic enhancement files are "
                        "compulsory for precipitation fields.")
    parser.add_argument("--json_file", metavar="JSON_FILE", default=None,
                        help="Filename for the json file containing "
                        "required changes to the metadata. Information "
                        "describing the intended contents of the json file "
                        "is available in "
                        "improver.utilities.cube_metadata.amend_metadata."
                        "Every output cube will have the metadata_dict "
                        "applied. Defaults to None.", type=str)

    # OpticalFlow plugin configurable parameters
    parser.add_argument("--ofc_box_size", type=int, default=30, help="Size of "
                        "square 'box' (in grid squares) within which to solve "
                        "the optical flow equations.")
    parser.add_argument("--smart_smoothing_iterations", type=int, default=100,
                        help="Number of iterations to perform in enforcing "
                        "smoothness constraint for optical flow velocities.")

    # AdvectField options
    parser.add_argument("--extrapolate", action="store_true", default=False,
                        help="Optional flag to advect current data forward to "
                        "specified lead times.")
    parser.add_argument("--max_lead_time", type=int, default=360,
                        help="Maximum lead time required (mins). Ignored "
                        "unless '--extrapolate' is set.")
    parser.add_argument("--lead_time_interval", type=int, default=15,
                        help="Interval between required lead times (mins). "
                        "Ignored unless '--extrapolate' is set.")
    args = parser.parse_args(args=argv)

    # read input data
    original_cube_list = load_cubelist(args.input_filepaths)

    # Bugfix: initialise oe_cube so the reference in the extrapolation
    # branch below cannot raise NameError when no orographic enhancement
    # files are supplied (e.g. non-precipitation fields with
    # --extrapolate set).
    oe_cube = None
    if args.orographic_enhancement_filepaths:
        # Subtract orographic enhancement
        oe_cube = load_cube(args.orographic_enhancement_filepaths)
        cube_list = ApplyOrographicEnhancement("subtract").process(
            original_cube_list, oe_cube)
    else:
        cube_list = original_cube_list
        if any("precipitation_rate" in cube.name() for cube in cube_list):
            cube_names = [cube.name() for cube in cube_list]
            msg = ("For precipitation fields, orographic enhancement "
                   "filepaths must be supplied. The names of the cubes "
                   "supplied were: {}".format(cube_names))
            raise ValueError(msg)

    # order input files by validity time
    cube_list.sort(key=lambda x: x.coord("time").points[0])
    time_coord = cube_list[-1].coord("time")

    metadata_dict = None
    if args.json_file:
        # Load JSON file for metadata amendments.
        with open(args.json_file, 'r') as input_file:
            metadata_dict = json.load(input_file)

    # calculate optical flow velocities from T-1 to T and T-2 to T-1
    ofc_plugin = OpticalFlow(iterations=args.smart_smoothing_iterations,
                             metadata_dict=metadata_dict)
    ucubes = iris.cube.CubeList([])
    vcubes = iris.cube.CubeList([])
    for older_cube, newer_cube in zip(cube_list[:-1], cube_list[1:]):
        ucube, vcube = ofc_plugin.process(older_cube, newer_cube,
                                          boxsize=args.ofc_box_size)
        ucubes.append(ucube)
        vcubes.append(vcube)

    # average optical flow velocity components, stamping the mean with
    # the validity time of the latest input field
    ucube = ucubes.merge_cube()
    umean = ucube.collapsed("time", iris.analysis.MEAN)
    umean.coord("time").points = time_coord.points
    umean.coord("time").units = time_coord.units

    vcube = vcubes.merge_cube()
    vmean = vcube.collapsed("time", iris.analysis.MEAN)
    vmean.coord("time").points = time_coord.points
    vmean.coord("time").units = time_coord.units

    # save mean optical flow components as netcdf files
    for wind_cube in [umean, vmean]:
        file_name = generate_file_name(wind_cube)
        save_netcdf(wind_cube, os.path.join(args.output_dir, file_name))

    # advect latest input data to the required lead times
    if args.extrapolate:
        # generate list of lead times in minutes
        lead_times = np.arange(0, args.max_lead_time+1,
                               args.lead_time_interval)

        if args.nowcast_filepaths:
            if len(args.nowcast_filepaths) != len(lead_times):
                raise ValueError("Require exactly one output file name for "
                                 "each forecast lead time")

        forecast_plugin = CreateExtrapolationForecast(
            original_cube_list[-1], umean, vmean,
            orographic_enhancement_cube=oe_cube,
            metadata_dict=metadata_dict)

        # extrapolate input data to required lead times
        for i, lead_time in enumerate(lead_times):
            forecast_cube = forecast_plugin.extrapolate(
                leadtime_minutes=lead_time)

            # save to a suitably-named output file
            if args.nowcast_filepaths:
                file_name = args.nowcast_filepaths[i]
            else:
                file_name = os.path.join(
                    args.output_dir, generate_file_name(forecast_cube))
            save_netcdf(forecast_cube, file_name)