Example No. 1
    def test_raises_exception_for_unevenly_spaced_cubes(self):
        """Test function raises an exception if the input cubes are not
        spaced equally in time."""

        last_time = self.cubes[-1].coord('time').points
        self.cubes[-1].coord('time').points = last_time + 60

        msg = ("Accumulation is designed to work with rates "
               "cubes at regular time intervals.")
        plugin = Accumulation(accumulation_period=120)

        with self.assertRaisesRegex(ValueError, msg):
            plugin.process(self.cubes)
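
For reference, the regular-spacing precondition this test exercises can be
checked directly with numpy. A minimal standalone sketch (assuming cubes is an
iris CubeList whose 'time' coordinate points are in seconds; this helper is
illustrative only, not the Accumulation plugin's own code):

import numpy as np

def times_evenly_spaced(cubes):
    """Return True if the cubes' scalar time points are regularly spaced."""
    times = np.array([cube.coord('time').points[0] for cube in cubes])
    intervals = np.diff(times)
    # Regular spacing means every interval equals the first one.
    return bool(np.all(intervals == intervals[0]))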
Example No. 2
    def test_raises_exception_for_small_accumulation_period(self):
        """Test that an exception is raised when the accumulation_period
        requested is less than the time interval between the rates cubes
        provided, as the accumulation cannot then be computed."""
        msg = ("The accumulation_period is less than the time interval "
               "between the rates cubes. The rates cubes provided are "
               "therefore insufficient for computing the accumulation period "
               "requested.")
        reduced_cubelist = iris.cube.CubeList([self.cubes[0], self.cubes[-1]])
        plugin = Accumulation(accumulation_period=5 * 60,
                              forecast_periods=np.array([5]) * 60)
        with self.assertRaisesRegex(ValueError, msg):
            plugin.process(reduced_cubelist)
Example No. 3
    def test_returns_total_accumulation_if_no_period_specified(self):
        """Test function returns a list containing a single accumulation cube
        that is the accumulation over the whole period specified by the rates
        cubes. The results are the same as the 10 minute test above as that is
        the total span of the input rates cubes. Check that the number of
        accumulation cubes returned is the expected number."""

        expected_t0 = np.array(
            [[0.015, 0.045, 0.075, 0.105, 0.135, 0.195, 0.285, 0.375, 0.465,
              0.555],
             [0.015, 0.045, 0.075, 0.105, 0.135, np.nan, np.nan, np.nan,
              np.nan, np.nan],
             [0., 0., 0., 0., 0., np.nan, np.nan, np.nan, np.nan, np.nan],
             [0., 0., 0., 0., 0., 0.045, 0.135, 0.225, 0.315, 0.405]])

        expected_mask_t0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                                     [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

        plugin = Accumulation(accumulation_units='mm')
        result = plugin.process(self.cubes)

        self.assertArrayAlmostEqual(result[0].data, expected_t0)
        self.assertArrayAlmostEqual(result[0].data.mask, expected_mask_t0)
        self.assertEqual(len(result), 1)
Example No. 4
    def test_returns_expected_values_10_minutes(self):
        """Test function returns the expected accumulations over the complete
        10 minute aggregation period. These are written out longhand to make
        the comparison easy. Note that the test has been constructed such that
        only the top row is expected to show a difference by including the last
        5 minutes of the accumulation, all the other results are the same as
        for the 5 minute test above. Check that the number of accumulation
        cubes returned is the expected number."""

        expected_t0 = np.array(
            [[0.015, 0.045, 0.075, 0.105, 0.135, 0.195, 0.285, 0.375, 0.465,
              0.555],
             [0.015, 0.045, 0.075, 0.105, 0.135, np.nan, np.nan, np.nan,
              np.nan, np.nan],
             [0., 0., 0., 0., 0., np.nan, np.nan, np.nan, np.nan, np.nan],
             [0., 0., 0., 0., 0., 0.045, 0.135, 0.225, 0.315, 0.405]])

        expected_mask_t0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                                     [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

        plugin = Accumulation(accumulation_period=600,
                              accumulation_units='mm',
                              forecast_periods=[600])
        result = plugin.process(self.cubes)

        self.assertArrayAlmostEqual(result[0].data, expected_t0)
        self.assertArrayAlmostEqual(result[0].data.mask, expected_mask_t0)
        self.assertEqual(len(result), 1)
Example No. 5
    def test_returns_cubelist(self):
        """Test function returns a cubelist."""

        plugin = Accumulation(accumulation_period=60,
                              forecast_periods=self.forecast_periods)
        result = plugin.process(self.cubes)
        self.assertIsInstance(result, iris.cube.CubeList)
Example No. 6
    def test_returns_expected_values_1_minute(self):
        """Test function returns the expected accumulations over a 1 minute
        aggregation period. Check that the number of accumulation cubes
        returned is the expected number."""

        expected_t0 = np.array(
            [[0.015, 0.03, 0.03, 0.03, 0.03, 0.06, 0.09, 0.09, 0.09, 0.09],
             [0.015, 0.03, 0.03, 0.03, 0.03, np.nan, np.nan, 0.09, 0.09, 0.09],
             [0., 0., 0., 0., 0., np.nan, np.nan, 0.09, 0.09, 0.09],
             [0., 0., 0., 0., 0., 0.045, 0.09, 0.09, 0.09, 0.09]])

        expected_t7 = np.array(
            [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.015, 0.03, 0.03],
             [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.015, 0.03, 0.03],
             [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
             [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])

        expected_mask_t0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

        expected_mask_t7 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

        plugin = Accumulation(accumulation_period=60, accumulation_units='mm')
        result = plugin.process(self.cubes)

        self.assertArrayAlmostEqual(result[0].data, expected_t0)
        self.assertArrayAlmostEqual(result[7].data, expected_t7)
        self.assertArrayAlmostEqual(result[0].data.mask, expected_mask_t0)
        self.assertArrayAlmostEqual(result[7].data.mask, expected_mask_t7)
        self.assertEqual(len(result), 10)
Example No. 7
    def test_returns_expected_values_5_minutes(self):
        """Test function returns the expected accumulations over a 5 minute
        aggregation period. These are written out longhand to make the
        comparison easy. Check that the number of accumulation cubes returned
        is the expected number."""

        expected_t0 = np.array([
            [0.015, 0.045, 0.075, 0.105, 0.135, 0.18, 0.24, 0.3, 0.36, 0.42],
            [0.015, 0.045, 0.075, 0.105, 0.135, np.nan, np.nan, np.nan,
             np.nan, np.nan],
            [0.0, 0.0, 0.0, 0.0, 0.0, np.nan, np.nan, np.nan, np.nan, np.nan],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.045, 0.135, 0.225, 0.315, 0.405],
        ])

        expected_t1 = np.array([
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.015, 0.045, 0.075, 0.105, 0.135],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.015, 0.045, 0.075, 0.105, 0.135],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ])

        expected_mask_t0 = np.array([
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
            [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ])

        expected_mask_t1 = np.array([
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ])

        plugin = Accumulation(
            accumulation_period=300,
            accumulation_units="mm",
            forecast_periods=[300, 600],
        )
        result = plugin.process(self.cubes)

        self.assertArrayAlmostEqual(result[0].data, expected_t0)
        self.assertArrayAlmostEqual(result[1].data, expected_t1)
        self.assertArrayAlmostEqual(result[0].data.mask, expected_mask_t0)
        self.assertArrayAlmostEqual(result[1].data.mask, expected_mask_t1)
        self.assertEqual(len(result), 2)
Example No. 8
    def test_does_not_use_incomplete_period_data(self):
        """Test function returns only 2 accumulation periods when a 4 minute
        aggregation period is used with 10 minutes of input data. The trailing
        2 cubes are insufficient to create another period and so are discarded.
        A warning is raised by the chunking function and has been tested above,
        so is ignored here.
        """

        plugin = Accumulation(accumulation_period=240,
                              forecast_periods=[240, 480])
        result = plugin.process(self.cubes)
        self.assertEqual(len(result), 2)
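
The expected count follows from simple arithmetic; a sketch, assuming the test
fixtures provide 10 minutes of input rates data as described in the docstring:

total_span = 600           # seconds of input rates data (10 minutes)
accumulation_period = 240  # seconds per requested accumulation period
complete_periods = total_span // accumulation_period  # -> 2 complete periods
remainder = total_span % accumulation_period          # -> 120 s is discarded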
Example No. 9
    def test_accumulation_length(self):
        """Test to check that the length of the accumulation period is
        consistent across all output cubes. Only complete periods are
        required."""

        accumulation_length = 120
        plugin = Accumulation(accumulation_period=accumulation_length,
                              forecast_periods=self.forecast_periods)
        result = plugin.process(self.cubes)
        for cube in result:
            self.assertEqual(np.diff(cube.coord("forecast_period").bounds),
                             accumulation_length)
Example No. 10
    def test_default_altered_output_units(self):
        """Test the function returns accumulations in the specified units if
        they are explicitly set. Here the units are set to mm."""

        # Multiply the rates in mm/s by 60 to get accumulation over 1 minute
        expected = self.cubes[0].copy(
            data=(0.5 * (self.cubes[0].data + self.cubes[1].data) * 60))

        plugin = Accumulation(accumulation_units='mm', accumulation_period=60)
        result = plugin.process(self.cubes)

        self.assertEqual(result[0].units, 'mm')
        self.assertArrayAlmostEqual(result[0].data, expected.data)
Example No. 11
    def test_default_output_units(self):
        """Test the function returns accumulations in the default units if no
        units are explicitly set, where the default is metres."""

        # Multiply the rates in mm/s by 60 to get accumulation over 1 minute
        # and divide by 1000 to get into metres.
        expected = self.cubes[0].copy(
            data=(0.5 * (self.cubes[0].data + self.cubes[1].data) * 60 / 1000))

        plugin = Accumulation(accumulation_period=60)
        result = plugin.process(self.cubes)

        self.assertEqual(result[0].units, 'm')
        self.assertArrayAlmostEqual(result[0].data, expected.data)
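
Both unit tests above build their expected data from the trapezoidal rule
spelled out in the comments: the accumulation between two rates cubes is the
mean of the bounding rates multiplied by the interval length, converted to the
output units. A standalone numpy sketch with hypothetical rate values:

import numpy as np

rate_t0 = np.array([1.0e-3, 2.0e-3])  # hypothetical rates in mm/s
rate_t1 = np.array([3.0e-3, 4.0e-3])  # rates one minute later
interval = 60                         # seconds between the two cubes

accumulation_mm = 0.5 * (rate_t0 + rate_t1) * interval  # depth in mm
accumulation_m = accumulation_mm / 1000                 # default units, m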
Example No. 12
def main(argv=None):
    """Extrapolate data forward in time."""

    parser = ArgParser(
        description="Extrapolate input data to required lead times.")
    parser.add_argument("input_filepath", metavar="INPUT_FILEPATH",
                        type=str, help="Path to input NetCDF file.")

    group = parser.add_mutually_exclusive_group()
    group.add_argument("--output_dir", metavar="OUTPUT_DIR", type=str,
                       default="", help="Directory to write output files.")
    group.add_argument("--output_filepaths", nargs="+", type=str,
                       help="List of full paths to output nowcast files, in "
                       "order of increasing lead time.")

    optflw = parser.add_argument_group('Advect using files containing the x '
                                       'and y components of the velocity')
    optflw.add_argument("--eastward_advection_filepath", type=str, help="Path"
                        " to input file containing Eastward advection "
                        "velocities.")
    optflw.add_argument("--northward_advection_filepath", type=str, help="Path"
                        " to input file containing Northward advection "
                        "velocities.")

    speed = parser.add_argument_group('Advect using files containing speed and'
                                      ' direction')
    speed.add_argument("--advection_speed_filepath", type=str, help="Path"
                       " to input file containing advection speeds,"
                       " usually wind speeds, on multiple pressure levels.")
    speed.add_argument("--advection_direction_filepath", type=str,
                       help="Path to input file containing the directions from"
                       " which advection speeds are coming (180 degrees from"
                       " the direction in which the speed is directed). The"
                       " directions should be on the same grid as the input"
                       " speeds, including the same vertical levels.")
    speed.add_argument("--pressure_level", type=int, default=75000, help="The"
                       " pressure level in Pa to extract from the multi-level"
                       " advection_speed and advection_direction files. The"
                       " velocities at this level are used for advection.")
    parser.add_argument("--orographic_enhancement_filepaths", nargs="+",
                        type=str, default=None, help="List or wildcarded "
                        "file specification for the input orographic "
                        "enhancement files. Orographic enhancement files are "
                        "compulsory for precipitation fields.")
    parser.add_argument("--json_file", metavar="JSON_FILE", default=None,
                        help="Filename for the json file containing "
                        "required changes to the metadata. Information "
                        "describing the intended contents of the json file "
                        "is available in "
                        "improver.utilities.cube_metadata.amend_metadata."
                        "Every output cube will have the metadata_dict "
                        "applied. Defaults to None.", type=str)
    parser.add_argument("--max_lead_time", type=int, default=360,
                        help="Maximum lead time required (mins).")
    parser.add_argument("--lead_time_interval", type=int, default=15,
                        help="Interval between required lead times (mins).")

    accumulation_args = parser.add_argument_group(
        'Calculate accumulations from advected fields')
    accumulation_args.add_argument(
        "--accumulation_fidelity", type=int, default=0,
        help="If set, this CLI will additionally return accumulations"
        " calculated from the advected fields. This fidelity specifies the"
        " time interval in minutes between advected fields that is used to"
        " calculate these accumulations. This interval must be a factor of"
        " the lead_time_interval.")
    accumulation_args.add_argument(
        "--accumulation_units", type=str, default='m',
        help="Desired units in which the accumulations should be expressed,"
        "e.g. mm")

    args = parser.parse_args(args=argv)

    upath, vpath = (args.eastward_advection_filepath,
                    args.northward_advection_filepath)
    spath, dpath = (args.advection_speed_filepath,
                    args.advection_direction_filepath)

    # load files and initialise advection plugin
    input_cube = load_cube(args.input_filepath)
    if (upath and vpath) and not (spath or dpath):
        ucube = load_cube(upath)
        vcube = load_cube(vpath)
    elif (spath and dpath) and not (upath or vpath):
        level_constraint = Constraint(pressure=args.pressure_level)
        try:
            scube = load_cube(spath, constraints=level_constraint)
            dcube = load_cube(dpath, constraints=level_constraint)
        except ValueError as err:
            raise ValueError(
                '{} Unable to extract specified pressure level from given '
                'speed and direction files.'.format(err))

        ucube, vcube = ResolveWindComponents().process(scube, dcube)
    else:
        raise ValueError('Cannot mix advection component velocities with speed'
                         ' and direction')

    oe_cube = None
    if args.orographic_enhancement_filepaths:
        oe_cube = load_cube(args.orographic_enhancement_filepaths)

    metadata_dict = None
    if args.json_file:
        # Load JSON file for metadata amendments.
        with open(args.json_file, 'r') as input_file:
            metadata_dict = json.load(input_file)

    # generate list of lead times in minutes
    lead_times = np.arange(0, args.max_lead_time+1,
                           args.lead_time_interval)

    if args.output_filepaths:
        if len(args.output_filepaths) != len(lead_times):
            raise ValueError("Require exactly one output file name for each "
                             "forecast lead time")

    # determine whether accumulations are also to be returned.
    time_interval = args.lead_time_interval
    if args.accumulation_fidelity > 0:
        fraction, _ = np.modf(args.lead_time_interval /
                              args.accumulation_fidelity)
        if fraction != 0:
            msg = ("The specified lead_time_interval ({}) is not cleanly "
                   "divisible by the specified accumulation_fidelity ({}). As "
                   "a result the lead_time_interval cannot be constructed from"
                   " accumulation cubes at this fidelity.".format(
                       args.lead_time_interval, args.accumulation_fidelity))
            raise ValueError(msg)

        time_interval = args.accumulation_fidelity
        lead_times = np.arange(0, args.max_lead_time+1, time_interval)

    lead_time_filter = args.lead_time_interval // time_interval

    forecast_plugin = CreateExtrapolationForecast(
        input_cube, ucube, vcube, orographic_enhancement_cube=oe_cube,
        metadata_dict=metadata_dict)

    # extrapolate input data to required lead times
    forecast_cubes = iris.cube.CubeList()
    for lead_time in lead_times:
        forecast_cubes.append(
            forecast_plugin.extrapolate(leadtime_minutes=lead_time))

    # save the extrapolated rate cubes
    for i, cube in enumerate(forecast_cubes[::lead_time_filter]):
        # save to a suitably-named output file
        if args.output_filepaths:
            file_name = args.output_filepaths[i]
        else:
            file_name = os.path.join(
                args.output_dir, generate_file_name(cube))
        save_netcdf(cube, file_name)

    # calculate accumulations if required
    if args.accumulation_fidelity > 0:
        plugin = Accumulation(accumulation_units=args.accumulation_units,
                              accumulation_period=args.lead_time_interval * 60)
        accumulation_cubes = plugin.process(forecast_cubes)

        # save accumulation cubes
        for i, cube in enumerate(accumulation_cubes):
            file_name = os.path.join(args.output_dir, generate_file_name(cube))
            save_netcdf(cube, file_name)
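
A hypothetical invocation of this CLI via main(), using only the flags defined
above (all file names are placeholders; the accumulation_fidelity of 5 minutes
is chosen to divide the 15 minute lead_time_interval cleanly):

main([
    "precip_rate.nc",
    "--eastward_advection_filepath", "wind_u.nc",
    "--northward_advection_filepath", "wind_v.nc",
    "--orographic_enhancement_filepaths", "orographic_enhancement.nc",
    "--max_lead_time", "90",
    "--lead_time_interval", "15",
    "--accumulation_fidelity", "5",
    "--accumulation_units", "mm",
    "--output_dir", "/tmp/nowcast",
])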
Example No. 13
def process(input_cube,
            u_cube,
            v_cube,
            speed_cube,
            direction_cube,
            orographic_enhancement_cube=None,
            metadata_dict=None,
            max_lead_time=360,
            lead_time_interval=15,
            accumulation_fidelity=0,
            accumulation_period=15,
            accumulation_units='m'):
    """Module  to extrapolate input cubes given advection velocity fields.

    Args:
        input_cube (iris.cube.Cube):
            The input Cube to be processed.
        u_cube (iris.cube.Cube):
            Cube with the velocities in the x direction.
            Must be used with v_cube.
            speed_cube and direction_cube must be None.
        v_cube (iris.cube.Cube):
            Cube with the velocities in the y direction.
            Must be used with u_cube.
            speed_cube and direction_cube must be None.
        speed_cube (iris.cube.Cube):
            Cube containing advection speeds, usually wind speed.
            Must be used with direction_cube.
            u_cube and v_cube must be None.
        direction_cube (iris.cube.Cube):
            Cube containing the directions from which the advection speeds
            are coming (180 degrees from the direction in which the speed
            is directed). The directions should be on the same grid as the
            input speeds, including the same vertical levels.
            Must be used with speed_cube.
            u_cube and v_cube must be None.
        orographic_enhancement_cube (iris.cube.Cube):
            Cube containing the orographic enhancement fields. May have data
            for multiple times in the cube.
            Default is None.
        metadata_dict (dict):
            Dictionary containing the required changes to the metadata.
            Information describing the intended contents of the dictionary
            is available in improver.utilities.cube_metadata.amend_metadata.
            Every output cube will have the metadata_dict applied.
            Default is None.
        max_lead_time (int):
            Maximum lead time required (mins).
            Default is 360.
        lead_time_interval (int):
            Interval between required lead times (mins).
            Default is 15.
        accumulation_fidelity (int):
            If set, this will additionally return accumulations calculated
            from the advected fields. This fidelity specifies the time
            interval in minutes between advected fields that is used to
            calculate these accumulations. This interval must be a factor of
            the lead_time_interval.
            Default is 0.
        accumulation_period (int):
            The period over which the accumulation is calculated (mins).
            Only full accumulation periods will be computed. At lead times
            that are shorter than the accumulation period, no accumulation
            output will be produced.
            Default is 15.
        accumulation_units (str):
            Desired units in which the accumulations should be expressed.
            e.g. 'mm'
            Default is 'm'.

    Returns:
        (tuple) tuple containing:
            **accumulation_cubes** (iris.cube.CubeList):
                A cubelist containing precipitation accumulation cubes where
                the accumulation periods are determined by the
                lead_time_interval.
            **forecast_to_return** (iris.cube.CubeList):
                New cubes with updated time and extrapolated data.

    Raises:
        ValueError:
            If advection velocity inputs are mixed: either speed_cube and
            direction_cube, or u_cube and v_cube, must be provided, but not
            a mixture of the two.
        ValueError:
            If accumulation_fidelity is greater than 0 and
            lead_time_interval is not cleanly divisible by
            accumulation_fidelity.
    """

    if (speed_cube and direction_cube) and not (u_cube or v_cube):
        u_cube, v_cube = ResolveWindComponents().process(
            speed_cube, direction_cube)
    elif (u_cube or v_cube) and (speed_cube or direction_cube):
        raise ValueError('Cannot mix advection component velocities with speed'
                         ' and direction')
    # generate list of lead times in minutes
    lead_times = np.arange(0, max_lead_time + 1, lead_time_interval)

    # determine whether accumulations are also to be returned.
    time_interval = lead_time_interval
    if accumulation_fidelity > 0:
        fraction, _ = np.modf(lead_time_interval / accumulation_fidelity)
        if fraction != 0:
            msg = ("The specified lead_time_interval ({}) is not cleanly "
                   "divisible by the specified accumulation_fidelity ({}). As "
                   "a result the lead_time_interval cannot be constructed from"
                   " accumulation cubes at this fidelity.")
            raise ValueError(
                msg.format(lead_time_interval, accumulation_fidelity))

        time_interval = accumulation_fidelity
        lead_times = np.arange(0, max_lead_time + 1, time_interval)

    lead_time_filter = lead_time_interval // time_interval
    forecast_plugin = CreateExtrapolationForecast(
        input_cube,
        u_cube,
        v_cube,
        orographic_enhancement_cube=orographic_enhancement_cube,
        metadata_dict=metadata_dict)

    # extrapolate input data to required lead times
    forecast_cubes = iris.cube.CubeList()
    for lead_time in lead_times:
        forecast_cubes.append(
            forecast_plugin.extrapolate(leadtime_minutes=lead_time))

    forecast_to_return = forecast_cubes[::lead_time_filter].copy()
    # calculate accumulations if required
    accumulation_cubes = None
    if accumulation_fidelity > 0:
        lead_times = (np.arange(lead_time_interval, max_lead_time + 1,
                                lead_time_interval))
        plugin = Accumulation(accumulation_units=accumulation_units,
                              accumulation_period=accumulation_period * 60,
                              forecast_periods=lead_times * 60)
        accumulation_cubes = plugin.process(forecast_cubes)

    return accumulation_cubes, forecast_to_return
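
A minimal usage sketch for process(), under the same assumptions as the CLI
above (file names are placeholders; load_cube is the loader already used in
the previous example):

input_cube = load_cube("precip_rate.nc")
u_cube = load_cube("wind_u.nc")
v_cube = load_cube("wind_v.nc")

accumulation_cubes, forecast_cubes = process(
    input_cube, u_cube, v_cube,
    speed_cube=None, direction_cube=None,
    max_lead_time=90, lead_time_interval=15,
    accumulation_fidelity=5, accumulation_period=15,
    accumulation_units="mm")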