def setUp(self):
    """Build the plugin and two rainfall-like fields for derivative tests.

    The fields are large relative to the rain feature and zero-padded so
    that the smoothing algorithms behave sensibly; the second field is the
    same rain block displaced by one cell in each direction.
    """
    self.plugin = OpticalFlow(iterations=10)
    self.plugin.data_smoothing_radius = 3
    self.plugin.boxsize = 3
    # Concentric rain feature: 1s, 2s inside, 3s at the core
    block = np.ones((7, 7))
    block[1:5, 1:5] = 2.0
    block[2:4, 2:4] = 3.0
    data_t0 = np.zeros((10, 10))
    data_t0[1:8, 2:9] = block
    self.plugin.data1 = data_t0
    self.plugin.shape = data_t0.shape
    data_t1 = np.zeros((10, 10))
    data_t1[2:9, 1:8] = block
    self.plugin.data2 = data_t1
    # Pre-compute the partial derivatives used by the tests
    self.partial_dx = self.plugin._partial_derivative_spatial(axis=1)
    self.partial_dy = self.plugin._partial_derivative_spatial(axis=0)
    self.partial_dt = self.plugin._partial_derivative_temporal()
def setUp(self):
    """Build plugin options and a pair of float32 rainfall-like fields.

    Large zero-padded matrices are needed for the smoothing algorithms to
    behave sensibly; the second field is the first displaced diagonally.
    """
    self.plugin = OpticalFlow(iterations=20)
    self.plugin.boxsize = 3
    self.smoothing_kernel = 3
    # Concentric rain feature: 1s, 2s inside, 3s at the core
    block = np.ones((7, 7))
    block[1:5, 1:5] = 2.0
    block[2:4, 2:4] = 3.0
    self.first_input = np.zeros((16, 16), dtype=np.float32)
    self.first_input[1:8, 2:9] = block
    self.second_input = np.zeros((16, 16), dtype=np.float32)
    self.second_input[2:9, 1:8] = block
def setUp(self):
    """Build the plugin and a pair of rainfall-like cubes 15 minutes apart."""
    self.plugin = OpticalFlow(iterations=20)
    self.plugin.data_smoothing_radius_km = np.float32(6.)
    grid_points = 2000 * np.arange(16, dtype=np.float32)  # metres
    # Concentric rain feature: 1s, 2s inside, 3s at the core
    block = np.ones((7, 7), dtype=np.float32)
    block[1:5, 1:5] = 2.0
    block[2:4, 2:4] = 3.0
    data1 = np.zeros((16, 16), dtype=np.float32)
    data1[1:8, 2:9] = block
    self.cube1 = set_up_variable_cube(
        data1, name="rainfall_rate", units="mm h-1",
        spatial_grid="equalarea",
        time=datetime(2018, 2, 20, 4, 0),
        frt=datetime(2018, 2, 20, 4, 0))
    data2 = np.zeros((16, 16), dtype=np.float32)
    data2[2:9, 1:8] = block
    self.cube2 = set_up_variable_cube(
        data2, name="rainfall_rate", units="mm h-1",
        spatial_grid="equalarea",
        time=datetime(2018, 2, 20, 4, 15),
        frt=datetime(2018, 2, 20, 4, 15))
    # Impose a regular 2 km grid on both cubes
    for cube in (self.cube1, self.cube2):
        cube.coord(axis='x').points = grid_points
        cube.coord(axis='y').points = grid_points
class Test_calculate_displacement_vectors(IrisTest):
    """Tests for calculation of advection displacement vectors."""

    def setUp(self):
        """Build plugin options and rainfall-like matrices that produce
        non-singular outputs.  Large zero-padded matrices are needed for
        the smoothing algorithms to behave sensibly."""
        self.plugin = OpticalFlow(iterations=20)
        self.plugin.data_smoothing_radius = 3
        self.plugin.boxsize = 3
        # Concentric rain feature: 1s, 2s inside, 3s at the core
        block = np.ones((7, 7))
        block[1:5, 1:5] = 2.0
        block[2:4, 2:4] = 3.0
        data_t0 = np.zeros((10, 10), dtype=np.float32)
        data_t0[1:8, 2:9] = block
        self.plugin.data1 = data_t0
        self.plugin.shape = data_t0.shape
        data_t1 = np.zeros((10, 10), dtype=np.float32)
        data_t1[2:9, 1:8] = block
        self.plugin.data2 = data_t1
        # Pre-compute the partial derivative inputs
        self.partial_dx = self.plugin._partial_derivative_spatial(axis=1)
        self.partial_dy = self.plugin._partial_derivative_spatial(axis=0)
        self.partial_dt = self.plugin._partial_derivative_temporal()

    def test_basic(self):
        """Outputs are arrays matching the plugin's grid shape."""
        umat, _ = self.plugin.calculate_displacement_vectors(
            self.partial_dx, self.partial_dy, self.partial_dt)
        self.assertIsInstance(umat, np.ndarray)
        self.assertSequenceEqual(umat.shape, self.plugin.shape)

    def test_values(self):
        """Mean displacement components match known regression values."""
        umat, vmat = self.plugin.calculate_displacement_vectors(
            self.partial_dx, self.partial_dy, self.partial_dt)
        self.assertAlmostEqual(np.mean(umat), np.float32(-0.124607998))
        self.assertAlmostEqual(np.mean(vmat), np.float32(0.124607998))
def test_metadata(self):
    """Output cube attributes match the metadata dictionary supplied."""
    attributes = {
        "mosg__grid_version": "1.0.0",
        "mosg__model_configuration": "nc_det",
        "source": "Met Office Nowcast",
        "institution": "Met Office",
        "title": "Nowcast on UK 2 km Standard Grid",
    }
    metadata_dict = {"attributes": attributes}
    plugin = OpticalFlow(iterations=20, metadata_dict=metadata_dict)
    plugin.data_smoothing_radius_km = 6.
    ucube, vcube = plugin.process(self.cube1, self.cube2, boxsize=3)
    for cube in (ucube, vcube):
        self.assertEqual(cube.attributes, attributes)
def test_basic(self):
    """The default plugin's repr lists its configuration values."""
    expected = (
        "<OpticalFlow: data_smoothing_radius_km: 14.0, "
        "data_smoothing_method: box, iterations: 100, "
        "point_weight: 0.1>"
    )
    self.assertEqual(str(OpticalFlow()), expected)
def test_basic(self):
    """The default plugin's repr lists its configuration values."""
    expected = (
        '<OpticalFlow: data_smoothing_radius_km: 7.0, '
        'data_smoothing_method: box, boxsize_km: 30.0, '
        'iterations: 100, point_weight: 0.1>'
    )
    self.assertEqual(str(OpticalFlow()), expected)
def test_basic(self):
    """The default plugin's repr lists its configuration values."""
    expected = (
        '<OpticalFlow: data_smoothing_radius_km: 14.0, '
        'data_smoothing_method: box, iterations: 100, '
        'point_weight: 0.1, attributes_dict: {}>'
    )
    self.assertEqual(str(OpticalFlow()), expected)
def setUp(self):
    """Build input velocity matrices, weights, and a dummy plugin."""
    self.umat = np.array([[1., 0., 0., 0., 0.],
                          [1., 1., 0., 0., 0.],
                          [2., 1., 1., 0., 0.],
                          [3., 2., 1., 1., 0.]])
    self.vmat = np.array([[3., 2., 1., 0., 0.],
                          [2., 1., 0., 0., 0.],
                          [1., 0., 0., 0., 0.],
                          [0., 0., 0., 1., 0.]])
    # Weights proportional to the elementwise product of the components
    self.weights = 0.3 * self.umat * self.vmat
    self.plugin = OpticalFlow(iterations=20)
    self.plugin.data_smoothing_radius = 3
    self.plugin.boxsize = 3
    # NOTE data dimensions are NOT exact multiples of box size
    self.plugin.data1 = np.zeros((11, 14))
    self.plugin.shape = self.plugin.data1.shape
class Test_process_dimensionless(IrisTest):
    """Tests for the process_dimensionless method."""

    def setUp(self):
        """Build plugin options and float32 rainfall-like input fields.

        Large zero-padded matrices are needed for the smoothing algorithms
        to behave sensibly."""
        self.plugin = OpticalFlow(iterations=20)
        self.plugin.boxsize = 3
        self.smoothing_kernel = 3
        # Concentric rain feature: 1s, 2s inside, 3s at the core
        block = np.ones((7, 7))
        block[1:5, 1:5] = 2.0
        block[2:4, 2:4] = 3.0
        self.first_input = np.zeros((16, 16), dtype=np.float32)
        self.first_input[1:8, 2:9] = block
        self.second_input = np.zeros((16, 16), dtype=np.float32)
        self.second_input[2:9, 1:8] = block

    def test_basic(self):
        """Outputs are arrays with known regression mean values."""
        ucomp, vcomp = self.plugin.process_dimensionless(
            self.first_input, self.second_input, 0, 1,
            self.smoothing_kernel)
        self.assertIsInstance(ucomp, np.ndarray)
        self.assertIsInstance(vcomp, np.ndarray)
        self.assertAlmostEqual(np.mean(ucomp), 0.97735876)
        self.assertAlmostEqual(np.mean(vcomp), -0.97735894)

    def test_axis_inversion(self):
        """Swapping x and y axis indices swaps the output components."""
        ucomp, vcomp = self.plugin.process_dimensionless(
            self.first_input, self.second_input, 1, 0,
            self.smoothing_kernel)
        self.assertAlmostEqual(np.mean(ucomp), -0.97735894)
        self.assertAlmostEqual(np.mean(vcomp), 0.97735876)
def test_values(self):
    """A radius-2 kernel has the expected separable triangular weights."""
    expected = np.array([[0., 0., 0., 0., 0.],
                         [0., 0.0625, 0.125, 0.0625, 0.],
                         [0., 0.125, 0.25, 0.125, 0.],
                         [0., 0.0625, 0.125, 0.0625, 0.],
                         [0., 0., 0., 0., 0.]])
    kernel = OpticalFlow().makekernel(2)
    self.assertArrayAlmostEqual(kernel, expected)
def test_null_behaviour(self):
    """Reasonable data values pass through the check unchanged."""
    umat = np.full((3, 4), 0.5)
    expected_umat = umat.copy()
    expected_vmat = self.vmat.copy()
    expected_weights = self.weights.copy()
    # extreme_value_check modifies its arguments in place
    OpticalFlow().extreme_value_check(umat, self.vmat, self.weights)
    self.assertArrayAlmostEqual(umat, expected_umat)
    self.assertArrayAlmostEqual(self.vmat, expected_vmat)
    self.assertArrayAlmostEqual(self.weights, expected_weights)
def setUp(self):
    """Build the plugin and two rainfall-like cubes 15 minutes apart."""
    self.plugin = OpticalFlow(data_smoothing_radius_km=6, boxsize_km=6,
                              iterations=10)
    coord_points = 2 * np.arange(16)  # 2 km spacing
    x_coord = DimCoord(coord_points, 'projection_x_coordinate', units='km')
    y_coord = DimCoord(coord_points, 'projection_y_coordinate', units='km')
    # Concentric rain feature: 1s, 2s inside, 3s at the core
    block = np.ones((7, 7))
    block[1:5, 1:5] = 2.0
    block[2:4, 2:4] = 3.0

    data1 = np.zeros((16, 16))
    data1[1:8, 2:9] = block
    self.cube1 = iris.cube.Cube(
        data1, standard_name='rainfall_rate', units='mm h-1',
        dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)])
    # time1: [datetime.datetime(2018, 2, 20, 4, 0)]
    self.cube1.add_aux_coord(DimCoord(
        1519099200, standard_name="time",
        units='seconds since 1970-01-01 00:00:00'))

    data2 = np.zeros((16, 16))
    data2[2:9, 1:8] = block
    self.cube2 = iris.cube.Cube(
        data2, standard_name='rainfall_rate', units='mm h-1',
        dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)])
    # time2: [datetime.datetime(2018, 2, 20, 4, 15)]
    self.cube2.add_aux_coord(DimCoord(
        1519100100, standard_name="time",
        units='seconds since 1970-01-01 00:00:00'))
def test_basic(self):
    """A default plugin initialises with the expected attribute types."""
    plugin = OpticalFlow()
    self.assertIsInstance(plugin.data_smoothing_radius_km, float)
    self.assertIsInstance(plugin.data_smoothing_method, str)
    self.assertIsInstance(plugin.iterations, int)
    self.assertIsInstance(plugin.point_weight, float)
    # Data members are unset until process() is called
    for attr in ("data1", "data2", "shape"):
        self.assertIsNone(getattr(plugin, attr))
def setUp(self):
    """Build a dummy plugin and populate its data members."""
    self.plugin = OpticalFlow()
    # Two ramp-like fields; the second is the first advected one cell
    # to the right
    self.plugin.data1 = np.array(
        [[1., 2., 3., 4., 5.],
         [0., 1., 2., 3., 4.],
         [0., 0., 1., 2., 3.]])
    self.plugin.data2 = np.array(
        [[0., 1., 2., 3., 4.],
         [0., 0., 1., 2., 3.],
         [0., 0., 0., 1., 2.]])
    self.plugin.shape = self.plugin.data1.shape
def test_values(self):
    """Extreme data values are infilled with zeros in place."""
    expected_umat = np.array([[0., 0.2, 0.4, 0.6],
                              [0.8, 0., 0., 0.],
                              [0., 0., 0., 0.]])
    expected_vmat = np.array([[-0.1, -0.1, -0.1, -0.1],
                              [-0.1, 0., 0., 0.],
                              [0., 0., 0., 0.]])
    expected_weights = np.array([[0.5, 0.5, 0.5, 0.5],
                                 [0.5, 0., 0., 0.],
                                 [0., 0., 0., 0.]])
    # extreme_value_check modifies its arguments in place
    OpticalFlow().extreme_value_check(self.umat, self.vmat, self.weights)
    self.assertArrayAlmostEqual(self.umat, expected_umat)
    self.assertArrayAlmostEqual(self.vmat, expected_vmat)
    self.assertArrayAlmostEqual(self.weights, expected_weights)
def setUp(self):
    """Build plugin options and a pair of rainfall-like input fields.

    Large zero-padded matrices are needed for the smoothing algorithms
    to behave sensibly."""
    self.plugin = OpticalFlow(iterations=10)
    self.plugin.data_smoothing_radius = 3
    self.plugin.boxsize = 3
    # Concentric rain feature: 1s, 2s inside, 3s at the core
    block = np.ones((7, 7))
    block[1:5, 1:5] = 2.0
    block[2:4, 2:4] = 3.0
    self.first_input = np.zeros((16, 16))
    self.first_input[1:8, 2:9] = block
    self.second_input = np.zeros((16, 16))
    self.second_input[2:9, 1:8] = block
def setUp(self):
    """Build a dummy plugin and populate its data members."""
    self.plugin = OpticalFlow()
    ramp = np.array([[1.0, 2.0, 3.0, 4.0, 5.0],
                     [0.0, 1.0, 2.0, 3.0, 4.0],
                     [0.0, 0.0, 1.0, 2.0, 3.0]])
    self.plugin.data1 = ramp
    # The "later" field is the same ramp advected one cell to the right;
    # for this pattern that equals subtracting one and clipping at zero
    self.plugin.data2 = np.clip(ramp - 1.0, 0.0, None)
    self.plugin.shape = ramp.shape
def test_basic(self):
    """Both solved velocity components are plain floats."""
    u, v = OpticalFlow().solve_for_uv(self.I_xy, self.I_t)
    for component in (u, v):
        self.assertIsInstance(component, float)
class Test_process(IrisTest):
    """Test the process method.

    Fix: the class previously mixed the deprecated unittest alias
    ``assertRaisesRegexp`` with the modern ``assertRaisesRegex``; all
    uses are normalised to ``assertRaisesRegex`` (identical behaviour,
    no DeprecationWarning).
    """

    def setUp(self):
        """Set up plugin and input rainfall-like cubes 15 minutes apart
        on a 2 km grid."""
        self.plugin = OpticalFlow(iterations=20)
        self.plugin.data_smoothing_radius_km = np.float32(6.)
        coord_points = 2 * np.arange(16, dtype=np.float32)
        x_coord = DimCoord(coord_points, 'projection_x_coordinate',
                           units='km')
        y_coord = DimCoord(coord_points, 'projection_y_coordinate',
                           units='km')
        rainfall_block = np.array(
            [[1., 1., 1., 1., 1., 1., 1.],
             [1., 2., 2., 2., 2., 1., 1.],
             [1., 2., 3., 3., 2., 1., 1.],
             [1., 2., 3., 3., 2., 1., 1.],
             [1., 2., 2., 2., 2., 1., 1.],
             [1., 1., 1., 1., 1., 1., 1.],
             [1., 1., 1., 1., 1., 1., 1.]],
            dtype=np.float32)
        data1 = np.zeros((16, 16))
        data1[1:8, 2:9] = rainfall_block
        self.cube1 = iris.cube.Cube(
            data1, standard_name='rainfall_rate', units='mm h-1',
            dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)])
        # time1: [datetime.datetime(2018, 2, 20, 4, 0)]
        time1 = DimCoord(1519099200, standard_name="time",
                         units='seconds since 1970-01-01 00:00:00')
        self.cube1.add_aux_coord(time1)
        data2 = np.zeros((16, 16))
        data2[2:9, 1:8] = rainfall_block
        self.cube2 = iris.cube.Cube(
            data2, standard_name='rainfall_rate', units='mm h-1',
            dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)])
        # time2: [datetime.datetime(2018, 2, 20, 4, 15)]
        time2 = DimCoord(1519100100, standard_name="time",
                         units='seconds since 1970-01-01 00:00:00')
        self.cube2.add_aux_coord(time2)

    def test_basic(self):
        """Test correct output types and metadata"""
        ucube, vcube = self.plugin.process(self.cube1, self.cube2,
                                           boxsize=3)
        for cube in [ucube, vcube]:
            self.assertIsInstance(cube, iris.cube.Cube)
            # Velocities are stamped with the later (second) cube's time
            self.assertEqual(
                cube.coord("time")[0], self.cube2.coord("time")[0])
            self.assertEqual(cube.units, "m s-1")
            self.assertIn("precipitation_advection", cube.name())
            self.assertIn("velocity", cube.name())

    def test_metadata(self):
        """Test correct output types and metadata"""
        metadata_dict = {
            "attributes": {
                "mosg__grid_version": "1.0.0",
                "mosg__model_configuration": "nc_det",
                "source": "Met Office Nowcast",
                "institution": "Met Office",
                "title": "Nowcast on UK 2 km Standard Grid"}}
        plugin = OpticalFlow(iterations=20, metadata_dict=metadata_dict)
        plugin.data_smoothing_radius_km = 6.
        ucube, vcube = plugin.process(self.cube1, self.cube2, boxsize=3)
        for cube in [ucube, vcube]:
            self.assertEqual(cube.attributes, metadata_dict["attributes"])

    def test_values(self):
        """Test velocity values are as expected (in m/s)"""
        ucube, vcube = self.plugin.process(self.cube1, self.cube2,
                                           boxsize=3)
        self.assertAlmostEqual(np.mean(ucube.data), -2.1719086)
        self.assertAlmostEqual(np.mean(vcube.data), 2.1719084)

    def test_decrease_time_interval(self):
        """Test that decreasing the time interval between radar frames
        below 15 minutes does not alter the smoothing radius. To test this
        the time interval is halved, which should give an answer identical
        to the values test above multiplied by a factor of two."""
        time_unit = self.cube2.coord("time").units
        new_time = time_unit.num2date(self.cube2.coord("time").points[0])
        new_time -= datetime.timedelta(seconds=450)
        self.cube2.remove_coord("time")
        time_coord = DimCoord(time_unit.date2num(new_time),
                              standard_name="time", units=time_unit)
        self.cube2.add_aux_coord(time_coord)
        ucube, vcube = self.plugin.process(self.cube1, self.cube2,
                                           boxsize=3)
        self.assertAlmostEqual(np.mean(ucube.data), -2.1719086 * 2.)
        self.assertAlmostEqual(np.mean(vcube.data), 2.1719084 * 2.)

    def test_increase_time_interval(self):
        """Test that increasing the time interval between radar frames
        above 15 minutes leads to an increase in the data smoothing radius.
        In this test this will result in a smoothing radius larger than the
        box size, which is not allowed and will raise an exception. The
        updated radius value in this case is 12 km (6 grid squares),
        exceeding the 3 square box size."""
        time_unit = self.cube2.coord("time").units
        new_time = time_unit.num2date(self.cube2.coord("time").points[0])
        new_time += datetime.timedelta(seconds=900)
        self.cube2.remove_coord("time")
        time_coord = DimCoord(time_unit.date2num(new_time),
                              standard_name="time", units=time_unit)
        self.cube2.add_aux_coord(time_coord)
        msg = "Box size ([0-9]+) too small"
        with self.assertRaisesRegex(ValueError, msg):
            _, _ = self.plugin.process(self.cube1, self.cube2, boxsize=3)

    def test_error_small_kernel(self):
        """Test failure if data smoothing radius is too small"""
        self.plugin.data_smoothing_radius_km = 3.
        msg = "Input data smoothing radius 1 too small "
        with self.assertRaisesRegex(ValueError, msg):
            _ = self.plugin.process(self.cube1, self.cube2)

    def test_error_small_box(self):
        """Test failure if box size is smaller than data smoothing
        radius"""
        msg = "Box size 2 too small"
        with self.assertRaisesRegex(ValueError, msg):
            _, _ = self.plugin.process(self.cube1, self.cube2, boxsize=2)

    def test_error_unmatched_coords(self):
        """Test failure if cubes are provided on unmatched grids"""
        cube2 = self.cube2.copy()
        for ax in ["x", "y"]:
            cube2.coord(axis=ax).points = 4 * np.arange(16)
        msg = "Input cubes on unmatched grids"
        with self.assertRaisesRegex(InvalidCubeError, msg):
            _ = self.plugin.process(self.cube1, cube2)

    def test_error_no_time_difference(self):
        """Test failure if two cubes are provided with the same time"""
        msg = "Expected positive time difference "
        with self.assertRaisesRegex(InvalidCubeError, msg):
            _ = self.plugin.process(self.cube1, self.cube1)

    def test_error_negative_time_difference(self):
        """Test failure if cubes are provided in the wrong order"""
        msg = "Expected positive time difference "
        with self.assertRaisesRegex(InvalidCubeError, msg):
            _ = self.plugin.process(self.cube2, self.cube1)

    @ManageWarnings(record=True)
    def test_warning_zero_inputs(self, warning_list=None):
        """Test code raises a warning and sets advection velocities to
        zero if there is no rain in the input cubes."""
        null_data = np.zeros(self.cube1.shape)
        cube1 = self.cube1.copy(data=null_data)
        cube2 = self.cube2.copy(data=null_data)
        ucube, vcube = self.plugin.process(cube1, cube2)
        warning_msg = "No non-zero data in input fields"
        self.assertTrue(
            any(item.category == UserWarning for item in warning_list))
        self.assertTrue(any(warning_msg in str(item)
                            for item in warning_list))
        self.assertArrayAlmostEqual(ucube.data, null_data)
        self.assertArrayAlmostEqual(vcube.data, null_data)

    def test_error_nonmatching_inputs(self):
        """Test failure if cubes are of different data types"""
        self.cube1.rename("snowfall_rate")
        msg = "Input cubes contain different data types"
        with self.assertRaisesRegex(ValueError, msg):
            _, _ = self.plugin.process(self.cube1, self.cube2)

    @ManageWarnings(record=True)
    def test_warning_nonprecip_inputs(self, warning_list=None):
        """Test code raises a warning if input cubes have non-rain
        variable names"""
        self.cube1.rename("snowfall_rate")
        self.cube2.rename("snowfall_rate")
        _, _ = self.plugin.process(self.cube1, self.cube2, boxsize=3)
        warning_msg = "Input data are of non-precipitation type"
        self.assertTrue(
            any(item.category == UserWarning for item in warning_list))
        self.assertTrue(any(warning_msg in str(item)
                            for item in warning_list))
def test_basic(self):
    """makekernel returns a numpy array."""
    kernel = OpticalFlow().makekernel(2)
    self.assertIsInstance(kernel, np.ndarray)
def process(original_cube_list, orographic_enhancement_cube=None,
            metadata_dict=None, ofc_box_size=30,
            smart_smoothing_iterations=100, extrapolate=False,
            max_lead_time=360, lead_time_interval=15):
    """Calculates optical flow and can (optionally) extrapolate data.

    Calculates optical flow components from input fields and (optionally)
    extrapolate to required lead times.

    Args:
        original_cube_list (iris.cube.CubeList):
            Cubelist from which to calculate optical flow velocities.
            The cubes require a 'time' coordinate on which they are sorted,
            so the order of cubes does not matter.
        orographic_enhancement_cube (iris.cube.Cube):
            Cube containing the orographic enhancement fields. Default is
            None.
        metadata_dict (dict):
            Dictionary containing required changes to the metadata.
            Information describing the intended contents of the dictionary
            is available in
            improver.utilities.cube_metadata.amend_metadata.
            Every output cube will have the metadata_dict applied.
            Default is None.
        ofc_box_size (int):
            Size of square 'box' (in grid spaces) within which to solve
            the optical flow equations. Default is 30.
        smart_smoothing_iterations (int):
            Number of iterations to perform in enforcing smoothness
            constraint for optical flow velocities. Default is 100.
        extrapolate (bool):
            If True, advects current data forward to specified lead times.
            Default is False.
        max_lead_time (int):
            Maximum lead time required (mins). Ignored unless extrapolate
            is True. Default is 360.
        lead_time_interval (int):
            Interval between required lead times (mins). Ignored unless
            extrapolate is True. Default is 15.

    Returns:
        (tuple): tuple containing:
            **forecast_cubes** (list<Cube>):
                List of extrapolated Cubes if extrapolate is True, else an
                empty list.
            **u_and_v_mean** (list<Cube>):
                List of the umean and vmean cubes.

    Raises:
        ValueError: If no orographic enhancement cube is supplied but a
            cube is called 'precipitation_rate'.
    """
    if orographic_enhancement_cube:
        # Remove the orographic component before computing flow velocities
        cube_list = ApplyOrographicEnhancement("subtract").process(
            original_cube_list, orographic_enhancement_cube)
    else:
        cube_list = original_cube_list
        # Precipitation-rate inputs are invalid without an orographic
        # enhancement field to subtract
        if any("precipitation_rate" in cube.name() for cube in cube_list):
            cube_names = [cube.name() for cube in cube_list]
            msg = ("For precipitation fields, orographic enhancement "
                   "filepaths must be supplied. The names of the cubes "
                   "supplied were: {}".format(cube_names))
            raise ValueError(msg)

    # order input files by validity time
    cube_list.sort(key=lambda x: x.coord("time").points[0])
    # the mean velocities are stamped with the latest input's time
    time_coord = cube_list[-1].coord("time")

    # calculate optical flow velocities from T-1 to T and T-2 to T-1
    ofc_plugin = OpticalFlow(iterations=smart_smoothing_iterations,
                             metadata_dict=metadata_dict)
    u_cubes = iris.cube.CubeList([])
    v_cubes = iris.cube.CubeList([])
    for older_cube, newer_cube in zip(cube_list[:-1], cube_list[1:]):
        ucube, vcube = ofc_plugin.process(older_cube, newer_cube,
                                          boxsize=ofc_box_size)
        u_cubes.append(ucube)
        v_cubes.append(vcube)

    # average optical flow velocity components over the input intervals
    u_cube = u_cubes.merge_cube()
    u_mean = u_cube.collapsed("time", iris.analysis.MEAN)
    u_mean.coord("time").points = time_coord.points
    u_mean.coord("time").units = time_coord.units
    v_cube = v_cubes.merge_cube()
    v_mean = v_cube.collapsed("time", iris.analysis.MEAN)
    v_mean.coord("time").points = time_coord.points
    v_mean.coord("time").units = time_coord.units
    u_and_v_mean = [u_mean, v_mean]

    forecast_cubes = []
    if extrapolate:
        # generate list of lead times in minutes
        lead_times = np.arange(0, max_lead_time + 1, lead_time_interval)
        # NOTE(review): extrapolation uses the raw (pre-subtraction) latest
        # input; the forecast plugin presumably re-applies the orographic
        # component — confirm against CreateExtrapolationForecast
        forecast_plugin = CreateExtrapolationForecast(
            original_cube_list[-1], u_mean, v_mean,
            orographic_enhancement_cube=orographic_enhancement_cube,
            metadata_dict=metadata_dict)
        # extrapolate input data to required lead times
        for lead_time in lead_times:
            forecast_cubes.append(
                forecast_plugin.extrapolate(leadtime_minutes=lead_time))

    return forecast_cubes, u_and_v_mean
def test_unsuitable_parameters(self):
    """Initialising with a smoothing radius larger than the box size
    raises a ValueError."""
    self.assertRaises(ValueError, OpticalFlow,
                      data_smoothing_radius_km=10, boxsize_km=9.9)
def test_error_small_kernel(self):
    """Test failure if data smoothing radius is too small.

    Fix: use ``assertRaisesRegex`` instead of the deprecated
    ``assertRaisesRegexp`` alias (identical behaviour).
    """
    plugin = OpticalFlow(data_smoothing_radius_km=3, boxsize_km=6)
    msg = "Input data smoothing radius 1 too small "
    with self.assertRaisesRegex(ValueError, msg):
        _ = plugin.process(self.cube1, self.cube2)
class Test__zero_advection_velocities_warning(IrisTest):
    """Test the _zero_advection_velocities_warning.

    Fix: replace ``assertTrue(len(warning_list) == 0)`` with
    ``assertEqual(len(warning_list), 0)`` throughout — the pass/fail
    condition is identical but failures now report the actual count.
    """

    def setUp(self):
        """Set up arrays of advection velocities"""
        self.plugin = OpticalFlow()
        rain = np.ones((3, 3))
        # Mask covering every cell of the 3x3 domain
        self.rain_mask = np.where(rain > 0)

    @ManageWarnings(record=True)
    def test_warning_raised(self, warning_list=None):
        """Test that a warning is raised if an excess number of zero
        values are present within the input array."""
        greater_than_10_percent_zeroes_array = np.array(
            [[3.0, 5.0, 7.0], [0.0, 2.0, 1.0], [1.0, 1.0, 1.0]])
        warning_msg = "cells within the domain have zero advection"
        self.plugin._zero_advection_velocities_warning(
            greater_than_10_percent_zeroes_array, self.rain_mask)
        self.assertTrue(
            any(item.category == UserWarning for item in warning_list))
        self.assertTrue(any(warning_msg in str(item)
                            for item in warning_list))

    @ManageWarnings(record=True)
    def test_no_warning_raised_if_no_zeroes(self, warning_list=None):
        """Test that no warning is raised if the number of zero values in
        the array is below the threshold used to define an excessive
        number of zero values."""
        nonzero_array = np.array(
            [[3.0, 5.0, 7.0], [2.0, 2.0, 1.0], [1.0, 1.0, 1.0]])
        self.plugin._zero_advection_velocities_warning(
            nonzero_array, self.rain_mask)
        self.assertEqual(len(warning_list), 0)

    @ManageWarnings(record=True)
    def test_no_warning_raised_if_fewer_zeroes_than_threshold(
            self, warning_list=None):
        """Test that no warning is raised if the number of zero values in
        the array is below the threshold used to define an excessive
        number of zero values when at least one zero exists within the
        array."""
        rain = np.ones((5, 5))
        less_than_10_percent_zeroes_array = np.array([
            [1.0, 3.0, 5.0, 7.0, 1.0],
            [0.0, 2.0, 1.0, 1.0, 1.0],
            [1.0, 1.0, 1.0, 1.0, 1.0],
            [1.0, 1.0, 1.0, 1.0, 1.0],
            [1.0, 1.0, 1.0, 1.0, 1.0],
        ])
        self.plugin._zero_advection_velocities_warning(
            less_than_10_percent_zeroes_array, np.where(rain > 0))
        self.assertEqual(len(warning_list), 0)

    @ManageWarnings(record=True)
    def test_no_warning_raised_for_modified_threshold(
            self, warning_list=None):
        """Test that no warning is raised if the number of zero values in
        the array is below the threshold used to define an excessive
        number of zero values when the threshold is modified."""
        less_than_30_percent_zeroes_array = np.array(
            [[3.0, 5.0, 7.0], [0.0, 2.0, 1.0], [0.0, 1.0, 1.0]])
        self.plugin._zero_advection_velocities_warning(
            less_than_30_percent_zeroes_array, self.rain_mask,
            zero_vel_threshold=0.3)
        self.assertEqual(len(warning_list), 0)

    @ManageWarnings(record=True)
    def test_no_warning_raised_outside_rain(self, warning_list=None):
        """Test warning ignores zeros outside the rain area mask"""
        rain = np.array([[0, 0, 1], [0, 1, 1], [1, 1, 1]])
        wind = np.array([[0, 0, 1], [0, 1, 1], [1, 1, 1]])
        self.plugin._zero_advection_velocities_warning(
            wind, np.where(rain > 0))
        self.assertEqual(len(warning_list), 0)
class Test_process(IrisTest):
    """Test the process method.

    Fix: replace the deprecated unittest alias ``assertRaisesRegexp``
    with ``assertRaisesRegex`` throughout (identical behaviour, no
    DeprecationWarning).
    """

    def setUp(self):
        """Set up plugin and input rainfall-like cubes 15 minutes apart
        on a 2 km grid."""
        self.plugin = OpticalFlow(data_smoothing_radius_km=6, boxsize_km=6,
                                  iterations=10)
        coord_points = 2 * np.arange(16)
        x_coord = DimCoord(coord_points, 'projection_x_coordinate',
                           units='km')
        y_coord = DimCoord(coord_points, 'projection_y_coordinate',
                           units='km')
        rainfall_block = np.array(
            [[1., 1., 1., 1., 1., 1., 1.],
             [1., 2., 2., 2., 2., 1., 1.],
             [1., 2., 3., 3., 2., 1., 1.],
             [1., 2., 3., 3., 2., 1., 1.],
             [1., 2., 2., 2., 2., 1., 1.],
             [1., 1., 1., 1., 1., 1., 1.],
             [1., 1., 1., 1., 1., 1., 1.]])
        data1 = np.zeros((16, 16))
        data1[1:8, 2:9] = rainfall_block
        self.cube1 = iris.cube.Cube(
            data1, standard_name='rainfall_rate', units='mm h-1',
            dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)])
        # time1: [datetime.datetime(2018, 2, 20, 4, 0)]
        time1 = DimCoord(1519099200, standard_name="time",
                         units='seconds since 1970-01-01 00:00:00')
        self.cube1.add_aux_coord(time1)
        data2 = np.zeros((16, 16))
        data2[2:9, 1:8] = rainfall_block
        self.cube2 = iris.cube.Cube(
            data2, standard_name='rainfall_rate', units='mm h-1',
            dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)])
        # time2: [datetime.datetime(2018, 2, 20, 4, 15)]
        time2 = DimCoord(1519100100, standard_name="time",
                         units='seconds since 1970-01-01 00:00:00')
        self.cube2.add_aux_coord(time2)

    def test_basic(self):
        """Test correct output types and metadata"""
        ucube, vcube = self.plugin.process(self.cube1, self.cube2)
        for cube in [ucube, vcube]:
            self.assertIsInstance(cube, iris.cube.Cube)
            # Velocities are stamped with the later (second) cube's time
            self.assertEqual(
                cube.coord("time")[0], self.cube2.coord("time")[0])
            self.assertEqual(cube.units, "m s-1")
            self.assertIn("advection_velocity_", cube.name())

    def test_values(self):
        """Test velocity values are as expected (in m/s)"""
        ucube, vcube = self.plugin.process(self.cube1, self.cube2)
        self.assertAlmostEqual(np.mean(ucube.data), -2.12078369915)
        self.assertAlmostEqual(np.mean(vcube.data), 2.12078369915)

    def test_error_small_kernel(self):
        """Test failure if data smoothing radius is too small"""
        plugin = OpticalFlow(data_smoothing_radius_km=3, boxsize_km=6)
        msg = "Input data smoothing radius 1 too small "
        with self.assertRaisesRegex(ValueError, msg):
            _ = plugin.process(self.cube1, self.cube2)

    def test_error_unmatched_coords(self):
        """Test failure if cubes are provided on unmatched grids"""
        cube2 = self.cube2.copy()
        for ax in ["x", "y"]:
            cube2.coord(axis=ax).points = 4 * np.arange(16)
        msg = "Input cubes on unmatched grids"
        with self.assertRaisesRegex(InvalidCubeError, msg):
            _ = self.plugin.process(self.cube1, cube2)

    def test_error_no_time_difference(self):
        """Test failure if two cubes are provided with the same time"""
        msg = "Expected positive time difference "
        with self.assertRaisesRegex(InvalidCubeError, msg):
            _ = self.plugin.process(self.cube1, self.cube1)

    def test_error_negative_time_difference(self):
        """Test failure if cubes are provided in the wrong order"""
        msg = "Expected positive time difference "
        with self.assertRaisesRegex(InvalidCubeError, msg):
            _ = self.plugin.process(self.cube2, self.cube1)

    def test_error_irregular_grid(self):
        """Test failure if cubes have different x/y grid lengths"""
        cube1 = self.cube1.copy()
        cube2 = self.cube2.copy()
        for cube in [cube1, cube2]:
            cube.coord(axis="y").points = 4 * np.arange(16)
        msg = "Input cube has different grid spacing in x and y"
        with self.assertRaisesRegex(InvalidCubeError, msg):
            _ = self.plugin.process(cube1, cube2)
def setUp(self):
    """Build a default plugin and a full-domain rain mask."""
    self.plugin = OpticalFlow()
    # Mask covering every cell of a 3x3 domain
    self.rain_mask = np.where(np.ones((3, 3)) > 0)
class Test_process(IrisTest):
    """Test the process method"""

    def setUp(self):
        """Set up plugin and input rainfall-like cubes"""
        self.plugin = OpticalFlow(iterations=20)
        self.plugin.data_smoothing_radius_km = np.float32(6.0)
        # 16x16 grid with 2 km (2000 m) spacing in each direction
        coord_points = 2000 * np.arange(16, dtype=np.float32)  # in metres
        # Symmetric rainfall "blob"; the second cube contains the same blob
        # shifted by one grid square, giving a known advection velocity.
        rainfall_block = np.array(
            [
                [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                [1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0],
                [1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0],
                [1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0],
                [1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0],
                [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            ],
            dtype=np.float32,
        )
        data1 = np.zeros((16, 16), dtype=np.float32)
        data1[1:8, 2:9] = rainfall_block
        self.cube1 = set_up_variable_cube(
            data1,
            name="rainfall_rate",
            units="mm h-1",
            spatial_grid="equalarea",
            time=datetime(2018, 2, 20, 4, 0),
            frt=datetime(2018, 2, 20, 4, 0),
        )
        self.cube1.coord(axis="x").points = coord_points
        self.cube1.coord(axis="y").points = coord_points
        data2 = np.zeros((16, 16), dtype=np.float32)
        data2[2:9, 1:8] = rainfall_block
        # Second cube is valid 15 minutes after the first
        self.cube2 = set_up_variable_cube(
            data2,
            name="rainfall_rate",
            units="mm h-1",
            spatial_grid="equalarea",
            time=datetime(2018, 2, 20, 4, 15),
            frt=datetime(2018, 2, 20, 4, 15),
        )
        self.cube2.coord(axis="x").points = coord_points
        self.cube2.coord(axis="y").points = coord_points

    def test_basic(self):
        """Test correct output types and metadata"""
        ucube, vcube = self.plugin.process(self.cube1, self.cube2, boxsize=3)
        for cube in [ucube, vcube]:
            self.assertIsInstance(cube, iris.cube.Cube)
            # Velocities are valid at the time of the second (later) cube
            self.assertEqual(
                cube.coord("time")[0], self.cube2.coord("time")[0])
            self.assertEqual(cube.units, "m s-1")
            self.assertIn("precipitation_advection", cube.name())
            self.assertIn("velocity", cube.name())

    def test_values(self):
        """Test velocity values are as expected (in m/s)"""
        ucube, vcube = self.plugin.process(self.cube1, self.cube2, boxsize=3)
        self.assertAlmostEqual(np.mean(ucube.data), -2.1719084)
        self.assertAlmostEqual(np.mean(vcube.data), 2.1719084)

    def test_values_perturbation(self):
        """Test velocity values are as expected when input cubes are
        presented as an older extrapolation forecast and recent
        observation"""
        # make cube 1 into a forecast with a 15 minute forecast period
        self.cube1.coord("time").points = self.cube2.coord("time").points
        self.cube1.coord("forecast_reference_time").points = (
            self.cube1.coord("forecast_reference_time").points - 15 * 60)
        self.cube1.coord("forecast_period").points = [15 * 60]
        ucube, vcube = self.plugin.process(self.cube1, self.cube2, boxsize=3)
        # Same expected values as test_values: the effective time
        # separation between the fields is unchanged
        self.assertAlmostEqual(np.mean(ucube.data), -2.1719084)
        self.assertAlmostEqual(np.mean(vcube.data), 2.1719084)

    def test_values_with_precip_rate_in_m_per_s(self):
        """Test velocity values are as expected (in m/s) when the input
        precipitation rates are in units of m/s rather than the expected
        mm/hr."""
        self.cube1.convert_units("m s-1")
        self.cube2.convert_units("m s-1")
        ucube, vcube = self.plugin.process(self.cube1, self.cube2, boxsize=3)
        self.assertAlmostEqual(np.mean(ucube.data), -2.1719084)
        self.assertAlmostEqual(np.mean(vcube.data), 2.1719084)

    def test_values_with_masked_data(self):
        """Test velocity values are as expected when masked cubes are used as
        input to the tests. This test is to capture behaviour whereby mask
        fill values were being used as valid data. This resulted in far from
        correct velocities being calculated by the optical flow code. Notably
        the velocity fields did not reflect the position of precipitation in
        the input precipitation fields, and the returned velocities were too
        low.

        In this test masked cubes are used and comparable unmasked cubes in
        which there the fill values are included in the field. We expect the
        results to be different, with higher velocities returned for the
        masked cubes.
        """
        mask = np.zeros((16, 16))
        mask[:2, :] = 1
        mask[:, :2] = 1

        # Ensure the masked data points contain a high fill value.
        # NOTE(review): data1/data2 alias the cube data arrays, so these
        # in-place writes also place the fill values under the mask of the
        # masked cubes — presumably intentional, as the masked path must
        # ignore them. Confirm against the optical flow implementation.
        data1 = self.cube1.data
        data2 = self.cube2.data
        data1[:2, :] = 1.0e36
        data1[:, :2] = 1.0e36
        data2[:2, :] = 1.0e36
        data2[:, :2] = 1.0e36

        masked1 = np.ma.MaskedArray(self.cube1.data, mask=mask)
        masked2 = np.ma.MaskedArray(self.cube2.data, mask=mask)

        masked_cube1 = self.cube1.copy(data=masked1)
        masked_cube2 = self.cube2.copy(data=masked2)
        unmasked_cube1 = self.cube1.copy(data=data1)
        unmasked_cube2 = self.cube2.copy(data=data2)

        ucube_masked, vcube_masked = self.plugin.process(
            masked_cube1, masked_cube2, boxsize=3)
        ucube_unmasked, vcube_unmasked = self.plugin.process(
            unmasked_cube1, unmasked_cube2, boxsize=3)

        # Masked inputs give higher velocities than unmasked inputs in
        # which the fill values are treated as real data
        self.assertAlmostEqual(np.mean(ucube_masked.data), -1.4995803)
        self.assertAlmostEqual(np.mean(vcube_masked.data), 1.4995805)
        self.assertAlmostEqual(np.mean(ucube_unmasked.data), -0.2869996)
        self.assertAlmostEqual(np.mean(vcube_unmasked.data), 0.28699964)

    def test_error_for_unconvertable_units(self):
        """Test that an exception is raised if the input precipitation cubes
        have units that cannot be converted to mm/hr."""
        self.cube1.units = "m"
        self.cube2.units = "m"

        msg = "Input data are in units that cannot be converted to mm/hr"
        with self.assertRaisesRegex(ValueError, msg):
            self.plugin.process(self.cube1, self.cube2, boxsize=3)

    def test_input_cubes_unchanged(self):
        """Test the input precipitation rate cubes are unchanged by use in the
        optical flow plugin. One of the cubes is converted to rates in ms-1
        before use to ensure the cube remains in these units despite the
        default working units within optical flow being mm/hr."""
        self.cube1.convert_units("m s-1")
        cube1_ref = self.cube1.copy()
        cube2_ref = self.cube2.copy()
        _, _ = self.plugin.process(self.cube1, self.cube2, boxsize=3)
        self.assertEqual(self.cube1, cube1_ref)
        self.assertEqual(self.cube2, cube2_ref)

    def test_decrease_time_interval(self):
        """Test that decreasing the time interval between radar frames below
        15 minutes does not alter the smoothing radius. To test this the time
        interval is halved, which should give an answer identical to the values
        test above multiplied by a factor of two."""
        time_unit = self.cube2.coord("time").units
        new_time = time_unit.num2date(self.cube2.coord("time").points[0])
        # Bring cube2 forward by 7.5 minutes, halving the time interval
        new_time -= timedelta(seconds=450)
        self.cube2.remove_coord("time")
        time_coord = DimCoord(time_unit.date2num(new_time),
                              standard_name="time", units=time_unit)
        self.cube2.add_aux_coord(time_coord)
        ucube, vcube = self.plugin.process(self.cube1, self.cube2, boxsize=3)
        self.assertAlmostEqual(np.mean(ucube.data), -2.1719084 * 2.0)
        self.assertAlmostEqual(np.mean(vcube.data), 2.1719084 * 2.0)

    def test_increase_time_interval(self):
        """Test that increasing the time interval between radar frames above
        15 minutes leads to an increase in the data smoothing radius. In this
        test this will result in a smoothing radius larger than the box size,
        which is not allowed and will raise an exception. The updated radius
        value in this case is 12 km (6 grid squares), exceeding the 3 square
        box size."""
        time_unit = self.cube2.coord("time").units
        new_time = time_unit.num2date(self.cube2.coord("time").points[0])
        # Push cube2 back by 15 minutes, doubling the time interval
        new_time += timedelta(seconds=900)
        self.cube2.remove_coord("time")
        time_coord = DimCoord(time_unit.date2num(new_time),
                              standard_name="time", units=time_unit)
        self.cube2.add_aux_coord(time_coord)
        msg = "Box size ([0-9]+) too small"
        with self.assertRaisesRegex(ValueError, msg):
            self.plugin.process(self.cube1, self.cube2, boxsize=3)

    def test_error_small_kernel(self):
        """Test failure if data smoothing radius is too small"""
        self.plugin.data_smoothing_radius_km = 3.0
        msg = "Input data smoothing radius 1 too small "
        with self.assertRaisesRegex(ValueError, msg):
            _ = self.plugin.process(self.cube1, self.cube2)

    def test_error_small_box(self):
        """Test failure if box size is smaller than data smoothing radius"""
        msg = "Box size 2 too small"
        with self.assertRaisesRegex(ValueError, msg):
            self.plugin.process(self.cube1, self.cube2, boxsize=2)

    def test_error_unmatched_coords(self):
        """Test failure if cubes are provided on unmatched grids"""
        cube2 = self.cube2.copy()
        for ax in ["x", "y"]:
            cube2.coord(axis=ax).points = 4 * np.arange(16)
        msg = "Input cubes on unmatched grids"
        with self.assertRaisesRegex(InvalidCubeError, msg):
            _ = self.plugin.process(self.cube1, cube2)

    def test_error_no_time_difference(self):
        """Test failure if two cubes are provided with the same time"""
        msg = "Expected positive time difference "
        with self.assertRaisesRegex(InvalidCubeError, msg):
            _ = self.plugin.process(self.cube1, self.cube1)

    def test_error_negative_time_difference(self):
        """Test failure if cubes are provided in the wrong order"""
        msg = "Expected positive time difference "
        with self.assertRaisesRegex(InvalidCubeError, msg):
            _ = self.plugin.process(self.cube2, self.cube1)

    @ManageWarnings(record=True)
    def test_warning_zero_inputs(self, warning_list=None):
        """Test code raises a warning and sets advection velocities to zero
        if there is no rain in the input cubes."""
        null_data = np.zeros(self.cube1.shape)
        cube1 = self.cube1.copy(data=null_data)
        cube2 = self.cube2.copy(data=null_data)
        ucube, vcube = self.plugin.process(cube1, cube2)
        warning_msg = "No non-zero data in input fields"
        self.assertTrue(
            any(item.category == UserWarning for item in warning_list))
        self.assertTrue(any(warning_msg in str(item)
                            for item in warning_list))
        self.assertArrayAlmostEqual(ucube.data, null_data)
        self.assertArrayAlmostEqual(vcube.data, null_data)

    def test_error_nonmatching_inputs(self):
        """Test failure if cubes are of different data types"""
        self.cube1.rename("snowfall_rate")
        msg = "Input cubes contain different data types"
        with self.assertRaisesRegex(ValueError, msg):
            self.plugin.process(self.cube1, self.cube2)

    @ManageWarnings(record=True)
    def test_warning_nonprecip_inputs(self, warning_list=None):
        """Test code raises a warning if input cubes have
        non-rain variable names"""
        self.cube1.rename("snowfall_rate")
        self.cube2.rename("snowfall_rate")
        _, _ = self.plugin.process(self.cube1, self.cube2, boxsize=3)
        warning_msg = "Input data are of non-precipitation type"
        self.assertTrue(
            any(item.category == UserWarning for item in warning_list))
        self.assertTrue(any(warning_msg in str(item)
                            for item in warning_list))
def test_basic(self): """Test for correct output types""" OpticalFlow().extreme_value_check(self.umat, self.vmat, self.weights) self.assertIsInstance(self.umat, np.ndarray) self.assertIsInstance(self.vmat, np.ndarray) self.assertIsInstance(self.weights, np.ndarray)
def test_values(self): """Test output values""" u, v = OpticalFlow().solve_for_uv(self.I_xy, self.I_t) self.assertAlmostEqual(u, 1.) self.assertAlmostEqual(v, 2.)