class TestUngriddedDataList(TestCase):
    """Tests for UngriddedDataList behaviour over multiple UngriddedData objects sharing coords."""

    def setUp(self):
        # Build a small 5x3 lat/lon grid that every variable in the list shares.
        x_points = np.arange(-10, 11, 5)
        y_points = np.arange(-5, 6, 5)
        y, x = np.meshgrid(y_points, x_points)
        x = Coord(x, Metadata(name='lat', standard_name='latitude', units='degrees'))
        y = Coord(y, Metadata(name='lon', standard_name='longitude', units='degrees'))
        data = np.reshape(np.arange(15) + 1.0, (5, 3))
        self.coords = CoordList([x, y])
        ug1 = UngriddedData(data,
                            Metadata(standard_name='rain',
                                     long_name="TOTAL RAINFALL RATE: LS+CONV KG/M2/S",
                                     units="kg m-2 s-1", missing_value=-999),
                            self.coords)
        ug2 = UngriddedData(data * 0.1,
                            Metadata(standard_name='snow',
                                     long_name="TOTAL SNOWFALL RATE: LS+CONV KG/M2/S",
                                     units="kg m-2 s-1", missing_value=-999),
                            self.coords)
        self.ungridded_data_list = UngriddedDataList([ug1, ug2])

    def test_GIVEN_data_containing_multiple_matching_coordinates_WHEN_coords_THEN_only_unique_coords_returned(self):
        # Both variables share identical coordinates, so coords() must de-duplicate them.
        unique_coords = self.ungridded_data_list.coords()
        assert_that(len(unique_coords), is_(2))
        assert_that(isinstance(unique_coords, CoordList))
        coord_names = [coord.standard_name for coord in unique_coords]
        assert_that(coord_names, contains_inanyorder('latitude', 'longitude'))

    @skip_pandas
    def test_GIVEN_multiple_ungridded_data_WHEN_call_as_data_frame_THEN_returns_valid_data_frame(self):
        df = self.ungridded_data_list.as_data_frame()
        assert_that(df['rain'][5] == 6)
        assert_almost_equal(df['snow'][5], 0.6)
        assert_that(df['lat'][13] == 10)
        assert_that(df['lon'][0] == -5)

    @skip_pandas
    def test_GIVEN_multiple_ungridded_data_with_missing_data_WHEN_call_as_data_frame_THEN_returns_valid_data_frame(self):
        raw = np.reshape(np.arange(15) + 10.0, (5, 3))
        data = np.ma.masked_array(raw, np.zeros(raw.shape, dtype=bool))
        data.mask[1, 2] = True
        ug3 = UngriddedData(data,
                            Metadata(standard_name='hail',
                                     long_name="TOTAL HAIL RATE: LS+CONV KG/M2/S",
                                     units="kg m-2 s-1", missing_value=-999),
                            self.coords)
        self.ungridded_data_list.append(ug3)
        df = self.ungridded_data_list.as_data_frame()
        assert_that(df['rain'][5] == 6)
        assert_almost_equal(df['snow'][5], 0.6)
        assert_that(df['lat'][13] == 10)
        assert_that(df['lon'][0] == -5)
        assert_almost_equal(df['hail'][1], 11.0)
        # The masked element must surface as NaN at its flattened (row-major) index.
        assert_that(np.isnan(df['hail'][np.ravel_multi_index([1, 2], (5, 3))]))
        self.ungridded_data_list.pop()
def mask_data(data, cad_score, extinction_qc, cad_confidence=20):
    """
    Screen a list of CALIPSO-style variables: keep only columns that pass both the CAD
    (cloud-aerosol discrimination) confidence test and the extinction QC test, then mask
    individual points within those columns that fail the per-point tests.

    NOTE(review): doi:10.1002/2013JD019527 uses a CAD confidence of 80, but the default
    here is 20 — confirm which threshold is actually intended.

    The extinction QC values are::

        Bit  Value  Interpretation
        1    0      unconstrained retrieval; initial lidar ratio unchanged during solution process
        1    1      constrained retrieval
        2    2      Initial lidar ratio reduced to prevent divergence of extinction solution
        3    4      Initial lidar ratio increased to reduce the number of negative extinction
                    coefficients in the derived solution
        4    8      Calculated backscatter coefficient exceeds the maximum allowable value
        5    16     Layer being analyzed has been identified by the feature finder as being
                    totally attenuating (i.e., opaque)
        6    32     Estimated optical depth error exceeds the maximum allowable value
        7    64     Solution converges, but with an unacceptably large number of negative values
        8    128    Retrieval terminated at maximum iterations
        9    256    No solution possible within allowable lidar ratio bounds
        16   32768  Fill value or no solution attempted

    :param CommonDataList data: The data to be masked; each element must have the same
        leading (column) dimension as ``cad_score`` and ``extinction_qc``
    :param cad_score: CAD score variable used to select good aerosol columns/points
    :param extinction_qc: Extinction QC variable used to select good extinction columns/points
    :param cad_confidence: Minimum absolute CAD score to accept a point as aerosol
    :return: An UngriddedDataList of the column-subset variables with bad points masked
    """
    from cis.data_io.ungridded_data import UngriddedDataList

    # Columns must pass BOTH screening tests to be kept at all.
    column_mask = find_good_aerosol_columns(cad_score, cad_confidence) & \
        find_good_extinction_columns(extinction_qc)

    # Now do the full profiles. Pull out the valid parts of the aerosol and extinction masks
    good_extinctions = _find_converged_extinction_points(extinction_qc.data[column_mask])
    aerosols = _find_aerosol(cad_score.data[column_mask], cad_confidence)

    # First create the aerosol masked data (which is a shared mask)
    compressed_data = UngriddedDataList()
    for d in data:
        if d.data.shape[0] != column_mask.shape[0]:
            # This only outputs a warning in numpy currently
            raise ValueError("The data shape doesn't match the mask shape")
        # Subset to the good columns first.
        c = d[column_mask]
        # If the data has (an extended) second dimension
        if len(c.shape) > 1 and c.shape[1] > 1:
            # Apply the aerosol (2D) mask
            c.data = apply_mask_to_numpy_array(c.data, ~aerosols)
            if c.name().startswith('Extinction'):
                # Apply the good extinction (2D) mask
                c.data = apply_mask_to_numpy_array(c.data, ~good_extinctions)
        compressed_data.append(c)
        print("Valid {} points: {}".format(c.name(), c.count()))
    return compressed_data
def constrain(self, data):
    """Subsets the supplied data.

    :param data: data to be subsetted; may be a single ungridded data object or a list of them
    :return: subsetted data (an UngriddedDataList when a list was supplied), or None if the
        subset is empty
    """
    import numpy as np
    from datetime import datetime
    from cis.data_io.ungridded_data import UngriddedDataList

    if isinstance(data, list):
        # Calculating masks and indices will only take place on the first iteration,
        # so we can just call this method recursively if we've got a list of data.
        output = UngriddedDataList()
        for var in data:
            output.append(self.constrain(var))
        return output

    _data = self._create_data_for_subset(data)

    # 'shape' is not a coordinate limit, so remove it before iterating over self._limits.
    # Note pop() permanently removes it, so it is only seen on the first (recursive) call.
    _shape = self._limits.pop('shape', None)

    if self._combined_mask is None:
        # Create the combined mask across all limits
        shape = _data.coords()[0].data.shape  # This assumes they are all the same shape
        combined_mask = np.ones(shape, dtype=bool)
        for coord, limit in self._limits.items():
            # Convert the points to datetimes if the limit is a datetime
            if isinstance(limit.start, datetime):
                points = _data.coord(coord).units.num2date(_data.coord(coord).data)
            else:
                points = _data.coord(coord).data
            # Select any points which are <= to the stop limit AND >= to the start limit
            mask = (np.less_equal(points, limit.stop) & np.greater_equal(points, limit.start))
            combined_mask &= mask
        self._combined_mask = combined_mask

    _data = _data[self._combined_mask]

    # BUG FIX: because 'shape' was pop()ed above, subsequent calls (for the 2nd, 3rd, ...
    # variable of a list) saw _shape is None and silently skipped the shape subset, giving
    # differently-sized outputs per variable. Compute the indices once, then apply them
    # whenever they are cached — mirroring how self._combined_mask is reused.
    if _shape is not None and self._shape_indices is None:
        self._shape_indices = _get_ungridded_subset_region_indices(_data, _shape)
    if self._shape_indices is not None:
        _data = _data[np.unravel_index(self._shape_indices, _data.shape)]

    # Report an empty subset as None rather than a zero-length object.
    if _data.size == 0:
        _data = None

    return _data
def constrain(self, data):
    """Subsets the supplied data.

    :param data: data to be subsetted
    :return: subsetted data
    """
    import numpy as np
    from datetime import datetime
    from cis.data_io.ungridded_data import UngriddedDataList

    # For a list of variables, recurse per-variable; the cached mask/indices mean the
    # expensive work only happens on the first pass.
    if isinstance(data, list):
        subset_list = UngriddedDataList()
        for variable in data:
            subset_list.append(self.constrain(variable))
        return subset_list

    working = self._create_data_for_subset(data)

    # 'shape' is handled separately from the coordinate range limits, so take it out
    # before iterating over the limits below.
    shape_spec = self._limits.pop('shape', None)

    if self._combined_mask is None:
        # Build (and cache) the AND of every per-coordinate range test.
        # Assumes every coordinate array has the same shape as the first one.
        keep = np.ones(working.coords()[0].data.shape, dtype=bool)
        for coord_name, limit in self._limits.items():
            raw_points = working.coord(coord_name).data
            if isinstance(limit.start, datetime):
                # Compare in datetime space when the limit is given as a datetime.
                coord_points = working.coord(coord_name).units.num2date(raw_points)
            else:
                coord_points = raw_points
            # Keep points inside [start, stop], inclusive at both ends.
            keep &= np.greater_equal(coord_points, limit.start) & np.less_equal(coord_points, limit.stop)
        self._combined_mask = keep

    working = working[self._combined_mask]

    if shape_spec is not None:
        # Compute (and cache) the flat indices of points falling inside the shape.
        if self._shape_indices is None:
            self._shape_indices = _get_ungridded_subset_region_indices(working, shape_spec)
        working = working[np.unravel_index(self._shape_indices, working.shape)]

    # An empty subset is reported as None rather than a zero-length object.
    return working if working.size != 0 else None
def create_data_object(self, filenames, variable):
    """Read ``variable`` from a set of MODIS L2 HDF files as ungridded data.

    :param filenames: list of files to read (concatenated along the track)
    :param variable: name of the SD variable to read
    :return: an UngriddedData for a plain 2D field, or an UngriddedDataList with one
        element per index of the leading (non-spatial) dimensions for >2D fields
    :raises NotImplementedError: for 1D fields
    """
    from itertools import product
    logging.debug("Creating data object for variable " + variable)

    # reading coordinates
    # the variable here is needed to work out whether to apply interpolation to the lat/lon data or not
    coords = self._create_coord_list(filenames, variable)

    # reading of variables
    sdata, vdata = hdf.read(filenames, variable)

    # retrieve data + its metadata
    var = sdata[variable]
    metadata = hdf.read_metadata(var, "SD")

    # Check the dimension of this variable
    _, ndim, dim_len, _, _ = var[0].info()

    if ndim == 2:
        return UngriddedData(var, metadata, coords, _get_MODIS_SDS_data)
    elif ndim < 2:
        raise NotImplementedError("1D field in MODIS L2 data.")
    else:
        result = UngriddedDataList()
        # Iterate over all but the last two dimensions
        ranges = [range(n) for n in dim_len[:-2]]
        for indices in product(*ranges):
            for manager in var:
                # NOTE(review): this mutates private pyhdf SDS-manager attributes so each
                # read is windowed to a single 2D slice (offset `indices`, count 1 on the
                # leading axes, full extent on the last two) — confirm against the hdf
                # wrapper implementation.
                manager._start = list(indices) + [0, 0]
                manager._count = [1] * len(indices) + manager.info()[2][-2:]
            # coords are copied so each list element owns independent coordinates.
            result.append(UngriddedData(var, metadata, coords.copy(), _get_MODIS_SDS_data))
        return result
def collocate(self, points, data, constraint, kernel):
    """
    This collocator takes a list of HyperPoints and a data object (currently either Ungridded
    data or a Cube) and returns one new LazyData object with the values as determined by the
    constraint and kernel objects. The metadata for the output LazyData object is copied from
    the input data object.

    :param UngriddedData or UngriddedCoordinates points: Object defining the sample points
    :param UngriddedData data: The source data to collocate from
    :param constraint: An instance of a Constraint subclass which takes a data object and
                       returns a subset of that data based on it's internal parameters
    :param kernel: An instance of a Kernel subclass which takes a number of points and returns
                   a single value
    :return UngriddedData or UngriddedDataList: Depending on the input
    """
    log_memory_profile("GeneralUngriddedCollocator Initial")

    if isinstance(data, list):
        # Indexing and constraints (for SepConstraintKdTree) will only take place on the first iteration,
        # so we really can just call this method recursively if we've got a list of data.
        output = UngriddedDataList()
        for var in data:
            output.extend(self.collocate(points, var, constraint, kernel))
        return output

    # First fix the sample points so that they all fall within the same 360 degree longitude range
    _fix_longitude_range(points.coords(), points)
    # Then fix the data points so that they fall onto the same 360 degree longitude range as the sample points
    _fix_longitude_range(points.coords(), data)

    # Convert to dataframes for fancy indexing
    sample_points = points.as_data_frame(time_index=False, name='vals')
    # Drop rows with any NaN so the kd-tree / constraint only ever sees valid data points.
    data_points = data.as_data_frame(time_index=False, name='vals').dropna(axis=0)

    log_memory_profile("GeneralUngriddedCollocator after data retrieval")

    # Create index if constraint and/or kernel require one.
    coord_map = None
    data_index.create_indexes(constraint, points, data_points, coord_map)
    log_memory_profile("GeneralUngriddedCollocator after indexing")

    logging.info("--> Collocating...")

    # Create output arrays. The source variable's identity is stashed on self so the kernel
    # can build per-output-variable metadata from it.
    self.var_name = data.var_name
    self.var_long_name = data.long_name
    self.var_standard_name = data.standard_name
    self.var_units = data.units
    var_set_details = kernel.get_variable_details(self.var_name, self.var_long_name,
                                                  self.var_standard_name, self.var_units)

    sample_points_count = len(sample_points)
    # Create an empty masked array to store the collocated values. The elements will be unmasked by assignment.
    values = np.ma.masked_all((len(var_set_details), sample_points_count))
    values.fill_value = self.fill_value
    log_memory_profile("GeneralUngriddedCollocator after output array creation")

    logging.info(" {} sample points".format(sample_points_count))
    # Apply constraint and/or kernel to each sample point.
    if isinstance(kernel, nn_horizontal_only):
        # Only find the nearest point using the kd-tree, without constraint in other dimensions
        nearest_points = data_points.iloc[constraint.haversine_distance_kd_tree_index.find_nearest_point(sample_points)]
        values[0, :] = nearest_points.vals.values
    else:
        for i, point, con_points in constraint.get_iterator(self.missing_data_for_missing_sample, None, None,
                                                            data_points, None, sample_points, None):
            try:
                values[:, i] = kernel.get_value(point, con_points)
                # Kernel returns either a single value or a tuple of values to insert into each output variable.
            except CoordinateMultiDimError as e:
                raise NotImplementedError(e)
            except ValueError as e:
                # The kernel could not produce a value for this sample point;
                # the output element simply stays masked.
                pass
    log_memory_profile("GeneralUngriddedCollocator after running kernel on sample points")

    # Mask any bad values
    values = np.ma.masked_invalid(values)

    return_data = UngriddedDataList()
    for idx, var_details in enumerate(var_set_details):
        # var_details is (name, long_name, standard_name, units).
        var_metadata = Metadata(name=var_details[0], long_name=var_details[1], shape=(len(sample_points),),
                                missing_value=self.fill_value, units=var_details[3])
        set_standard_name_if_valid(var_metadata, var_details[2])
        return_data.append(UngriddedData(values[idx, :], var_metadata, points.coords()))
    log_memory_profile("GeneralUngriddedCollocator final")

    return return_data
def collocate(self, points, data, constraint, kernel):
    """
    This collocator takes a list of HyperPoints and a data object (currently either Ungridded
    data or a Cube) and returns one new LazyData object with the values as determined by the
    constraint and kernel objects. The metadata for the output LazyData object is copied from
    the input data object.

    :param points: UngriddedData or UngriddedCoordinates defining the sample points
    :param data: An UngriddedData object or Cube, or any other object containing metadata that
                 the constraint object can read. May also be a list of objects, in which case a list
                 will be returned
    :param constraint: An instance of a Constraint subclass which takes a data object and
                       returns a subset of that data based on it's internal parameters
    :param kernel: An instance of a Kernel subclass which takes a number of points and returns
                   a single value
    :return: A single LazyData object
    """
    log_memory_profile("GeneralUngriddedCollocator Initial")

    if isinstance(data, list):
        # Indexing and constraints (for SepConstraintKdTree) will only take place on the first
        # iteration, so we really can just call this method recursively if we've got a list of data.
        output = UngriddedDataList()
        for var in data:
            output.extend(self.collocate(points, var, constraint, kernel))
        return output

    metadata = data.metadata

    sample_points = points.get_all_points()

    # Convert ungridded data to a list of points if kernel needs it.
    # Special case checks for kernels that use a cube - this could be done more elegantly.
    if isinstance(kernel, nn_gridded) or isinstance(kernel, li):
        if hasattr(kernel, "interpolator"):
            # If we have an interpolator on the kernel we need to reset it as it depends on the
            # actual values as well as the coordinates
            kernel.interpolator = None
            kernel.coord_names = []
        if not isinstance(data, iris.cube.Cube):
            raise ValueError("Ungridded data cannot be used with kernel nn_gridded or li")
        if constraint is not None and not isinstance(constraint, DummyConstraint):
            raise ValueError("A constraint cannot be specified with kernel nn_gridded or li")
        data_points = data
    else:
        data_points = data.get_non_masked_points()

    # First fix the sample points so that they all fall within the same 360 degree longitude range
    _fix_longitude_range(points.coords(), sample_points)
    # Then fix the data points so that they fall onto the same 360 degree longitude range as the sample points
    _fix_longitude_range(points.coords(), data_points)

    log_memory_profile("GeneralUngriddedCollocator after data retrieval")

    # Create index if constraint and/or kernel require one.
    coord_map = None
    data_index.create_indexes(constraint, points, data_points, coord_map)
    data_index.create_indexes(kernel, points, data_points, coord_map)
    log_memory_profile("GeneralUngriddedCollocator after indexing")

    logging.info("--> Collocating...")

    # Create output arrays.
    self.var_name = data.name()
    self.var_long_name = metadata.long_name
    self.var_standard_name = metadata.standard_name
    self.var_units = data.units
    var_set_details = kernel.get_variable_details(self.var_name, self.var_long_name,
                                                  self.var_standard_name, self.var_units)

    sample_points_count = len(sample_points)
    # Pre-fill the output with the fill value; the kernel overwrites valid elements.
    values = np.zeros((len(var_set_details), sample_points_count)) + self.fill_value
    log_memory_profile("GeneralUngriddedCollocator after output array creation")

    logging.info(" {} sample points".format(sample_points_count))
    # Apply constraint and/or kernel to each sample point.
    cell_count = 0
    total_count = 0
    for i, point in sample_points.enumerate_non_masked_points():
        # Log progress periodically.
        cell_count += 1
        if cell_count == 1000:
            total_count += cell_count
            cell_count = 0
            logging.info(" Processed {} points of {}".format(total_count, sample_points_count))

        if constraint is None:
            con_points = data_points
        else:
            con_points = constraint.constrain_points(point, data_points)
        try:
            value_obj = kernel.get_value(point, con_points)
            # Kernel returns either a single value or a tuple of values to insert into each output variable.
            if isinstance(value_obj, tuple):
                for idx, val in enumerate(value_obj):
                    if not np.isnan(val):
                        values[idx, i] = val
            else:
                values[0, i] = value_obj
        except CoordinateMultiDimError as e:
            raise NotImplementedError(e)
        except ValueError as e:
            # The kernel could not produce a value for this sample point;
            # the output element keeps the fill value.
            pass
    log_memory_profile("GeneralUngriddedCollocator after running kernel on sample points")

    return_data = UngriddedDataList()
    for idx, var_details in enumerate(var_set_details):
        # var_details is (name, long_name, standard_name, units), matching the argument
        # order of kernel.get_variable_details above.
        if idx == 0:
            new_data = UngriddedData(values[0, :], metadata, points.coords())
            new_data.metadata._name = var_details[0]
            new_data.metadata.long_name = var_details[1]
            cis.utils.set_cube_standard_name_if_valid(new_data, var_details[2])
            new_data.metadata.shape = (len(sample_points),)
            new_data.metadata.missing_value = self.fill_value
            # BUG FIX: units live in slot 3 of the details tuple; slot 2 is the standard
            # name (used two lines above, and as units in the sibling collocator).
            new_data.units = var_details[3]
        else:
            var_metadata = Metadata(name=var_details[0], long_name=var_details[1],
                                    shape=(len(sample_points),), missing_value=self.fill_value,
                                    units=var_details[3])
            new_data = UngriddedData(values[idx, :], var_metadata, points.coords())
        return_data.append(new_data)
    log_memory_profile("GeneralUngriddedCollocator final")

    return return_data
class TestUngriddedDataList(TestCase):
    """Tests for UngriddedDataList: slicing, combining, construction and pandas export."""

    def setUp(self):
        # A 5x3 lat/lon grid shared by every variable in the list.
        x_points = np.arange(-10, 11, 5)
        y_points = np.arange(-5, 6, 5)
        y, x = np.meshgrid(y_points, x_points)
        x = Coord(x, Metadata(name='lat', standard_name='latitude', units='degrees'))
        y = Coord(y, Metadata(name='lon', standard_name='longitude', units='degrees'))
        data = np.reshape(np.arange(15) + 1.0, (5, 3))
        self.coords = CoordList([x, y])
        ug1 = UngriddedData(data,
                            Metadata(standard_name='rainfall_flux',
                                     long_name="TOTAL RAINFALL RATE: LS+CONV KG/M2/S",
                                     units="kg m-2 s-1", missing_value=-999),
                            self.coords)
        ug2 = UngriddedData(data * 0.1,
                            Metadata(standard_name='snowfall_flux',
                                     long_name="TOTAL SNOWFALL RATE: LS+CONV KG/M2/S",
                                     units="kg m-2 s-1", missing_value=-999),
                            self.coords)
        self.ungridded_data_list = UngriddedDataList([ug1, ug2])

    def test_slicing(self):
        # Indexing one element gives a bare UngriddedData; any slice gives a list.
        single_item = self.ungridded_data_list[1]
        assert_that(isinstance(single_item, UngriddedData))
        many_items = self.ungridded_data_list[0:1]
        assert_that(isinstance(many_items, UngriddedDataList))
        many_items = self.ungridded_data_list[0:]
        assert_that(isinstance(many_items, UngriddedDataList))

    def test_combining(self):
        from cis.test.util.mock import make_regular_2d_ungridded_data
        another_list = UngriddedDataList([make_regular_2d_ungridded_data(),
                                          make_regular_2d_ungridded_data()])
        # Test adding
        assert_that(isinstance(self.ungridded_data_list + another_list, UngriddedDataList))
        # Test extending
        another_list.extend(self.ungridded_data_list)
        assert_that(isinstance(another_list, UngriddedDataList))
        assert_that(len(another_list) == 4)
        # Test can't add single items
        with assert_raises(TypeError):
            self.ungridded_data_list + another_list[0]

    def test_can_get_string_of_list(self):
        s = str(self.ungridded_data_list)
        assert_that(s == "UngriddedDataList: \n0: Ungridded data: rainfall_flux / (kg m-2 s-1) \n"
                         "1: Ungridded data: snowfall_flux / (kg m-2 s-1) \nCoordinates: \n latitude\n longitude\n")

    def test_GIVEN_data_containing_multiple_matching_coordinates_WHEN_coords_THEN_only_unique_coords_returned(self):
        # Both variables share identical coordinates, so coords() must de-duplicate them.
        unique_coords = self.ungridded_data_list.coords()
        assert_that(len(unique_coords), is_(2))
        assert_that(isinstance(unique_coords, CoordList))
        coord_names = [coord.standard_name for coord in unique_coords]
        assert_that(coord_names, contains_inanyorder('latitude', 'longitude'))

    def test_can_create_list_from_generators_and_other_iterators(self):
        from cis.test.util.mock import make_regular_2d_ungridded_data
        import itertools
        another_list = UngriddedDataList((make_regular_2d_ungridded_data(),
                                          make_regular_2d_ungridded_data()))
        assert_that(len(another_list) == 2)
        source_dict = {1: [make_regular_2d_ungridded_data()],
                       2: [make_regular_2d_ungridded_data()]}
        another_list = UngriddedDataList(itertools.chain.from_iterable(d for d in source_dict.values()))
        assert_that(len(another_list) == 2)

    @skip_pandas
    def test_GIVEN_multiple_ungridded_data_WHEN_call_as_data_frame_THEN_returns_valid_data_frame(self):
        df = self.ungridded_data_list.as_data_frame()
        assert_that(df['rainfall_flux'][5] == 6)
        assert_almost_equal(df['snowfall_flux'][5], 0.6)
        assert_that(df['latitude'][13] == 10)
        assert_that(df['longitude'][0] == -5)

    @skip_pandas
    def test_GIVEN_multiple_ungridded_data_with_missing_data_WHEN_call_as_data_frame_THEN_returns_valid_data_frame(self):
        raw = np.reshape(np.arange(15) + 10.0, (5, 3))
        data = np.ma.masked_array(raw, np.zeros(raw.shape, dtype=bool))
        data.mask[1, 2] = True
        ug3 = UngriddedData(data,
                            Metadata(name='hail',
                                     long_name="TOTAL HAIL RATE: LS+CONV KG/M2/S",
                                     units="kg m-2 s-1", missing_value=-999),
                            self.coords)
        self.ungridded_data_list.append(ug3)
        df = self.ungridded_data_list.as_data_frame()
        assert_that(df['rainfall_flux'][5] == 6)
        assert_almost_equal(df['snowfall_flux'][5], 0.6)
        assert_that(df['latitude'][13] == 10)
        assert_that(df['longitude'][0] == -5)
        assert_almost_equal(df['TOTAL HAIL RATE: LS+CONV KG/M2/S'][1], 11.0)
        # The masked element must surface as NaN at its flattened (row-major) index.
        assert_that(np.isnan(df['TOTAL HAIL RATE: LS+CONV KG/M2/S'][np.ravel_multi_index([1, 2], (5, 3))]))
        self.ungridded_data_list.pop()
class TestUngriddedDataList(TestCase):
    """Exercises UngriddedDataList slicing, combining, construction and DataFrame export."""

    def setUp(self):
        # Shared 5x3 lat/lon grid for all variables under test.
        x_points = np.arange(-10, 11, 5)
        y_points = np.arange(-5, 6, 5)
        y, x = np.meshgrid(y_points, x_points)
        x = Coord(x, Metadata(name='lat', standard_name='latitude', units='degrees'))
        y = Coord(y, Metadata(name='lon', standard_name='longitude', units='degrees'))
        data = np.reshape(np.arange(15) + 1.0, (5, 3))
        self.coords = CoordList([x, y])
        rain_meta = Metadata(standard_name='rainfall_flux',
                             long_name="TOTAL RAINFALL RATE: LS+CONV KG/M2/S",
                             units="kg m-2 s-1", missing_value=-999)
        snow_meta = Metadata(standard_name='snowfall_flux',
                             long_name="TOTAL SNOWFALL RATE: LS+CONV KG/M2/S",
                             units="kg m-2 s-1", missing_value=-999)
        ug1 = UngriddedData(data, rain_meta, self.coords)
        ug2 = UngriddedData(data * 0.1, snow_meta, self.coords)
        self.ungridded_data_list = UngriddedDataList([ug1, ug2])

    def test_slicing(self):
        # Scalar index -> UngriddedData; slices -> UngriddedDataList.
        single_item = self.ungridded_data_list[1]
        assert_that(isinstance(single_item, UngriddedData))
        many_items = self.ungridded_data_list[0:1]
        assert_that(isinstance(many_items, UngriddedDataList))
        many_items = self.ungridded_data_list[0:]
        assert_that(isinstance(many_items, UngriddedDataList))

    def test_combining(self):
        from cis.test.util.mock import make_regular_2d_ungridded_data
        another_list = UngriddedDataList([make_regular_2d_ungridded_data(),
                                          make_regular_2d_ungridded_data()])
        # Test adding
        assert_that(isinstance(self.ungridded_data_list + another_list, UngriddedDataList))
        # Test extending
        another_list.extend(self.ungridded_data_list)
        assert_that(isinstance(another_list, UngriddedDataList))
        assert_that(len(another_list) == 4)
        # Test can't add single items
        with assert_raises(TypeError):
            self.ungridded_data_list + another_list[0]

    def test_can_get_string_of_list(self):
        s = str(self.ungridded_data_list)
        assert_that(s == "UngriddedDataList: \n0: Ungridded data: rainfall_flux / (kg m-2 s-1) \n"
                         "1: Ungridded data: snowfall_flux / (kg m-2 s-1) \nCoordinates: \n latitude\n longitude\n")

    def test_GIVEN_data_containing_multiple_matching_coordinates_WHEN_coords_THEN_only_unique_coords_returned(self):
        # Identical coordinates across variables must be de-duplicated by coords().
        unique_coords = self.ungridded_data_list.coords()
        assert_that(len(unique_coords), is_(2))
        assert_that(isinstance(unique_coords, CoordList))
        coord_names = [coord.standard_name for coord in unique_coords]
        assert_that(coord_names, contains_inanyorder('latitude', 'longitude'))

    def test_can_create_list_from_generators_and_other_iterators(self):
        from cis.test.util.mock import make_regular_2d_ungridded_data
        import itertools
        another_list = UngriddedDataList((make_regular_2d_ungridded_data(),
                                          make_regular_2d_ungridded_data()))
        assert_that(len(another_list) == 2)
        mock_lists = {1: [make_regular_2d_ungridded_data()],
                      2: [make_regular_2d_ungridded_data()]}
        another_list = UngriddedDataList(itertools.chain.from_iterable(d for d in mock_lists.values()))
        assert_that(len(another_list) == 2)

    @skip_pandas
    def test_GIVEN_multiple_ungridded_data_WHEN_call_as_data_frame_THEN_returns_valid_data_frame(self):
        df = self.ungridded_data_list.as_data_frame()
        assert_that(df['rainfall_flux'][5] == 6)
        assert_almost_equal(df['snowfall_flux'][5], 0.6)
        assert_that(df['latitude'][13] == 10)
        assert_that(df['longitude'][0] == -5)

    @skip_pandas
    def test_GIVEN_multiple_ungridded_data_with_missing_data_WHEN_call_as_data_frame_THEN_returns_valid_data_frame(self):
        base = np.reshape(np.arange(15) + 10.0, (5, 3))
        data = np.ma.masked_array(base, np.zeros(base.shape, dtype=bool))
        data.mask[1, 2] = True
        hail_meta = Metadata(name='hail',
                             long_name="TOTAL HAIL RATE: LS+CONV KG/M2/S",
                             units="kg m-2 s-1", missing_value=-999)
        ug3 = UngriddedData(data, hail_meta, self.coords)
        self.ungridded_data_list.append(ug3)
        df = self.ungridded_data_list.as_data_frame()
        assert_that(df['rainfall_flux'][5] == 6)
        assert_almost_equal(df['snowfall_flux'][5], 0.6)
        assert_that(df['latitude'][13] == 10)
        assert_that(df['longitude'][0] == -5)
        assert_almost_equal(df['TOTAL HAIL RATE: LS+CONV KG/M2/S'][1], 11.0)
        # The masked cell shows up as NaN at its row-major flattened position.
        assert_that(np.isnan(df['TOTAL HAIL RATE: LS+CONV KG/M2/S'][np.ravel_multi_index([1, 2], (5, 3))]))
        self.ungridded_data_list.pop()