Example 1
    def ancillary(fid):
        """
        Gather the ancillary data consumed by ODC into a single `dict`.

        :param fid:
            A h5py file/Group object containing the ancillary datasets.

        :return:
            A `dict` with keys 'aerosol', 'water_vapour', 'ozone' and
            'elevation'; SBT ancillary entries and a 'brdf' entry are
            added when `sbt` / `nbar` (from the enclosing scope) are set.
        """
        # load the ancillary and remove fields not of use to ODC
        # retrieve the averaged ancillary if available
        anc_grp = fid.get(GroupName.ANCILLARY_AVG_GROUP.value)
        if anc_grp is None:
            # no averaged group present; read straight from the root group
            anc_grp = fid

        dname = DatasetName.AEROSOL.value
        aerosol_data = remove_fields(read_scalar(anc_grp, dname))
        dname = DatasetName.WATER_VAPOUR.value
        water_vapour_data = remove_fields(read_scalar(anc_grp, dname))
        dname = DatasetName.OZONE.value
        ozone_data = remove_fields(read_scalar(anc_grp, dname))

        # currently have multiple sources of elevation data
        elevation_data = elevation_provenance(anc_grp)

        result = {
            'aerosol': aerosol_data,
            'water_vapour': water_vapour_data,
            'ozone': ozone_data,
            'elevation': elevation_data
        }

        if sbt:
            # merge the surface brightness temperature ancillary
            result.update(load_sbt_ancillary(fid))

        if nbar:
            # NOTE(review): 'brdf' is reassigned on every iteration, so only
            # the last resolution group's ancillary survives — confirm this
            # overwrite is intentional
            for grp_name in res_group_bands:
                grp_ancillary = load_nbar_ancillary(res_group_bands[grp_name],
                                                    fid)
                result['brdf'] = grp_ancillary

        return result
Example 2
    def load_sbt_ancillary(group):
        """
        Load the SBT ancillary data retrieved during the workflow.

        :param group:
            A h5py Group containing the per-point ancillary datasets
            (one sub-group per coordinator point).

        :return:
            A `dict` keyed by dataset name; each value maps a point's
            (lon, lat) tuple to the scalar read for that point, or — for
            table datasets — to a dict of the dataset attributes merged
            with a column-name -> values mapping.
        """
        # scalar datasets are read with read_scalar
        scalar_names = [
            DatasetName.DEWPOINT_TEMPERATURE.value,
            DatasetName.SURFACE_GEOPOTENTIAL.value,
            DatasetName.TEMPERATURE_2M.value,
            DatasetName.SURFACE_RELATIVE_HUMIDITY.value,
        ]
        # table datasets are read with read_h5_table and flattened
        table_names = [
            DatasetName.GEOPOTENTIAL.value,
            DatasetName.RELATIVE_HUMIDITY.value,
            DatasetName.TEMPERATURE.value,
        ]

        point_data = {dname: {} for dname in scalar_names + table_names}

        npoints = group[DatasetName.COORDINATOR.value].shape[0]
        for point in range(npoints):
            pnt_grp = group[POINT_FMT.format(p=point)]
            lonlat = tuple(pnt_grp.attrs['lonlat'])

            # scalars
            for dname in scalar_names:
                point_data[dname][lonlat] = read_scalar(pnt_grp, dname)

            # tables: start from the dataset attributes, then attach each
            # column's values under the column name
            for dname in table_names:
                attrs = {k: v for k, v in pnt_grp[dname].attrs.items()}
                df = read_h5_table(pnt_grp, dname)
                for column in df.columns:
                    attrs[column] = df[column].values
                point_data[dname][lonlat] = attrs

        return point_data
Example 3
    def test_datetime_attrs(self):
        """
        Ensure a datetime attribute survives the iso-format conversion
        applied on write and compares equal after a read back.
        """
        timestamp = datetime.datetime.now()
        attrs = {"timestamp": timestamp}

        fname = "test_datetime_attrs.h5"
        with h5py.File(fname, "w", **self.memory_kwargs) as fid:
            hdf5.write_scalar(self.scalar_data, "scalar", fid, attrs=attrs)
            read_back = hdf5.read_scalar(fid, "scalar")
            self.assertEqual(read_back["timestamp"], timestamp)
Example 4
    def test_datetime_attrs(self):
        """
        Test that datetime objects will be converted to iso format
        when writing attributes.
        """
        attrs = {'timestamp': datetime.datetime.now()}

        fname = 'test_datetime_attrs.h5'
        # pass the file mode explicitly: relying on h5py's default mode is
        # deprecated/ambiguous, and the sibling tests open with 'w'
        with h5py.File(fname, 'w', **self.memory_kwargs) as fid:
            hdf5.write_scalar(self.scalar_data, 'scalar', fid, attrs=attrs)

            data = hdf5.read_scalar(fid, 'scalar')
            self.assertEqual(data['timestamp'], attrs['timestamp'])
Example 5
    def test_scalar_attributes(self):
        """
        Check that attributes supplied at write time come back from
        `read_scalar` alongside the scalar payload and metadata.
        """
        attrs = {"test_attribute": "this is a scalar"}
        expected = {"value": self.scalar_data, "CLASS": "SCALAR", "VERSION": "0.1"}
        # the read-back dict should contain the attributes as well
        expected.update(attrs)

        fname = "test_scalar_dataset.h5"
        with h5py.File(fname, "w", **self.memory_kwargs) as fid:
            hdf5.write_scalar(expected["value"], "test-scalar", fid, attrs=attrs)
            self.assertDictEqual(hdf5.read_scalar(fid, "test-scalar"), expected)
Example 6
    def test_scalar_attributes(self):
        """
        Test the scalar attributes.
        """
        attrs = {'test_attribute': 'this is a scalar'}
        data = {'value': self.scalar_data, 'CLASS': 'SCALAR', 'VERSION': '0.1'}

        # insert the attribute into the data dict
        for k, v in attrs.items():
            data[k] = v

        fname = 'test_scalar_dataset.h5'
        # pass the file mode explicitly: relying on h5py's default mode is
        # deprecated/ambiguous, and the sibling tests open with 'w'
        with h5py.File(fname, 'w', **self.memory_kwargs) as fid:
            hdf5.write_scalar(data['value'], 'test-scalar', fid, attrs=attrs)

            self.assertDictEqual(hdf5.read_scalar(fid, 'test-scalar'), data)
Example 7
def scalar_residual(ref_fid, test_fid, pathname, out_fid, save_inputs=False):
    """
    Undertake a simple equivalency test, rather than a numerical
    difference. This allows strings to be compared.

    :param ref_fid:
        A h5py file object (essentially the root Group), containing
        the reference data.

    :param test_fid:
        A h5py file object (essentially the root Group), containing
        the test data.

    :param pathname:
        A `str` containing the pathname to the SCALAR Dataset.

    :param out_fid:
        A h5py file object (essentially the root Group), opened for
        writing the output data.

    :param save_inputs:
        A `bool` indicating whether or not to save the input datasets
        used for evaluating the residuals alongside the results.
        Default is False.

    :return:
        None; This routine will only return None or a print statement,
        this is essential for the HDF5 visit routine.
    """
    class_name = 'SCALAR'
    ref_data = read_scalar(ref_fid, pathname)
    test_data = read_scalar(test_fid, pathname)

    # copy the attrs; drop the payload so only metadata is carried over
    attrs = ref_data.copy()
    attrs.pop('value')
    attrs['description'] = 'Equivalency Test'

    # drop 'file_format' as the conversion tool will try to output that format
    # but currently we're not testing contents, just if it is different
    # so saying we've created a yaml string when it is a simple bool is
    # not correct
    attrs.pop('file_format', None)

    # this'll handle string types, but we won't get a numerical
    # difference value for numerical values, only a bool
    diff = ref_data['value'] == test_data['value']

    # output location mirrors the input group hierarchy
    base_dname = pbasename(pathname)
    group_name = ref_fid[pathname].parent.name.strip('/')
    dname = ppjoin('RESULTS', class_name, 'EQUIVALENCY', group_name,
                   base_dname)
    write_scalar(diff, dname, out_fid, attrs)

    if save_inputs:
        # copy both input datasets alongside the results for provenance
        for prefix, fid in (('REFERENCE-DATA', ref_fid),
                            ('TEST-DATA', test_fid)):
            out_grp = out_fid.require_group(ppjoin(prefix, group_name))
            fid.copy(fid[pathname], out_grp)