Example #1
 def test_decode_cf_variable_with_array_units(self) -> None:
     v = Variable(["t"], [1, 2, 3], {"units": np.array(["foobar"], dtype=object)})
     v_decoded = conventions.decode_cf_variable("test2", v)
     assert_identical(v, v_decoded)
Example #2
 def test_concat_dim_is_variable(self):
     objs = [Dataset({"x": 0}), Dataset({"x": 1})]
     coord = Variable("y", [3, 4])
     expected = Dataset({"x": ("y", [0, 1]), "y": [3, 4]})
     actual = concat(objs, coord)
     assert_identical(actual, expected)
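Note: passing a Variable as the dim argument to concat both names the new dimension and supplies its coordinate values. A minimal sketch of the equivalent explicit construction (assuming the usual xarray imports):

    from xarray import Dataset, Variable, concat

    objs = [Dataset({"x": 0}), Dataset({"x": 1})]
    coord = Variable("y", [3, 4])
    # concat(objs, coord) should match concatenating along the name "y"
    # and then attaching coord's values as the "y" coordinate:
    assert concat(objs, coord).identical(
        concat(objs, "y").assign_coords(y=coord))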
Example #3
 def test_0d_int32_encoding(self):
     original = Variable((), np.int32(0), encoding={'dtype': 'int64'})
     expected = Variable((), np.int64(0))
     actual = conventions.maybe_encode_dtype(original)
     self.assertDatasetIdentical(expected, actual)
Example #4
 def test_concat_dim_is_variable(self):
     objs = [Dataset({'x': 0}), Dataset({'x': 1})]
     coord = Variable('y', [3, 4])
     expected = Dataset({'x': ('y', [0, 1]), 'y': [3, 4]})
     actual = concat(objs, coord)
     assert_identical(actual, expected)
Example #5
    def add_original_variables(dataset, height, srf_size=None):
        # height is ignored - supplied just for interface compatibility tb 2017-02-05

        tu.add_quality_flags(dataset,
                             FULL_SIZE,
                             FULL_SIZE,
                             chunksizes=CHUNKSIZES)

        # time
        default_array = DefaultData.create_default_array(
            IR_SIZE, IR_SIZE, np.uint32)
        variable = Variable([IR_Y_DIMENSION, IR_X_DIMENSION], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.uint32))
        variable.attrs["standard_name"] = "time"
        variable.attrs["long_name"] = "Acquisition time of pixel"
        tu.add_units(variable, "seconds since 1970-01-01 00:00:00")
        tu.add_offset(variable, TIME_FILL_VALUE)
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["time"] = variable

        dataset["solar_azimuth_angle"] = MVIRI._create_angle_variable_int(
            0.005493164, standard_name="solar_azimuth_angle", unsigned=True)
        dataset["solar_zenith_angle"] = MVIRI._create_angle_variable_int(
            0.005493248, standard_name="solar_zenith_angle")
        dataset["satellite_azimuth_angle"] = MVIRI._create_angle_variable_int(
            0.01,
            standard_name="sensor_azimuth_angle",
            long_name="sensor_azimuth_angle",
            unsigned=True)
        dataset["satellite_zenith_angle"] = MVIRI._create_angle_variable_int(
            0.01, standard_name="platform_zenith_angle", unsigned=True)

        # count_ir
        default_array = DefaultData.create_default_array(
            IR_SIZE, IR_SIZE, np.uint8)
        variable = Variable([IR_Y_DIMENSION, IR_X_DIMENSION], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.uint8))
        variable.attrs["long_name"] = "Infrared Image Counts"
        tu.add_units(variable, "count")
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["count_ir"] = variable

        # count_wv
        default_array = DefaultData.create_default_array(
            IR_SIZE, IR_SIZE, np.uint8)
        variable = Variable([IR_Y_DIMENSION, IR_X_DIMENSION], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.uint8))
        variable.attrs["long_name"] = "WV Image Counts"
        tu.add_units(variable, "count")
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["count_wv"] = variable

        default_array = DefaultData.create_default_array(FULL_SIZE,
                                                         FULL_SIZE,
                                                         np.uint8,
                                                         fill_value=0)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["flag_masks"] = "1, 2, 4, 8, 16, 32"
        variable.attrs["flag_meanings"] = (
            "uncertainty_suspicious uncertainty_too_large space_view_suspicious not_on_earth suspect_time suspect_geo")
        variable.attrs["standard_name"] = "status_flag"
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["data_quality_bitmask"] = variable

        # distance_sun_earth
        dataset["distance_sun_earth"] = tu.create_scalar_float_variable(
            long_name="Sun-Earth distance", units="au")

        # solar_irradiance_vis
        dataset["solar_irradiance_vis"] = tu.create_scalar_float_variable(
            standard_name="solar_irradiance_vis",
            long_name="Solar effective Irradiance",
            units="W*m-2")

        # u_solar_irradiance_vis
        default_array = np.full([], np.NaN, np.float32)
        variable = Variable([], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "Uncertainty in Solar effective Irradiance"
        tu.add_units(variable, "Wm^-2")
        variable.attrs[corr.PIX_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.PIX_CORR_UNIT] = corr.PIXEL
        variable.attrs[corr.PIX_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.SCAN_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.SCAN_CORR_UNIT] = corr.LINE
        variable.attrs[corr.SCAN_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.IMG_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.IMG_CORR_UNIT] = corr.DAYS
        variable.attrs[corr.IMG_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs["pdf_shape"] = "rectangle"
        dataset["u_solar_irradiance_vis"] = variable

        if srf_size is None:
            srf_size = SRF_SIZE

        default_array = DefaultData.create_default_array(srf_size,
                                                         NUM_CHANNELS,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["channel", "n_frequencies"], default_array)
        variable.attrs["long_name"] = 'Spectral Response Function weights'
        variable.attrs["description"] = (
            'Per channel: weights for the relative spectral response function')
        tu.add_encoding(variable, np.int16, -32768, 0.000033)
        dataset['SRF_weights'] = variable

        default_array = DefaultData.create_default_array(srf_size,
                                                         NUM_CHANNELS,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["channel", "n_frequencies"], default_array)
        variable.attrs["long_name"] = 'Spectral Response Function frequencies'
        variable.attrs["description"] = (
            'Per channel: frequencies for the relative spectral response function')
        tu.add_encoding(variable, np.int32, -2147483648, 0.0001)
        tu.add_units(variable, "nm")
        variable.attrs["source"] = "Filename of SRF"
        variable.attrs["Valid(YYYYDDD)"] = "datestring"
        dataset['SRF_frequencies'] = variable

        # srf covariance_
        default_array = DefaultData.create_default_array(srf_size,
                                                         srf_size,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable([SRF_VIS_DIMENSION, SRF_VIS_DIMENSION],
                            default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = (
            "Covariance of the Visible Band Spectral Response Function")
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["covariance_spectral_response_function_vis"] = variable

        # u_srf_ir
        default_array = DefaultData.create_default_vector(srf_size,
                                                          np.float32,
                                                          fill_value=np.NaN)
        variable = Variable([SRF_IR_WV_DIMENSION], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = (
            "Uncertainty in Spectral Response Function for IR channel")
        dataset["u_spectral_response_function_ir"] = variable

        # u_srf_wv
        default_array = DefaultData.create_default_vector(srf_size,
                                                          np.float32,
                                                          fill_value=np.NaN)
        variable = Variable([SRF_IR_WV_DIMENSION], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = (
            "Uncertainty in Spectral Response Function for WV channel")
        dataset["u_spectral_response_function_wv"] = variable

        dataset["a_ir"] = tu.create_scalar_float_variable(
            long_name="Calibration parameter a for IR Band",
            units="mWm^-2sr^-1cm^-1")
        dataset["b_ir"] = tu.create_scalar_float_variable(
            long_name="Calibration parameter b for IR Band",
            units="mWm^-2sr^-1cm^-1/DC")
        dataset["u_a_ir"] = tu.create_scalar_float_variable(
            long_name="Uncertainty of calibration parameter a for IR Band",
            units="mWm^-2sr^-1cm^-1")
        dataset["u_b_ir"] = tu.create_scalar_float_variable(
            long_name="Uncertainty of calibration parameter b for IR Band",
            units="mWm^-2sr^-1cm^-1/DC")
        dataset["a_wv"] = tu.create_scalar_float_variable(
            long_name="Calibration parameter a for WV Band",
            units="mWm^-2sr^-1cm^-1")
        dataset["b_wv"] = tu.create_scalar_float_variable(
            long_name="Calibration parameter b for WV Band",
            units="mWm^-2sr^-1cm^-1/DC")
        dataset["u_a_wv"] = tu.create_scalar_float_variable(
            long_name="Uncertainty of calibration parameter a for WV Band",
            units="mWm^-2sr^-1cm^-1")
        dataset["u_b_wv"] = tu.create_scalar_float_variable(
            long_name="Uncertainty of calibration parameter b for WV Band",
            units="mWm^-2sr^-1cm^-1/DC")
        dataset["bt_a_ir"] = tu.create_scalar_float_variable(
            long_name="IR Band BT conversion parameter A", units="1")
        dataset["bt_b_ir"] = tu.create_scalar_float_variable(
            long_name="IR Band BT conversion parameter B", units="1")
        dataset["bt_a_wv"] = tu.create_scalar_float_variable(
            long_name="WV Band BT conversion parameter A", units="1")
        dataset["bt_b_wv"] = tu.create_scalar_float_variable(
            long_name="WV Band BT conversion parameter B", units="1")
        dataset["years_since_launch"] = tu.create_scalar_float_variable(
            long_name="Fractional year since launch of satellite",
            units="years")

        x_ir_wv_dim = dataset.dims["x_ir_wv"]
        dataset["x_ir_wv"] = Coordinate(
            "x_ir_wv", np.arange(x_ir_wv_dim, dtype=np.uint16))

        y_ir_wv_dim = dataset.dims["y_ir_wv"]
        dataset["y_ir_wv"] = Coordinate(
            "y_ir_wv", np.arange(y_ir_wv_dim, dtype=np.uint16))

        srf_size_dim = dataset.dims["srf_size"]
        dataset["srf_size"] = Coordinate(
            "srf_size", np.arange(srf_size_dim, dtype=np.uint16))
Example #6
    def test_lazily_indexed_array(self):
        original = np.random.rand(10, 20, 30)
        x = indexing.NumpyIndexingAdapter(original)
        v = Variable(["i", "j", "k"], original)
        lazy = indexing.LazilyOuterIndexedArray(x)
        v_lazy = Variable(["i", "j", "k"], lazy)
        arr = ReturnItem()
        # test orthogonally applied indexers
        indexers = [
            arr[:], 0, -2, arr[:3], [0, 1, 2, 3], [0],
            np.arange(10) < 5
        ]
        for i in indexers:
            for j in indexers:
                for k in indexers:
                    if isinstance(j, np.ndarray) and j.dtype.kind == "b":
                        j = np.arange(20) < 5
                    if isinstance(k, np.ndarray) and k.dtype.kind == "b":
                        k = np.arange(30) < 5
                    expected = np.asarray(v[i, j, k])
                    for actual in [
                            v_lazy[i, j, k],
                            v_lazy[:, j, k][i],
                            v_lazy[:, :, k][:, j][i],
                    ]:
                        assert expected.shape == actual.shape
                        assert_array_equal(expected, actual)
                        assert isinstance(actual._data,
                                          indexing.LazilyOuterIndexedArray)

                        # make sure actual.key is appropriate type
                        if all(
                                isinstance(k, (int, slice))
                                for k in v_lazy._data.key.tuple):
                            assert isinstance(v_lazy._data.key,
                                              indexing.BasicIndexer)
                        else:
                            assert isinstance(v_lazy._data.key,
                                              indexing.OuterIndexer)

        # test sequentially applied indexers
        indexers = [
            (3, 2),
            (arr[:], 0),
            (arr[:2], -1),
            (arr[:4], [0]),
            ([4, 5], 0),
            ([0, 1, 2], [0, 1]),
            ([0, 3, 5], arr[:2]),
        ]
        for i, j in indexers:
            expected = v[i][j]
            actual = v_lazy[i][j]
            assert expected.shape == actual.shape
            assert_array_equal(expected, actual)

            # test transpose
            if actual.ndim > 1:
                order = np.array(actual.dims)
                transposed = actual.transpose(*order)
                assert_array_equal(expected.transpose(*order), transposed)
                assert isinstance(
                    actual._data,
                    (
                        indexing.LazilyVectorizedIndexedArray,
                        indexing.LazilyOuterIndexedArray,
                    ),
                )

            assert isinstance(actual._data, indexing.LazilyOuterIndexedArray)
            assert isinstance(actual._data.array,
                              indexing.NumpyIndexingAdapter)
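For orientation, a standalone sketch of the lazy behaviour this test exercises, using xarray's internal indexing module (the wrapper class is called LazilyIndexedArray in some xarray versions):

    import numpy as np
    from xarray import Variable
    from xarray.core import indexing

    data = np.arange(24).reshape(2, 3, 4)
    lazy = indexing.LazilyOuterIndexedArray(
        indexing.NumpyIndexingAdapter(data))
    v = Variable(["i", "j", "k"], lazy)
    sub = v[:, 0]
    # Indexing only records a key on the wrapper; the data is materialized
    # when the array is converted:
    print(type(sub._data))        # LazilyOuterIndexedArray
    print(np.asarray(sub).shape)  # (2, 4)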
Example #7
    def add_original_variables(dataset, height, srf_size=None):
        tu.add_geolocation_variables(dataset, SWATH_WIDTH, height)
        tu.add_quality_flags(dataset, SWATH_WIDTH, height)

        # Temperature_misc_housekeeping
        default_array = DefaultData.create_default_array(
            height,
            NUM_THERMISTORS,
            np.float32,
            dims_names=["housekeeping", "y"],
            fill_value=np.NaN)
        variable = Variable(["housekeeping", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        variable.attrs["units"] = "TODO"
        dataset["Temperature_misc_housekeeping"] = variable

        # ancil_data
        default_array = DefaultData.create_default_array(
            height,
            ANCIL_VAL,
            np.float64,
            dims_names=["ancil_val", "y"],
            fill_value=np.NaN)
        variable = Variable(["ancil_val", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "Additional per scan information: year, day_of_year, secs_of_day, sat_lat, " \
                                      "sat_long, sat_alt, sat_heading, year, day_of_year, secs_of_day"
        dataset["ancil_data"] = variable

        # channel_quality_flag
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, NUM_CHANNELS, np.float32, np.NaN)
        variable = Variable(["channel", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        dataset["channel_quality_flag"] = variable

        # cold_counts
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, CALIB_NUMBER, np.float32, np.NaN)
        variable = Variable(["calib_number", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["cold_counts"] = variable

        # counts_to_tb_gain
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.float32,
            dims_names=["channel", "y"],
            fill_value=np.NaN)
        variable = Variable(["channel", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["counts_to_tb_gain"] = variable

        # counts_to_tb_offset
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.float32,
            dims_names=["channel", "y"],
            fill_value=np.NaN)
        variable = Variable(["channel", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["counts_to_tb_offset"] = variable

        # gain_control
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.float32,
            dims_names=["channel", "y"],
            fill_value=np.NaN)
        variable = Variable(["channel", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["gain_control"] = variable

        # tb
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, NUM_CHANNELS, np.float32, np.NaN)
        variable = Variable(["channel", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        variable.attrs["standard_name"] = "toa_brightness_temperature"
        tu.add_units(variable, "K")
        dataset["tb"] = variable

        # thermal_reference
        default_array = DefaultData.create_default_vector(
            height, np.float32, np.NaN)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        tu.add_units(variable, "TODO")
        dataset["thermal_reference"] = variable

        # warm_counts
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, CALIB_NUMBER, np.float32, np.NaN)
        variable = Variable(["calib_number", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["warm_counts"] = variable
Example #8
 def test_0d_datetime(self):
     v = Variable([], pd.Timestamp('2000-01-01'))
     self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
     self.assertEqual(v.values, np.datetime64('2000-01-01T00Z', 'ns'))
Example #9
 def test_0d_timedelta(self):
     for td in [pd.to_timedelta('1s'), np.timedelta64(1, 's')]:
         v = Variable([], td)
         self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
         self.assertEqual(v.values, np.timedelta64(10**9, 'ns'))  # 1 s == 1e9 ns
Example #10
 def test_aggregate_complex(self):
     # should skip NaNs
     v = self.cls('x', [1, 2j, np.nan])
     expected = Variable((), 0.5 + 1j)
     self.assertVariableAllClose(v.mean(), expected)
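The expected value follows from skipping the NaN: mean([1, 2j]) = (1 + 2j) / 2 = 0.5 + 1j, which NumPy confirms directly:

    import numpy as np
    print(np.nanmean(np.array([1, 2j, np.nan])))  # (0.5+1j)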
Example #11
 def test_multiindex(self):
     idx = pd.MultiIndex.from_product([list('abc'), [0, 1]])
     v = self.cls('x', idx)
     self.assertVariableIdentical(Variable((), ('a', 0)), v[0])
     self.assertVariableIdentical(v, v[:])
Example #13
class TestXarrayFunctions:
    @pytest.mark.parametrize(
        "obj",
        (
            pytest.param(Variable("x", np.linspace(0, 1, 5)), id="Variable"),
            pytest.param(
                DataArray(
                    data=np.linspace(0, 1, 5),
                    dims="x",
                    coords={"u": ("x", np.arange(5))},
                ),
                id="DataArray",
            ),
            pytest.param(
                Dataset(
                    {
                        "a": ("x", np.linspace(-1, 1, 5)),
                        "b": ("x", np.linspace(0, 1, 5)),
                    },
                    coords={"u": ("x", np.arange(5))},
                ),
                id="Dataset",
            ),
        ),
    )
    @pytest.mark.parametrize(
        "units",
        (
            pytest.param({None: None, "u": None}, id="no units"),
            pytest.param({None: unit_registry.m, "u": None}, id="data units"),
            pytest.param({None: None, "u": unit_registry.s}, id="coord units"),
        ),
    )
    def test_attach_units(self, obj, units):
        if isinstance(obj, Variable) and "u" in units:
            pytest.skip("variables don't have coordinates")

        if isinstance(obj, Dataset):
            units = units.copy()
            data_units = units.pop(None)
            units.update({"a": data_units, "b": data_units})

        actual = conversion.attach_units(obj, units)

        assert conversion.extract_units(actual) == units

    @pytest.mark.parametrize(
        ["obj", "units"],
        (
            pytest.param(
                DataArray(dims="x", coords={"x": [], "u": ("x", [])}),
                {None: "hPa", "x": "m"},
                id="DataArray",
            ),
            pytest.param(
                Dataset(
                    data_vars={"a": ("x", []), "b": ("x", [])},
                    coords={"x": [], "u": ("x", [])},
                ),
                {"a": "K", "b": "hPa", "u": "m"},
                id="Dataset",
            ),
            pytest.param(Variable("x", []), {None: "hPa"}, id="Variable"),
        ),
    )
    def test_attach_unit_attributes(self, obj, units):
        actual = conversion.attach_unit_attributes(obj, units)
        assert units == filter_none_values(
            conversion.extract_unit_attributes(actual))

    @pytest.mark.parametrize(
        "variant",
        (
            "data",
            pytest.param(
                "dims",
                marks=pytest.mark.xfail(reason="indexes don't support units")),
            "coords",
        ),
    )
    @pytest.mark.parametrize("typename", ("Variable", "DataArray", "Dataset"))
    def test_convert_units(self, typename, variant):
        if typename == "Variable":
            if variant != "data":
                pytest.skip("Variable doesn't store coordinates")

            data = np.linspace(0, 1, 3) * unit_registry.m
            obj = Variable(dims="x", data=data)
            units = {None: unit_registry.mm}
            expected_units = units
        elif typename == "DataArray":
            unit_variants = {
                "data": (unit_registry.Pa, 1, 1),
                "dims": (1, unit_registry.s, 1),
                "coords": (1, 1, unit_registry.m),
            }
            data_unit, dim_unit, coord_unit = unit_variants.get(variant)

            coords = {
                "data": {},
                "dims": {"x": [0, 1, 2] * dim_unit},
                "coords": {"u": ("x", [10, 3, 4] * coord_unit)},
            }

            obj = DataArray(
                dims="x",
                data=np.linspace(0, 1, 3) * data_unit,
                coords=coords.get(variant),
            )
            template = {
                obj.name: None,
                **{name: None for name in obj.coords},
            }
            units = {
                "data": {None: unit_registry.hPa},
                "dims": {"x": unit_registry.ms},
                "coords": {"u": unit_registry.mm},
            }.get(variant)

            expected_units = {**template, **units}
        elif typename == "Dataset":
            unit_variants = {
                "data": ((unit_registry.s, unit_registry.kg), 1, 1),
                "dims": ((1, 1), unit_registry.s, 1),
                "coords": ((1, 1), 1, unit_registry.m),
            }
            (data_unit1,
             data_unit2), dim_unit, coord_unit = unit_variants.get(variant)

            coords = {
                "data": {},
                "dims": {"x": [0, 1, 2] * dim_unit},
                "coords": {"u": ("x", [10, 3, 4] * coord_unit)},
            }

            obj = Dataset(
                data_vars={
                    "a": ("x", np.linspace(-1, 1, 3) * data_unit1),
                    "b": ("x", np.linspace(1, 2, 3) * data_unit2),
                },
                coords=coords.get(variant),
            )

            template = {
                **{name: None for name in obj.data_vars.keys()},
                **{name: None for name in obj.coords.keys()},
            }
            units = {
                "data": {"a": unit_registry.ms, "b": unit_registry.g},
                "dims": {"x": unit_registry.ms},
                "coords": {"u": unit_registry.mm},
            }.get(variant)
            expected_units = {**template, **units}

        actual = conversion.convert_units(obj, units)

        assert conversion.extract_units(actual) == expected_units
        assert_equal(obj, actual)

    @pytest.mark.parametrize(
        "units",
        (
            pytest.param({None: None, "u": None}, id="no units"),
            pytest.param({None: unit_registry.m, "u": None}, id="data units"),
            pytest.param({None: None, "u": unit_registry.s}, id="coord units"),
            pytest.param({None: unit_registry.m, "u": unit_registry.s},
                         id="data and coord units"),
        ),
    )
    @pytest.mark.parametrize("typename", ("Variable", "DataArray", "Dataset"))
    def test_extract_units(self, typename, units):
        if typename == "Variable":
            data_units = units.get(None) or 1
            data = np.linspace(0, 1, 2) * data_units

            units = units.copy()
            units.pop("u")

            obj = Variable("x", data)
        elif typename == "DataArray":
            data_units = units.get(None) or 1
            data = np.linspace(0, 1, 2) * data_units

            coord_units = units.get("u") or 1
            coords = {"u": ("x", np.arange(2) * coord_units)}

            obj = DataArray(data, dims="x", coords=coords)
        elif typename == "Dataset":
            data_units = units.get(None)
            data1 = np.linspace(-1, 1, 2) * (data_units or 1)
            data2 = np.linspace(0, 1, 2) * (data_units or 1)

            coord_units = units.get("u") or 1
            coords = {"u": ("x", np.arange(2) * coord_units)}

            units = units.copy()
            units.pop(None)
            units.update({"a": data_units, "b": data_units})

            obj = Dataset({"a": ("x", data1), "b": ("x", data2)},
                          coords=coords)

        assert conversion.extract_units(obj) == units

    @pytest.mark.parametrize(
        ["obj", "expected"],
        (
            pytest.param(
                DataArray(
                    coords={
                        "x": ("x", [], {"units": "m"}),
                        "u": ("x", [], {"units": "s"}),
                    },
                    attrs={"units": "hPa"},
                    dims="x",
                ),
                {"x": "m", "u": "s", None: "hPa"},
                id="DataArray",
            ),
            pytest.param(
                Dataset(
                    data_vars={
                        "a": ("x", [], {"units": "K"}),
                        "b": ("x", [], {"units": "hPa"}),
                    },
                    coords={
                        "x": ("x", [], {"units": "m"}),
                        "u": ("x", [], {"units": "s"}),
                    },
                ),
                {"a": "K", "b": "hPa", "x": "m", "u": "s"},
                id="Dataset",
            ),
            pytest.param(Variable("x", [], {"units": "hPa"}), {None: "hPa"},
                         id="Variable"),
        ),
    )
    def test_extract_unit_attributes(self, obj, expected):
        actual = conversion.extract_unit_attributes(obj)
        assert expected == actual

    @pytest.mark.parametrize(
        "obj",
        (
            pytest.param(Variable("x", [0, 4, 3] * unit_registry.m),
                         id="Variable"),
            pytest.param(
                DataArray(
                    dims="x",
                    data=[0, 4, 3] * unit_registry.m,
                    coords={"u": ("x", [2, 3, 4] * unit_registry.s)},
                ),
                id="DataArray",
            ),
            pytest.param(
                Dataset(
                    data_vars={
                        "a": ("x", [3, 2, 5] * unit_registry.Pa),
                        "b": ("x", [0, 2, -1] * unit_registry.kg),
                    },
                    coords={"u": ("x", [2, 3, 4] * unit_registry.s)},
                ),
                id="Dataset",
            ),
        ),
    )
    def test_strip_units(self, obj):
        if isinstance(obj, Variable):
            expected_units = {None: None}
        elif isinstance(obj, DataArray):
            expected_units = {None: None}
            expected_units.update({name: None for name in obj.coords.keys()})
        elif isinstance(obj, Dataset):
            expected_units = {name: None for name in obj.variables.keys()}

        actual = conversion.strip_units(obj)
        assert conversion.extract_units(actual) == expected_units

    @pytest.mark.parametrize(
        ["obj", "expected"],
        (
            pytest.param(
                DataArray(
                    coords={
                        "x": ("x", [], {"units": "m"}),
                        "u": ("x", [], {"units": "s"}),
                    },
                    attrs={"units": "hPa"},
                    dims="x",
                ),
                {"x": "m", "u": "s", None: "hPa"},
                id="DataArray",
            ),
            pytest.param(
                Dataset(
                    data_vars={
                        "a": ("x", [], {"units": "K"}),
                        "b": ("x", [], {"units": "hPa"}),
                    },
                    coords={
                        "x": ("x", [], {"units": "m"}),
                        "u": ("x", [], {"units": "s"}),
                    },
                ),
                {"a": "K", "b": "hPa", "x": "m", "u": "s"},
                id="Dataset",
            ),
            pytest.param(Variable("x", [], {"units": "hPa"}), {None: "hPa"},
                         id="Variable"),
        ),
    )
    def test_strip_unit_attributes(self, obj, expected):
        actual = conversion.strip_unit_attributes(obj)
        # stripping removes every unit attribute, so regardless of the
        # parametrized input nothing should remain afterwards
        expected = {}

        assert (filter_none_values(
            conversion.extract_unit_attributes(actual)) == expected)
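Taken together, these tests pin down a small conversion API. A sketch of the round-trip they imply, with the function names taken from the calls above (conversion itself is the module under test, e.g. a pint/xarray bridge):

    da = DataArray([1.0, 2.0], dims="x", attrs={"units": "m"})
    units = conversion.extract_unit_attributes(da)    # e.g. {None: "m", ...}
    stripped = conversion.strip_unit_attributes(da)   # "units" attrs removed
    restored = conversion.attach_unit_attributes(stripped, units)
    # restored carries the "units" attributes again (None entries aside)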
Example #14
 def test_missing_fillvalue(self) -> None:
     v = Variable(["x"], np.array([np.nan, 1, 2, 3]))
     v.encoding = {"dtype": "int16"}
     with pytest.warns(Warning, match="floating point data as an integer"):
         conventions.encode_cf_variable(v)
Example #15
    def setUp(self):
        self.values = np.random.RandomState(0).randn(4, 6)
        self.data = da.from_array(self.values, chunks=(2, 2))

        self.eager_var = Variable(('x', 'y'), self.values)
        self.lazy_var = Variable(('x', 'y'), self.data)
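A self-contained version of this fixture pair, showing that the dask-backed variable computes to the same values as the eager one (assumes dask is installed):

    import numpy as np
    import dask.array as da
    from xarray import Variable

    values = np.random.RandomState(0).randn(4, 6)
    eager_var = Variable(('x', 'y'), values)
    lazy_var = Variable(('x', 'y'), da.from_array(values, chunks=(2, 2)))
    # Accessing .values triggers computation of the dask-backed variable.
    np.testing.assert_allclose(eager_var.values, lazy_var.values)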
Example #16
 def test_repr_lazy_data(self):
     v = Variable('x', LazilyIndexedArray(np.arange(2e5)))
     self.assertIn('200000 values with dtype', repr(v))
     self.assertIsInstance(v._data, LazilyIndexedArray)
Example #17
    def test_remap_label_indexers(self):
        def test_indexer(data, x, expected_pos, expected_idx=None):
            pos, idx = indexing.remap_label_indexers(data, {"x": x})
            assert_array_equal(pos.get("x"), expected_pos)
            assert_array_equal(idx.get("x"), expected_idx)

        data = Dataset({"x": ("x", [1, 2, 3])})
        mindex = pd.MultiIndex.from_product([["a", "b"], [1, 2], [-1, -2]],
                                            names=("one", "two", "three"))
        mdata = DataArray(range(8), [("x", mindex)])

        test_indexer(data, 1, 0)
        test_indexer(data, np.int32(1), 0)
        test_indexer(data, Variable([], 1), 0)
        test_indexer(mdata, ("a", 1, -1), 0)
        test_indexer(
            mdata,
            ("a", 1),
            [True, True, False, False, False, False, False, False],
            [-1, -2],
        )
        test_indexer(
            mdata,
            "a",
            slice(0, 4, None),
            pd.MultiIndex.from_product([[1, 2], [-1, -2]]),
        )
        test_indexer(
            mdata,
            ("a", ),
            [True, True, True, True, False, False, False, False],
            pd.MultiIndex.from_product([[1, 2], [-1, -2]]),
        )
        test_indexer(mdata, [("a", 1, -1), ("b", 2, -2)], [0, 7])
        test_indexer(mdata, slice("a", "b"), slice(0, 8, None))
        test_indexer(mdata, slice(("a", 1), ("b", 1)), slice(0, 6, None))
        test_indexer(mdata, {"one": "a", "two": 1, "three": -1}, 0)
        test_indexer(
            mdata,
            {"one": "a", "two": 1},
            [True, True, False, False, False, False, False, False],
            [-1, -2],
        )
        test_indexer(
            mdata,
            {"one": "a", "three": -1},
            [True, False, True, False, False, False, False, False],
            [1, 2],
        )
        test_indexer(
            mdata,
            {"one": "a"},
            [True, True, True, True, False, False, False, False],
            pd.MultiIndex.from_product([[1, 2], [-1, -2]]),
        )
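The partial-selection cases above mirror pandas' own MultiIndex semantics: selecting a prefix of levels yields a positional indexer plus an index over the remaining levels, which is exactly what the expected_pos/expected_idx pairs encode. A brief standalone illustration:

    import pandas as pd

    mindex = pd.MultiIndex.from_product([["a", "b"], [1, 2], [-1, -2]],
                                        names=("one", "two", "three"))
    indexer, remaining = mindex.get_loc_level(("a", 1), level=[0, 1])
    print(indexer)    # positions where one == "a" and two == 1
    print(remaining)  # Index([-1, -2], name='three')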
Example #18
 def test_indexing_0d_unicode(self):
     # regression test for GH568
     actual = Variable(('x'), [u'tmax'])[0][()]
     expected = Variable((), u'tmax')
     self.assertVariableIdentical(actual, expected)
Example #19
    def test_vectorized_lazily_indexed_array(self):
        original = np.random.rand(10, 20, 30)
        x = indexing.NumpyIndexingAdapter(original)
        v_eager = Variable(["i", "j", "k"], x)
        lazy = indexing.LazilyOuterIndexedArray(x)
        v_lazy = Variable(["i", "j", "k"], lazy)
        arr = ReturnItem()

        def check_indexing(v_eager, v_lazy, indexers):
            for indexer in indexers:
                actual = v_lazy[indexer]
                expected = v_eager[indexer]
                assert expected.shape == actual.shape
                assert isinstance(
                    actual._data,
                    (
                        indexing.LazilyVectorizedIndexedArray,
                        indexing.LazilyOuterIndexedArray,
                    ),
                )
                assert_array_equal(expected, actual)
                v_eager = expected
                v_lazy = actual

        # test orthogonal indexing
        indexers = [(arr[:], 0, 1), (Variable("i", [0, 1]), )]
        check_indexing(v_eager, v_lazy, indexers)

        # vectorized indexing
        indexers = [
            (Variable("i", [0, 1]), Variable("i", [0, 1]), slice(None)),
            (slice(1, 3, 2), 0),
        ]
        check_indexing(v_eager, v_lazy, indexers)

        indexers = [
            (slice(None, None, 2), 0, slice(None, 10)),
            (Variable("i", [3, 2, 4, 3]), Variable("i", [3, 2, 1, 0])),
            (Variable(["i", "j"], [[0, 1], [1, 2]]), ),
        ]
        check_indexing(v_eager, v_lazy, indexers)

        indexers = [
            (Variable("i", [3, 2, 4, 3]), Variable("i", [3, 2, 1, 0])),
            (Variable(["i", "j"], [[0, 1], [1, 2]]), ),
        ]
        check_indexing(v_eager, v_lazy, indexers)
Example #20
 def test_shift2d(self):
     v = Variable(('x', 'y'), [[1, 2], [3, 4]])
     expected = Variable(('x', 'y'), [[np.nan, np.nan], [np.nan, 1]])
     self.assertVariableIdentical(expected, v.shift(x=1, y=1))
Example #21
    def add_full_fcdr_variables(dataset, height):
        # u_Temperature_misc_housekeeping
        default_array = DefaultData.create_default_array(
            height,
            NUM_THERMISTORS,
            np.float32,
            dims_names=["housekeeping", "y"],
            fill_value=np.NaN)
        variable = Variable(["housekeeping", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        variable.attrs["units"] = "TODO"
        dataset["u_Temperature_misc_housekeeping"] = variable

        # u_cold_counts
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, CALIB_NUMBER, np.float32, np.NaN)
        variable = Variable(["calib_number", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["u_cold_counts"] = variable

        # u_counts_to_tb_gain
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.float32,
            dims_names=["channel", "y"],
            fill_value=np.NaN)
        variable = Variable(["channel", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["u_counts_to_tb_gain"] = variable

        # u_counts_to_tb_offset
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.float32,
            dims_names=["channel", "y"],
            fill_value=np.NaN)
        variable = Variable(["channel", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["u_counts_to_tb_offset"] = variable

        # u_gain_control
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.float32,
            dims_names=["channel", "y"],
            fill_value=np.NaN)
        variable = Variable(["channel", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["u_gain_control"] = variable

        # u_tb
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, NUM_CHANNELS, np.float32, np.NaN)
        variable = Variable(["channel", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        tu.add_units(variable, "K")
        dataset["u_tb"] = variable

        # u_thermal_reference
        default_array = DefaultData.create_default_vector(
            height, np.float32, np.NaN)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        tu.add_units(variable, "TODO")
        dataset["u_thermal_reference"] = variable

        # u_warm_counts
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, CALIB_NUMBER, np.float32, np.NaN)
        variable = Variable(["calib_number", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["u_warm_counts"] = variable
Example #22
 def test_stack_unstack_consistency(self):
     v = Variable(['x', 'y'], [[0, 1], [2, 3]])
     actual = v.stack(z=('x', 'y')).unstack(
         z=OrderedDict([('x', 2), ('y', 2)]))
     self.assertVariableIdentical(actual, v)
Example #23
    def add_easy_fcdr_variables(dataset,
                                height,
                                corr_dx=None,
                                corr_dy=None,
                                lut_size=None):
        # height is ignored - supplied just for interface compatibility tb 2017-02-05

        # reflectance
        default_array = DefaultData.create_default_array(FULL_SIZE,
                                                         FULL_SIZE,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["standard_name"] = "toa_bidirectional_reflectance_vis"
        variable.attrs["long_name"] = (
            "top of atmosphere bidirectional reflectance factor per pixel of the visible band with central wavelength 0.7")
        tu.add_units(variable, "1")
        tu.add_encoding(variable,
                        np.uint16,
                        DefaultData.get_default_fill_value(np.uint16),
                        3.05176E-05,
                        chunksizes=CHUNKSIZES)
        dataset["toa_bidirectional_reflectance_vis"] = variable

        # u_independent
        default_array = DefaultData.create_default_array(FULL_SIZE,
                                                         FULL_SIZE,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["long_name"] = "independent uncertainty per pixel"
        tu.add_units(variable, "1")
        tu.add_encoding(variable,
                        np.uint16,
                        DefaultData.get_default_fill_value(np.uint16),
                        3.05176E-05,
                        chunksizes=CHUNKSIZES)
        dataset["u_independent_toa_bidirectional_reflectance"] = variable

        # u_structured
        default_array = DefaultData.create_default_array(FULL_SIZE,
                                                         FULL_SIZE,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["long_name"] = "structured uncertainty per pixel"
        tu.add_units(variable, "1")
        tu.add_encoding(variable,
                        np.uint16,
                        DefaultData.get_default_fill_value(np.uint16),
                        3.05176E-05,
                        chunksizes=CHUNKSIZES)
        dataset["u_structured_toa_bidirectional_reflectance"] = variable

        # u_common
        variable = tu.create_scalar_float_variable(
            long_name="common uncertainty per slot", units="1")
        dataset["u_common_toa_bidirectional_reflectance"] = variable

        dataset["sub_satellite_latitude_start"] = tu.create_scalar_float_variable(
            long_name="Latitude of the sub satellite point at image start",
            units="degrees_north")
        dataset["sub_satellite_longitude_start"] = tu.create_scalar_float_variable(
            long_name="Longitude of the sub satellite point at image start",
            units="degrees_east")
        dataset["sub_satellite_latitude_end"] = tu.create_scalar_float_variable(
            long_name="Latitude of the sub satellite point at image end",
            units="degrees_north")
        dataset["sub_satellite_longitude_end"] = tu.create_scalar_float_variable(
            long_name="Longitude of the sub satellite point at image end",
            units="degrees_east")

        tu.add_correlation_matrices(dataset, NUM_CHANNELS)

        if lut_size is not None:
            tu.add_lookup_tables(dataset, NUM_CHANNELS, lut_size=lut_size)

        if corr_dx is not None and corr_dy is not None:
            tu.add_correlation_coefficients(dataset, NUM_CHANNELS, corr_dx,
                                            corr_dy)

        tu.add_coordinates(dataset, ["vis", "wv", "ir"])
Example #24
 def test_big_endian_reduce(self):
     # regression test for GH489
     data = np.ones(5, dtype='>f4')
     v = Variable(['x'], data)
     expected = Variable([], 5)
     self.assertVariableIdentical(expected, v.sum())
Example #25
    def add_full_fcdr_variables(dataset, height):
        # height is ignored - supplied just for interface compatibility tb 2017-02-05

        # count_vis
        default_array = DefaultData.create_default_array(
            FULL_SIZE, FULL_SIZE, np.uint8)
        variable = Variable(["y", "x"], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.uint8))
        variable.attrs["long_name"] = "Image counts"
        tu.add_units(variable, "count")
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["count_vis"] = variable

        dataset["u_latitude"] = MVIRI._create_angle_variable_int(
            1.5E-05, long_name="Uncertainty in Latitude", unsigned=True)
        MVIRI._add_geo_correlation_attributes(dataset["u_latitude"])

        dataset["u_longitude"] = MVIRI._create_angle_variable_int(
            1.5E-05, long_name="Uncertainty in Longitude", unsigned=True)
        MVIRI._add_geo_correlation_attributes(dataset["u_longitude"])

        # u_time
        default_array = DefaultData.create_default_vector(IR_SIZE,
                                                          np.float32,
                                                          fill_value=np.NaN)
        variable = Variable([IR_Y_DIMENSION], default_array)
        variable.attrs["standard_name"] = "Uncertainty in Time"
        tu.add_units(variable, "s")
        tu.add_encoding(variable, np.uint16,
                        DefaultData.get_default_fill_value(np.uint16),
                        0.009155273)
        variable.attrs["pdf_shape"] = "rectangle"
        dataset["u_time"] = variable

        dataset["u_satellite_zenith_angle"] = MVIRI._create_angle_variable_int(
            7.62939E-05,
            long_name="Uncertainty in Satellite Zenith Angle",
            unsigned=True)
        dataset["u_satellite_azimuth_angle"] = MVIRI._create_angle_variable_int(
            7.62939E-05,
            long_name="Uncertainty in Satellite Azimuth Angle",
            unsigned=True)
        dataset["u_solar_zenith_angle"] = MVIRI._create_angle_variable_int(
            7.62939E-05,
            long_name="Uncertainty in Solar Zenith Angle",
            unsigned=True)
        dataset["u_solar_azimuth_angle"] = MVIRI._create_angle_variable_int(
            7.62939E-05,
            long_name="Uncertainty in Solar Azimuth Angle",
            unsigned=True)

        dataset["a0_vis"] = tu.create_scalar_float_variable(
            "Calibration Coefficient at Launch", units="Wm^-2sr^-1/count")
        dataset["a1_vis"] = tu.create_scalar_float_variable(
            "Time variation of a0", units="Wm^-2sr^-1/count day^-1 10^5")
        dataset["a2_vis"] = tu.create_scalar_float_variable(
            "Time variation of a0, quadratic term",
            units="Wm^-2sr^-1/count year^-2")
        dataset["mean_count_space_vis"] = tu.create_scalar_float_variable(
            "Space count", units="count")

        # u_a0_vis
        variable = tu.create_scalar_float_variable("Uncertainty in a0",
                                                   units="Wm^-2sr^-1/count")
        MVIRI._add_calibration_coeff_correlation_attributes(variable)
        dataset["u_a0_vis"] = variable

        # u_a1_vis
        variable = tu.create_scalar_float_variable(
            "Uncertainty in a1", units="Wm^-2sr^-1/count day^-1 10^5")
        MVIRI._add_calibration_coeff_correlation_attributes(variable)
        dataset["u_a1_vis"] = variable

        # u_a2_vis
        variable = tu.create_scalar_float_variable(
            "Uncertainty in a2", units="Wm^-2sr^-1/count year^-2")
        MVIRI._add_calibration_coeff_correlation_attributes(variable)
        dataset["u_a2_vis"] = variable

        # u_zero_vis
        variable = tu.create_scalar_float_variable("Uncertainty zero term",
                                                   units="Wm^-2sr^-1/count")
        MVIRI._add_calibration_coeff_correlation_attributes(
            variable, image_correlation_scale=[-np.inf, np.inf])
        dataset["u_zero_vis"] = variable

        # covariance_a_vis
        variable = tu.create_float_variable(
            COV_SIZE,
            COV_SIZE,
            long_name="Covariance of calibration coefficients from fit to calibration runs",
            dim_names=["cov_size", "cov_size"],
            fill_value=np.NaN)
        tu.add_fill_value(variable, np.NaN)
        tu.add_units(variable, "Wm^-2sr^-1/count")
        MVIRI._add_calibration_coeff_correlation_attributes(
            variable, image_correlation_scale=[-np.inf, np.inf])
        dataset["covariance_a_vis"] = variable

        dataset["u_electronics_counts_vis"] = tu.create_scalar_float_variable(
            "Uncertainty due to Electronics noise", units="count")
        dataset["u_digitization_counts_vis"] = tu.create_scalar_float_variable(
            "Uncertainty due to digitization", units="count")

        # allan_deviation_counts_space_vis
        variable = tu.create_scalar_float_variable(
            "Uncertainty of space count", units="count")
        variable.attrs[corr.SCAN_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.SCAN_CORR_UNIT] = corr.LINE
        variable.attrs[corr.SCAN_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs["pdf_shape"] = "digitised_gaussian"
        dataset["allan_deviation_counts_space_vis"] = variable

        # u_mean_counts_space_vis
        variable = tu.create_scalar_float_variable(
            "Uncertainty of space count", units="count")
        variable.attrs[corr.PIX_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.PIX_CORR_UNIT] = corr.PIXEL
        variable.attrs[corr.PIX_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.SCAN_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.SCAN_CORR_UNIT] = corr.LINE
        variable.attrs[corr.SCAN_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs["pdf_shape"] = "digitised_gaussian"
        dataset["u_mean_counts_space_vis"] = variable

        # sensitivity_solar_irradiance_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs["expression"] = (
            "distance_sun_earth * distance_sun_earth * PI * (count_vis - mean_count_space_vis) * (a2_vis * years_since_launch * years_since_launch + a1_vis * years_since_launch + a0_vis) / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis * solar_irradiance_vis)")
        dataset["sensitivity_solar_irradiance_vis"] = variable

        # sensitivity_count_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs["expression"] = (
            "distance_sun_earth * distance_sun_earth * PI * (a2_vis * years_since_launch * years_since_launch + a1_vis * years_since_launch + a0_vis) / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)")
        dataset["sensitivity_count_vis"] = variable

        # sensitivity_count_space
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs["expression"] = (
            "-1.0 * distance_sun_earth * distance_sun_earth * PI"
            " * (a2_vis * years_since_launch * years_since_launch"
            " + a1_vis * years_since_launch + a0_vis)"
            " / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)")
        dataset["sensitivity_count_space"] = variable

        # sensitivity_a0_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs["expression"] = (
            "distance_sun_earth * distance_sun_earth * PI"
            " * (count_vis - mean_count_space_vis)"
            " / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)")
        dataset["sensitivity_a0_vis"] = variable

        # sensitivity_a1_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs["expression"] = (
            "distance_sun_earth * distance_sun_earth * PI"
            " * (count_vis - mean_count_space_vis) * years_since_launch"
            " / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)")
        dataset["sensitivity_a1_vis"] = variable

        # sensitivity_a2_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs["expression"] = (
            "distance_sun_earth * distance_sun_earth * PI"
            " * (count_vis - mean_count_space_vis)"
            " * years_since_launch * years_since_launch"
            " / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)")
        dataset["sensitivity_a2_vis"] = variable

        effect_names = [
            "u_solar_irradiance_vis", "u_a0_vis", "u_a1_vis", "u_a2_vis",
            "u_zero_vis", "u_solar_zenith_angle", "u_mean_count_space_vis"
        ]
        dataset["Ne"] = Coordinate("Ne", effect_names)

        num_effects = len(effect_names)
        default_array = DefaultData.create_default_array(num_effects,
                                                         num_effects,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["Ne", "Ne"], default_array)
        tu.add_encoding(variable, np.int16, -32768, 3.05176E-05)
        variable.attrs["valid_min"] = -1
        variable.attrs["valid_max"] = 1
        variable.attrs["long_name"] = (
            "Channel error correlation matrix for structured effects")
        variable.attrs["description"] = (
            "Matrix describing correlations between errors of the uncertainty"
            " effects due to spectral response function errors (determined"
            " using Monte Carlo approach)")
        dataset["effect_correlation_matrix"] = variable
Example #26
    def test_reduce_funcs(self):
        v = Variable('x', np.array([1, np.nan, 2, 3]))
        self.assertVariableIdentical(v.mean(), Variable([], 2))
        self.assertVariableIdentical(v.mean(skipna=True), Variable([], 2))
        self.assertVariableIdentical(v.mean(skipna=False), Variable([],
                                                                    np.nan))
        self.assertVariableIdentical(np.mean(v), Variable([], 2))

        self.assertVariableIdentical(v.prod(), Variable([], 6))
        self.assertVariableIdentical(v.cumsum(axis=0),
                                     Variable('x', np.array([1, 1, 3, 6])))
        self.assertVariableIdentical(v.cumprod(axis=0),
                                     Variable('x', np.array([1, 1, 2, 6])))
        self.assertVariableIdentical(v.var(), Variable([], 2.0 / 3))

        if LooseVersion(np.__version__) < '1.9':
            with self.assertRaises(NotImplementedError):
                v.median()
        else:
            self.assertVariableIdentical(v.median(), Variable([], 2))

        v = Variable('x', [True, False, False])
        self.assertVariableIdentical(v.any(), Variable([], True))
        self.assertVariableIdentical(v.all(dim='x'), Variable([], False))

        v = Variable('t', pd.date_range('2000-01-01', periods=3))
        with self.assertRaises(NotImplementedError):
            v.max(skipna=True)
        self.assertVariableIdentical(v.max(),
                                     Variable([], pd.Timestamp('2000-01-03')))
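
The snippet above uses xarray's legacy unittest helpers
(assertVariableIdentical, LooseVersion gating). A minimal modern equivalent of
the skipna checks, assuming a current xarray install:

import numpy as np
from xarray import Variable
from xarray.testing import assert_identical

v = Variable("x", np.array([1.0, np.nan, 2.0, 3.0]))
assert_identical(v.mean(), Variable([], 2.0))  # skipna defaults to True
assert_identical(v.mean(skipna=False), Variable([], np.nan))
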
Example #27
    def test_missing_fillvalue(self):
        v = Variable(['x'], np.array([np.nan, 1, 2, 3]))
        v.encoding = {'dtype': 'int16'}
        with self.assertWarns('floating point data as an integer'):
            conventions.encode_cf_variable(v)
Example #28
def test_EncodedStringCoder_encode():
    # raw_data and expected_data come from the test's setup, which this
    # excerpt omits.
    coder = strings.EncodedStringCoder(allows_unicode=True)
    raw = Variable(('x',), raw_data, encoding={'dtype': 'S1'})
    actual = coder.encode(raw)
    expected = Variable(('x',), expected_data, attrs={'_Encoding': 'utf-8'})
    assert_identical(actual, expected)

    raw = Variable(('x',), raw_data)
    assert_identical(coder.encode(raw), raw)

    coder = strings.EncodedStringCoder(allows_unicode=False)
    assert_identical(coder.encode(raw), expected)


@pytest.mark.parametrize('original', [
    Variable(('x',), [b'ab', b'cdef']),
    Variable((), b'ab'),
    Variable(('x',), [b'a', b'b']),
    Variable((), b'a'),
])
def test_CharacterArrayCoder_roundtrip(original):
    coder = strings.CharacterArrayCoder()
    roundtripped = coder.decode(coder.encode(original))
    assert_identical(original, roundtripped)


@pytest.mark.parametrize('data', [
    np.array([b'a', b'bc']),
    np.array([b'a', b'bc'], dtype=strings.create_vlen_dtype(bytes_type)),
])
def test_CharacterArrayCoder_encode(data):
Example #29
    def add_full_fcdr_variables(dataset, height):
        # c_earth
        default_array = DefaultData.create_default_array_3d(SWATH_WIDTH, height, NUM_RAD_CHANNELS, np.uint16, dims_names=["rad_channel", "y", "x"])
        variable = Variable(["rad_channel", "y", "x"], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.uint16))
        variable.attrs["long_name"] = "counts_earth"
        tu.add_units(variable, "count")
        variable.attrs["ancilliary_variables"] = "scnlinf quality_scanline_bitmask quality_channel_bitmask mnfrqualflags"
        dataset["c_earth"] = variable

        # L_earth
        default_array = DefaultData.create_default_array_3d(SWATH_WIDTH, height, NUM_RAD_CHANNELS, np.float32, np.NaN, ["rad_channel", "y", "x"])
        variable = Variable(["rad_channel", "y", "x"], default_array)
        tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.0001)
        variable.attrs["standard_name"] = "toa_outgoing_inband_radiance"
        tu.add_units(variable, "W/Hz/m ** 2/sr")
        variable.attrs["long_name"] = "Channel radiance, NOAA/EUMETSAT calibrated"
        variable.attrs["ancilliary_variables"] = "scnlinf quality_scanline_bitmask quality_channel_bitmask mnfrqualflags"
        dataset["L_earth"] = variable

        # u_lat
        variable = HIRS._create_angle_variable(height, "uncertainty_latitude")
        dataset["u_lat"] = variable

        # u_lon
        variable = HIRS._create_angle_variable(height, "uncertainty_longitude")
        dataset["u_lon"] = variable

        # u_time
        variable = tu.create_float_variable(SWATH_WIDTH, height, "uncertainty_time")
        tu.add_encoding(variable, np.uint16, 65535, 0.01)
        tu.add_units(variable, "s")
        dataset["u_time"] = variable

        # u_c_earth
        default_array = DefaultData.create_default_array(NUM_CALIBRATION_CYCLE, NUM_CHANNELS, np.uint16, dims_names=["channel", "calibration_cycle"])
        variable = Variable(["channel", "calibration_cycle"], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.uint16))
        tu.add_units(variable, "count")
        variable.attrs["long_name"] = "uncertainty counts for Earth views"
        variable.attrs["ancilliary_variables"] = "u_c_earth_chan_corr"
        variable.attrs["channels_affected"] = "all"
        variable.attrs["parameter"] = "C_E"
        variable.attrs["pdf_shape"] = "gaussian"
        dataset["u_c_earth"] = variable

        # u_L_earth_independent
        variable = HIRS._create_3d_rad_uncertainty_variable(height, "uncertainty_radiance_Earth_random")
        tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.01)
        tu.add_units(variable, "mW m^-2 sr^-1 cm")
        dataset["u_L_earth_independent"] = variable

        # u_L_earth_structured
        variable = HIRS._create_3d_rad_uncertainty_variable(height, "uncertainty_radiance_Earth_structured")
        tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.01)
        tu.add_units(variable, "mW m^-2 sr^-1 cm")
        dataset["u_L_earth_structured"] = variable

        # u_L_earth_systematic
        variable = HIRS._create_3d_rad_uncertainty_variable(height, "uncertainty_radiance_Earth_systematic")
        tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.01)
        tu.add_units(variable, "mW m^-2 sr^-1 cm")
        dataset["u_L_earth_systematic"] = variable

        # u_L_earth_total
        variable = HIRS._create_3d_rad_uncertainty_variable(height, "uncertainty_radiance_Earth_total")
        tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.01)
        tu.add_units(variable, "mW m^-2 sr^-1 cm")
        dataset["u_L_earth_total"] = variable

        # S_u_L_earth
        variable = tu.create_float_variable(NUM_RAD_CHANNELS, NUM_RAD_CHANNELS, "covariance_radiance_Earth", dim_names=["rad_channel", "rad_channel"])
        tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.01)
        dataset["S_u_L_earth"] = variable

        # u_bt_random
        variable = HIRS._create_3d_bt_uncertainty_variable(height, "uncertainty_bt_random")
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        tu.add_units(variable, "K")
        dataset["u_bt_random"] = variable

        # u_bt_structured
        variable = HIRS._create_3d_bt_uncertainty_variable(height, "uncertainty_bt_structured")
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        tu.add_units(variable, "K")
        dataset["u_bt_structured"] = variable

        # u_bt_systematic
        variable = HIRS._create_3d_bt_uncertainty_variable(height, "uncertainty_bt_systematic")
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        tu.add_units(variable, "K")
        dataset["u_bt_systematic"] = variable

        # u_bt_total
        variable = HIRS._create_3d_bt_uncertainty_variable(height, "uncertainty_bt_total")
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        tu.add_units(variable, "K")
        dataset["u_bt_total"] = variable

        # S_bt
        variable = tu.create_float_variable(NUM_RAD_CHANNELS, NUM_RAD_CHANNELS, "covariance_brightness_temperature", dim_names=["rad_channel", "rad_channel"])
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        dataset["S_bt"] = variable

        # l1b_calcof
        default_array = DefaultData.create_default_array(height, NUM_COEFFS, np.float32, dims_names=["coeffs", "y"])
        variable = Variable(["coeffs", "y"], default_array)
        tu.add_encoding(variable, np.int32, DefaultData.get_default_fill_value(np.int32), 0.01)
        variable.attrs["standard_name"] = "calibration_coefficients"
        dataset["l1b_calcof"] = variable

        # navigation_status
        variable = HIRS._create_int32_vector(height, standard_name="status_flag", long_name="Navigation status bit field", orig_name="hrs_navstat")
        dataset["navigation_status"] = variable

        # quality_flags
        variable = HIRS._create_int32_vector(height, standard_name="status_flag", long_name="Quality indicator bit field", orig_name="hrs_qualind")
        dataset["quality_flags"] = variable

        variable = HIRS._create_scaled_uint16_vector(height, long_name="Platform altitude", original_name="hrs_scalti")
        tu.add_units(variable, "km")
        dataset["platform_altitude"] = variable

        variable = HIRS._create_scaled_int16_vector(height, long_name="Platform pitch angle", original_name="hrs_pitchang")
        tu.add_units(variable, "degree")
        dataset["platform_pitch_angle"] = variable

        variable = HIRS._create_scaled_int16_vector(height, long_name="Platform roll angle", original_name="hrs_rollang")
        tu.add_units(variable, "degree")
        dataset["platform_roll_angle"] = variable

        variable = HIRS._create_scaled_int16_vector(height, long_name="Platform yaw angle", original_name="hrs_yawang")
        tu.add_units(variable, "degree")
        dataset["platform_yaw_angle"] = variable

        # scan_angles
        default_array = DefaultData.create_default_array(NUM_SCAN_ANGLES, height, np.float32, dims_names=["y", "num_scan_angles"], fill_value=np.NaN)
        variable = Variable(["y", "num_scan_angles"], default_array)
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), scale_factor=0.01)
        tu.add_units(variable, "degree")
        variable.attrs["long_name"] = "Scan angles"
        variable.attrs["orig_name"] = "hrs_ang"
        dataset["scan_angles"] = variable

        dataset["l1b_scanline_number"] = HIRS._create_int16_vector(height, long_name="scanline number", orig_name="hrs_scnlin")
        dataset["scanline_position"] = HIRS._create_int8_vector(height, long_name="Scanline position number in 32 second cycle", orig_name="hrs_scnpos")

        # second_original_calibration_coefficients
        default_array = DefaultData.create_default_array(WIDTH_TODO, height, np.float32, fill_value=np.NaN)
        variable = Variable(["y", "width_todo"], default_array)
        tu.add_encoding(variable, np.int32, DefaultData.get_default_fill_value(np.int32), scale_factor=0.01)
        variable.attrs["long_name"] = "Second original calibration coefficients (unsorted)"
        variable.attrs["orig_name"] = "hrs_scalcof"
        dataset["l1b_second_original_calibration_coefficients"] = variable

        dataset["Tc_baseplate"] = HIRS._create_counts_vector(height, "temperature_baseplate_counts")
        dataset["Tc_ch"] = HIRS._create_counts_vector(height, "temperature_coolerhousing_counts")
        dataset["Tc_elec"] = HIRS._create_counts_vector(height, "temperature_electronics_counts")
        dataset["Tc_fsr"] = HIRS._create_counts_vector(height, "temperature_first_stage_radiator_counts")
        dataset["Tc_fwh"] = HIRS._create_counts_vector(height, "temperature_filter_wheel_housing_counts")
        dataset["Tc_fwm"] = HIRS._create_counts_vector(height, "temperature_filter_wheel_monitor_counts")
        dataset["Tc_icct"] = HIRS._create_counts_vector(height, "temperature_internal_cold_calibration_target_counts")
        dataset["Tc_iwct"] = HIRS._create_counts_vector(height, "temperature_internal_warm_calibration_target_counts")
        dataset["Tc_patch_exp"] = HIRS._create_counts_vector(height, "temperature_patch_expanded_scale_counts")
        dataset["Tc_patch_full"] = HIRS._create_counts_vector(height, "temperature_patch_full_range_counts")
        dataset["Tc_tlscp_prim"] = HIRS._create_counts_vector(height, "temperature_telescope_primary_counts")
        dataset["Tc_tlscp_sec"] = HIRS._create_counts_vector(height, "temperature_telescope_secondary_counts")
        dataset["Tc_tlscp_tert"] = HIRS._create_counts_vector(height, "temperature_telescope_tertiary_counts")
        dataset["Tc_scanmirror"] = HIRS._create_counts_vector(height, "temperature_scanmirror_counts")
        dataset["Tc_scanmotor"] = HIRS._create_counts_vector(height, "temperature_scanmotor_counts")

        dataset["u_Tc_baseplate"] = HIRS._create_counts_uncertainty_vector(height, "uncertainty_temperature_baseplate_counts")
        dataset["u_Tc_ch"] = HIRS._create_counts_uncertainty_vector(height, "uncertainty_temperature_coolerhousing_counts")
        dataset["u_Tc_elec"] = HIRS._create_counts_uncertainty_vector(height, "uncertainty_temperature_electronics_counts")
        dataset["u_Tc_fsr"] = HIRS._create_counts_uncertainty_vector(height, "uncertainty_temperature_first_stage_radiator_counts")
        dataset["u_Tc_fwh"] = HIRS._create_counts_uncertainty_vector(height, "uncertainty_temperature_filter_wheel_housing_counts")
        dataset["u_Tc_fwm"] = HIRS._create_counts_uncertainty_vector(height, "uncertainty_temperature_filter_wheel_monitor_counts")
        dataset["u_Tc_icct"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_internal_cold_calibration_target_counts")
        dataset["u_Tc_iwct"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_internal_warm_calibration_target_counts")
        dataset["u_Tc_patch_exp"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_patch_expanded_scale_counts")
        dataset["u_Tc_patch_full"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_patch_full_range_counts")
        dataset["u_Tc_tlscp_prim"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_telescope_primary_counts")
        dataset["u_Tc_tlscp_sec"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_telescope_secondary_counts")
        dataset["u_Tc_tlscp_tert"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_telescope_tertiary_counts")
        dataset["u_Tc_scanmirror"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_scanmirror_counts")
        dataset["u_Tc_scanmotor"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_scanmotor_counts")

        dataset["u_sol_za"] = HIRS._create_geo_angle_uncertainty_variable("uncertainty_solar_zenith_angle", height, FILL_VALUE)
        dataset["u_sol_aa"] = HIRS._create_geo_angle_uncertainty_variable("uncertainty_solar_azimuth_angle", height, FILL_VALUE)
        dataset["u_sat_za"] = HIRS._create_geo_angle_uncertainty_variable("uncertainty_satellite_zenith_angle", height, FILL_VALUE)
        dataset["u_sat_aa"] = HIRS._create_geo_angle_uncertainty_variable("uncertainty_local_azimuth_angle", height, FILL_VALUE)

        # u_c_earth_chan_corr
        dataset["u_c_earth_chan_corr"] = HIRS._create_channel_correlation_variable("u_c_earth channel correlations", np.int16)

        # u_c_space
        default_array = DefaultData.create_default_array(NUM_CALIBRATION_CYCLE, NUM_CHANNELS, np.uint16, dims_names=["channel", "calibration_cycle"])
        variable = Variable(["channel", "calibration_cycle"], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.uint16))
        tu.add_units(variable, "count")
        tu.add_scale_factor(variable, 0.005)
        variable.attrs["long_name"] = "uncertainty counts for space views"
        variable.attrs["ancilliary_variables"] = "u_c_space_chan_corr"
        variable.attrs["channels_affected"] = "all"
        variable.attrs["parameter"] = "C_s"
        variable.attrs["pdf_shape"] = "gaussian"
        dataset["u_c_space"] = variable

        # u_c_space_chan_corr
        dataset["u_c_space_chan_corr"] = HIRS._create_channel_correlation_variable("u_c_space channel correlations", np.uint16)

        # u_Earthshine
        dataset["u_Earthshine"] = HIRS._create_channel_uncertainty_uint16(height)

        # u_O_Re
        dataset["u_O_Re"] = HIRS._create_channel_uncertainty_uint16(height)

        # u_O_TIWCT
        default_array = DefaultData.create_default_vector(height, np.float32, np.NaN)
        variable = Variable(["y"], default_array)
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        dataset["u_O_TIWCT"] = variable

        # u_O_TPRT
        default_array = DefaultData.create_default_vector(height, np.uint16, DefaultData.get_default_fill_value(np.uint16))
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable, 65535)
        tu.add_scale_factor(variable, 0.01)
        tu.add_units(variable, "K")
        variable.attrs["channels_affected"] = "all"
        variable.attrs[corr.PIX_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.PIX_CORR_UNIT] = corr.PIXEL
        variable.attrs[corr.PIX_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.SCAN_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.SCAN_CORR_UNIT] = corr.LINE
        variable.attrs[corr.SCAN_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.IMG_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.IMG_CORR_UNIT] = corr.IMG
        variable.attrs[corr.IMG_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs["parameter"] = "O_TPRT"
        variable.attrs["pdf_shape"] = "gaussian"
        variable.attrs["short_name"] = "O_TPRT"
        variable.attrs["ancilliary_variables"] = "u_O_TPRT_chan_corr"
        dataset["u_O_TPRT"] = variable

        dataset["u_Rself"] = HIRS._create_channel_uncertainty_uint16(height)
        dataset["u_SRF_calib"] = HIRS._create_channel_uncertainty_uint16(height)

        default_array = DefaultData.create_default_array(PRT_NUMBER_IWT, PRT_READING, dtype=np.float32, dims_names=["prt_number_iwt", "prt_reading"], fill_value=np.NaN)
        variable = Variable(["prt_number_iwt", "prt_reading"], default_array)
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        dataset["u_d_PRT"] = variable

        dataset["u_electronics"] = HIRS._create_channel_uncertainty_uint16(height)
        dataset["u_periodic_noise"] = HIRS._create_channel_uncertainty_uint16(height)
        dataset["u_nonlinearity"] = HIRS._create_scaled_uint16_vector(NUM_CHANNELS, dimension_name=["channel"], scale_factor=0.01)
        dataset["emissivity"] = tu.create_scalar_float_variable("emissivity", units="1")
        dataset["temp_corr_slope"] = tu.create_scalar_float_variable("Slope for effective temperature correction", units="1")
        dataset["temp_corr_offset"] = tu.create_scalar_float_variable("Offset for effective temperature correction", units="1")

        # mnfrqualflags
        default_array = DefaultData.create_default_array(NUM_MINOR_FRAME, height, np.int32, dims_names=["y", "minor_frame"], fill_value=0)
        variable = Variable(["y", "minor_frame"], default_array)
        variable.attrs["standard_name"] = "status_flag"
        variable.attrs["long_name"] = "minor_frame_quality_flags_bitfield"
        dataset["mnfrqualflags"] = variable

        # scnlintime
        variable = HIRS._create_int32_vector(height, standard_name="time", long_name="Scan line time of day", orig_name="hrs_scnlintime")
        tu.add_units(variable, "ms")
        dataset["scnlintime"] = variable

        # scnlinf
        default_array = DefaultData.create_default_vector(height, np.int16, fill_value=0)
        variable = Variable(["y"], default_array)
        variable.attrs["standard_name"] = "status_flag"
        variable.attrs["long_name"] = "scanline_bitfield"
        variable.attrs["flag_masks"] = "16384, 32768"
        variable.attrs["flag_meanings"] = "clock_drift_correction southbound_data"
        dataset["scnlinf"] = variable

        # scantype
        default_array = DefaultData.create_default_vector(height, np.int8, fill_value=0)
        variable = Variable(["y"], default_array)
        variable.attrs["standard_name"] = "status_flag"
        variable.attrs["long_name"] = "scantype_bitfield"
        variable.attrs["flag_values"] = "0, 1, 2, 3"
        variable.attrs["flag_meanings"] = "earth_view space_view cold_bb_view main_bb_view"
        dataset["scantype"] = variable
Example #30
    def test_0d_int32_encoding(self) -> None:
        original = Variable((), np.int32(0), encoding={"dtype": "int64"})
        expected = Variable((), np.int64(0))
        actual = conventions.maybe_encode_nonstring_dtype(original)
        assert_identical(expected, actual)
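
For context, maybe_encode_nonstring_dtype is the conventions step that casts a
variable to its declared encoding dtype on write; the same cast is visible
through the public encode_cf_variable entry point. A short sketch, assuming a
current xarray install:

import numpy as np
import xarray as xr

v = xr.Variable((), np.int32(0), encoding={"dtype": "int64"})
encoded = xr.conventions.encode_cf_variable(v)
assert encoded.dtype == np.int64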