Example #1
    def _assert_angle_variables(self, ds):
        satellite_zenith_angle = ds.variables["satellite_zenith_angle"]
        self.assertEqual((6, ), satellite_zenith_angle.shape)
        self.assertTrue(np.isnan(satellite_zenith_angle.data[3]))
        self.assertEqual(np.uint16, satellite_zenith_angle.encoding['dtype'])
        self.assertEqual(DefaultData.get_default_fill_value(np.uint16),
                         satellite_zenith_angle.encoding['_FillValue'])
        self.assertEqual(0.01, satellite_zenith_angle.encoding['scale_factor'])
        self.assertEqual(-180.0, satellite_zenith_angle.encoding['add_offset'])
        self.assertEqual("platform_zenith_angle",
                         satellite_zenith_angle.attrs["standard_name"])
        self.assertEqual("degree", satellite_zenith_angle.attrs["units"])
        self.assertEqual("longitude latitude",
                         satellite_zenith_angle.attrs["coordinates"])

        solar_azimuth_angle = ds.variables["solar_azimuth_angle"]
        self.assertEqual((6, 56), solar_azimuth_angle.shape)
        self.assertTrue(np.isnan(solar_azimuth_angle.data[4, 4]))
        self.assertEqual(np.uint16, solar_azimuth_angle.encoding['dtype'])
        self.assertEqual(DefaultData.get_default_fill_value(np.uint16),
                         solar_azimuth_angle.encoding['_FillValue'])
        self.assertEqual(0.01, solar_azimuth_angle.encoding['scale_factor'])
        self.assertEqual(-180.0, solar_azimuth_angle.encoding['add_offset'])
        self.assertEqual(CHUNKING_2D,
                         solar_azimuth_angle.encoding['chunksizes'])
        self.assertEqual("solar_azimuth_angle",
                         solar_azimuth_angle.attrs["standard_name"])
        self.assertEqual("degree", solar_azimuth_angle.attrs["units"])
        self.assertEqual("longitude latitude",
                         solar_azimuth_angle.attrs["coordinates"])
Example #2
    def _create_refl_uncertainty_variable(height,
                                          long_name=None,
                                          structured=False):
        default_array = DefaultData.create_default_array(SWATH_WIDTH,
                                                         height,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["y", "x"], default_array)

        tu.add_units(variable, "percent")
        tu.add_geolocation_attribute(variable)
        variable.attrs["long_name"] = long_name

        if structured:
            tu.add_encoding(variable,
                            np.int16,
                            DefaultData.get_default_fill_value(np.int16),
                            0.01,
                            chunksizes=CHUNKS_2D)
            variable.attrs["valid_min"] = 3
            variable.attrs["valid_max"] = 5
        else:
            tu.add_encoding(variable,
                            np.int16,
                            DefaultData.get_default_fill_value(np.int16),
                            0.00001,
                            chunksizes=CHUNKS_2D)
            variable.attrs["valid_max"] = 1000
            variable.attrs["valid_min"] = 10
        return variable
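A small arithmetic sketch of what the two encodings above imply, assuming add_offset defaults to 0 when it is not passed to add_encoding (the 0.8 percent value is made up for illustration):

u = 0.8                          # example uncertainty in percent
print(round(u / 0.01))           # structured branch: packs to 80 in the int16 variable
print(round(u / 0.00001))        # other branch: 80000, beyond the int16 range, so the finer
                                 # scale only covers values up to about 0.33 percent (32767 * 0.00001)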
Example #3
    def assert_common_angles(self, ds, chunking=None):
        satellite_zenith_angle = ds.variables["satellite_zenith_angle"]
        self.assertEqual((6, 56), satellite_zenith_angle.shape)
        self.assertTrue(np.isnan(satellite_zenith_angle.data[2, 2]))
        self.assertEqual(np.uint16, satellite_zenith_angle.encoding['dtype'])
        self.assertEqual(DefaultData.get_default_fill_value(np.uint16), satellite_zenith_angle.encoding['_FillValue'])
        self.assertEqual(0.01, satellite_zenith_angle.encoding['scale_factor'])
        self.assertEqual(0, satellite_zenith_angle.encoding['add_offset'])
        if chunking is not None:
            self.assertEqual(chunking, satellite_zenith_angle.encoding['chunksizes'])
        self.assertEqual("platform_zenith_angle", satellite_zenith_angle.attrs["standard_name"])
        self.assertEqual("degree", satellite_zenith_angle.attrs["units"])
        self.assertEqual("longitude latitude", satellite_zenith_angle.attrs["coordinates"])
        self.assertEqual([0, 180], satellite_zenith_angle.attrs["valid_range"])

        solar_zenith_angle = ds.variables["solar_zenith_angle"]
        self.assertEqual((6, 56), solar_zenith_angle.shape)
        self.assertTrue(np.isnan(solar_zenith_angle.data[3, 3]))
        self.assertEqual(np.uint16, solar_zenith_angle.encoding['dtype'])
        self.assertEqual(DefaultData.get_default_fill_value(np.uint16), solar_zenith_angle.encoding['_FillValue'])
        self.assertEqual(0.01, solar_zenith_angle.encoding['scale_factor'])
        self.assertEqual(0, solar_zenith_angle.encoding['add_offset'])
        if chunking is not None:
            self.assertEqual(chunking, solar_zenith_angle.encoding['chunksizes'])
        self.assertEqual("solar_zenith_angle", solar_zenith_angle.attrs["standard_name"])
        self.assertEqual("solar_zenith_angle", solar_zenith_angle.attrs["orig_name"])
        self.assertEqual("degree", solar_zenith_angle.attrs["units"])
        self.assertEqual("longitude latitude", solar_zenith_angle.attrs["coordinates"])
        self.assertEqual([0, 180], solar_zenith_angle.attrs["valid_range"])

        satellite_azimuth_angle = ds.variables["satellite_azimuth_angle"]
        self.assertEqual((6, 56), satellite_azimuth_angle.shape)
        self.assertTrue(np.isnan(satellite_azimuth_angle.data[5, 5]))
        self.assertEqual(np.uint16, satellite_azimuth_angle.encoding['dtype'])
        self.assertEqual(DefaultData.get_default_fill_value(np.uint16), satellite_azimuth_angle.encoding['_FillValue'])
        self.assertEqual(0.01, satellite_azimuth_angle.encoding['scale_factor'])
        self.assertEqual(0, satellite_azimuth_angle.encoding['add_offset'])
        if chunking is not None:
            self.assertEqual(chunking, satellite_azimuth_angle.encoding['chunksizes'])
        self.assertEqual("sensor_azimuth_angle", satellite_azimuth_angle.attrs["standard_name"])
        self.assertEqual([0, 360], satellite_azimuth_angle.attrs["valid_range"])
        self.assertEqual("clockwise from north", satellite_azimuth_angle.attrs["comment"])
        self.assertEqual("degree", satellite_azimuth_angle.attrs["units"])
        self.assertEqual("longitude latitude", satellite_azimuth_angle.attrs["coordinates"])

        solar_azimuth_angle = ds.variables["solar_azimuth_angle"]
        self.assertEqual((6, 56), solar_azimuth_angle.shape)
        self.assertTrue(np.isnan(solar_azimuth_angle.data[4, 4]))
        self.assertEqual(np.uint16, solar_azimuth_angle.encoding['dtype'])
        self.assertEqual(DefaultData.get_default_fill_value(np.uint16), solar_azimuth_angle.encoding['_FillValue'])
        self.assertEqual(0.01, solar_azimuth_angle.encoding['scale_factor'])
        self.assertEqual(0, solar_azimuth_angle.encoding['add_offset'])
        if chunking is not None:
            self.assertEqual(chunking, solar_azimuth_angle.encoding['chunksizes'])
        self.assertEqual("solar_azimuth_angle", solar_azimuth_angle.attrs["standard_name"])
        self.assertEqual([0, 360], solar_azimuth_angle.attrs["valid_range"])
        self.assertEqual("clockwise from north", solar_azimuth_angle.attrs["comment"])
        self.assertEqual("degree", solar_azimuth_angle.attrs["units"])
        self.assertEqual("longitude latitude", solar_azimuth_angle.attrs["coordinates"])
Example #4
    def add_common_sensor_variables(dataset, height, srf_size):
        # scanline
        default_array = DefaultData.create_default_vector(height, np.int16)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.int16))
        variable.attrs["long_name"] = "scanline_number"
        tu.add_units(variable, "count")
        dataset["scanline"] = variable
        # time
        default_array = DefaultData.create_default_vector(height, np.datetime64)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable, 4294967295)
        variable.attrs["standard_name"] = "time"
        variable.attrs["long_name"] = "Acquisition time in seconds since 1970-01-01 00:00:00"
        # do not set 'units' or '_FillValue' here, xarray sets these from the encoding upon storing the file
        tu.add_encoding(variable, np.uint32, None, scale_factor=0.1)
        variable.encoding["units"] = "seconds since 1970-01-01 00:00:00"
        # encoding 'add_offset' varies per file and either needs to be set
        # by the user or intelligently in fiduceo.fcdr.writer.fcdr_writer.FCDRWriter.write
        dataset["time"] = variable
        # quality_scanline_bitmask
        default_array = DefaultData.create_default_vector(height, np.int32, fill_value=0)
        variable = Variable(["y"], default_array)
        variable.attrs["standard_name"] = "status_flag"
        variable.attrs["long_name"] = "quality_indicator_bitfield"
        variable.attrs["flag_masks"] = "1, 2, 4, 8, 16"
        variable.attrs["flag_meanings"] = "do_not_use_scan reduced_context bad_temp_no_rself suspect_geo suspect_time"
        dataset["quality_scanline_bitmask"] = variable

        default_array = DefaultData.create_default_array(srf_size, NUM_CHANNELS, np.float32, fill_value=np.NaN)
        variable = Variable(["channel", "n_wavelengths"], default_array)
        variable.attrs["long_name"] = 'Spectral Response Function weights'
        variable.attrs["description"] = 'Per channel: weights for the relative spectral response function'
        tu.add_encoding(variable, np.int16, -32768, 0.000033)
        dataset['SRF_weights'] = variable

        default_array = DefaultData.create_default_array(srf_size, NUM_CHANNELS, np.float32, fill_value=np.NaN)
        variable = Variable(["channel", "n_wavelengths"], default_array)
        variable.attrs["long_name"] = 'Spectral Response Function wavelengths'
        variable.attrs["description"] = 'Per channel: wavelengths for the relative spectral response function'
        tu.add_encoding(variable, np.int32, -2147483648, 0.0001)
        tu.add_units(variable, "um")
        dataset['SRF_wavelengths'] = variable

        default_vector = DefaultData.create_default_vector(height, np.uint8, fill_value=255)
        variable = Variable(["y"], default_vector)
        tu.add_fill_value(variable, 255)
        variable.attrs["long_name"] = 'Indicator of original file'
        variable.attrs["description"] = "Indicator for mapping each line to its corresponding original level 1b file. See global attribute 'source' for the filenames. 0 corresponds to 1st listed file, 1 to 2nd file."
        dataset["scanline_map_to_origl1bfile"] = variable

        default_vector = DefaultData.create_default_vector(height, np.int16, fill_value=DefaultData.get_default_fill_value(np.int16))
        variable = Variable(["y"], default_vector)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.int16))
        variable.attrs["long_name"] = 'Original_Scan_line_number'
        variable.attrs["description"] = 'Original scan line numbers from corresponding l1b records'
        dataset["scanline_origl1b"] = variable
Example #5
    def add_common_sensor_variables(dataset, height, srf_size):
        # scanline
        default_array = DefaultData.create_default_vector(height, np.int16)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.int16))
        variable.attrs["long_name"] = "scanline_number"
        tu.add_units(variable, "count")
        dataset["scanline"] = variable
        # time
        default_array = DefaultData.create_default_vector(height, np.uint32)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.uint32))
        variable.attrs["standard_name"] = "time"
        variable.attrs["long_name"] = "Acquisition time in seconds since 1970-01-01 00:00:00"
        tu.add_units(variable, "s")
        dataset["time"] = variable
        # quality_scanline_bitmask
        default_array = DefaultData.create_default_vector(height, np.int32, fill_value=0)
        variable = Variable(["y"], default_array)
        variable.attrs["standard_name"] = "status_flag"
        variable.attrs["long_name"] = "quality_indicator_bitfield"
        variable.attrs["flag_masks"] = "1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824"
        variable.attrs["flag_meanings"] = "do_not_use_scan time_sequence_error data_gap_preceding_scan no_calibration no_earth_location clock_update status_changed line_incomplete time_field_bad time_field_bad_not_inf inconsistent_sequence scan_time_repeat uncalib_bad_time calib_few_scans uncalib_bad_prt calib_marginal_prt uncalib_channels uncalib_inst_mode quest_ant_black_body zero_loc bad_loc_time bad_loc_marginal bad_loc_reason bad_loc_ant reduced_context bad_temp_no_rself"
        dataset["quality_scanline_bitmask"] = variable

        default_array = DefaultData.create_default_array(srf_size, NUM_CHANNELS, np.float32, fill_value=np.NaN)
        variable = Variable(["channel", "n_frequencies"], default_array)
        variable.attrs["long_name"] = 'Spectral Response Function weights'
        variable.attrs["description"] = 'Per channel: weights for the relative spectral response function'
        tu.add_encoding(variable, np.int16, -32768, 0.000033)
        dataset['SRF_weights'] = variable

        default_array = DefaultData.create_default_array(srf_size, NUM_CHANNELS, np.float32, fill_value=np.NaN)
        variable = Variable(["channel", "n_frequencies"], default_array)
        variable.attrs["long_name"] = 'Spectral Response Function wavelengths'
        variable.attrs["description"] = 'Per channel: wavelengths for the relative spectral response function'
        tu.add_encoding(variable, np.int32, -2147483648, 0.0001)
        tu.add_units(variable, "um")
        dataset['SRF_wavelengths'] = variable

        default_vector = DefaultData.create_default_vector(height, np.uint8, fill_value=255)
        variable = Variable(["y"], default_vector)
        tu.add_fill_value(variable, 255)
        variable.attrs["long_name"] = 'Indicator of original file'
        variable.attrs["description"] = "Indicator for mapping each line to its corresponding original level 1b file. See global attribute 'source' for the filenames. 0 corresponds to 1st listed file, 1 to 2nd file."
        dataset["scanline_map_to_origl1bfile"] = variable

        default_vector = DefaultData.create_default_vector(height, np.int16, fill_value=DefaultData.get_default_fill_value(np.int16))
        variable = Variable(["y"], default_vector)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.int16))
        variable.attrs["long_name"] = 'Original_Scan_line_number'
        variable.attrs["description"] = 'Original scan line numbers from corresponding l1b records'
        dataset["scanline_origl1b"] = variable
Example #6
 def _assert_correct_counts_variable(self, ds, name, long_name):
     variable = ds.variables[name]
     self.assertEqual((5, 409), variable.shape)
     self.assertEqual(DefaultData.get_default_fill_value(np.int32),
                      variable.data[3, 306])
     self.assertEqual(DefaultData.get_default_fill_value(np.int32),
                      variable.attrs["_FillValue"])
     self.assertEqual(long_name, variable.attrs["long_name"])
     self.assertEqual("count", variable.attrs["units"])
     self.assertEqual("longitude latitude", variable.attrs["coordinates"])
     self.assertEqual(CHUNKING, variable.encoding["chunksizes"])
Example #7
    def _create_int32_vector(height, standard_name=None, long_name=None, orig_name=None):
        default_array = DefaultData.create_default_vector(height, np.int32)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.int32))
        HIRS._set_name_attributes(long_name, orig_name, standard_name, variable)

        return variable
Example #8
    def create_float_variable(width,
                              height,
                              standard_name=None,
                              long_name=None,
                              dim_names=None,
                              fill_value=None):
        if fill_value is None:
            default_array = DefaultData.create_default_array(
                width, height, np.float32)
        else:
            default_array = DefaultData.create_default_array(
                width, height, np.float32, fill_value=fill_value)

        if dim_names is None:
            variable = Variable(["y", "x"], default_array)
        else:
            variable = Variable(dim_names, default_array)

        if fill_value is None:
            variable.attrs["_FillValue"] = DefaultData.get_default_fill_value(
                np.float32)
        else:
            variable.attrs["_FillValue"] = fill_value

        if standard_name is not None:
            variable.attrs["standard_name"] = standard_name

        if long_name is not None:
            variable.attrs["long_name"] = long_name

        return variable
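A usage sketch for the factory above, not from the source; the tu alias follows how Example #29 reaches the same helper, and the numpy lines only illustrate the float32 default fill asserted in Example #20:

# hypothetical call, mirroring Example #29:
# cov = tu.create_float_variable(3, 3, long_name="covariance", dim_names=["cov_size", "cov_size"])
# with fill_value=None both the data and the _FillValue attribute carry the float32 default:
import numpy as np
default_fill = np.float32(9.96921e36)     # value asserted for float32 in Example #20
print(np.full((3, 3), default_fill, dtype=np.float32))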
Example #9
    def _assert_line_int32_variable(self,
                                    ds,
                                    name,
                                    standard_name=None,
                                    long_name=None,
                                    orig_name=None):
        variable = ds.variables[name]
        self.assertEqual((7, ), variable.shape)
        self.assertEqual(DefaultData.get_default_fill_value(np.int32),
                         variable.data[4])
        self.assertEqual(DefaultData.get_default_fill_value(np.int32),
                         variable.attrs["_FillValue"])
        self._assert_name_attributes(variable, standard_name, long_name,
                                     orig_name)

        return variable
Example #10
 def _create_counts_uncertainty_vector_uint32(height, standard_name):
     default_array = DefaultData.create_default_vector(height, np.float32)
     variable = Variable(["y"], default_array)
     tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.01)
     variable.attrs["standard_name"] = standard_name
     tu.add_units(variable, "count")
     return variable
Example #11
    def _create_scaled_int16_vector(height, standard_name=None, original_name=None, long_name=None, scale_factor=0.01):
        default_array = DefaultData.create_default_vector(height, np.float32)
        variable = Variable(["y"], default_array)
        tu.add_encoding(variable, np.int16, DefaultData.get_default_fill_value(np.int16), scale_factor)
        HIRS._set_name_attributes(long_name, original_name, standard_name, variable)

        return variable
Example #12
    def _create_angle_variable_int(scale_factor,
                                   standard_name=None,
                                   long_name=None,
                                   unsigned=False,
                                   fill_value=None):
        default_array = DefaultData.create_default_array(TIE_SIZE,
                                                         TIE_SIZE,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["y_tie", "x_tie"], default_array)

        if unsigned is True:
            data_type = np.uint16
        else:
            data_type = np.int16

        if fill_value is None:
            fill_value = DefaultData.get_default_fill_value(data_type)

        if standard_name is not None:
            variable.attrs["standard_name"] = standard_name

        if long_name is not None:
            variable.attrs["long_name"] = long_name

        tu.add_units(variable, "degree")
        variable.attrs["tie_points"] = "true"
        tu.add_encoding(variable,
                        data_type,
                        fill_value,
                        scale_factor,
                        chunksizes=CHUNKSIZES)
        return variable
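A quick check of the scale factors that Example #28 passes to this factory; they appear sized so the full angular range fills the 16-bit integer range (an interpretation, not stated in the source):

import numpy as np
print(np.iinfo(np.uint16).max * 0.005493164)   # ~360.0 degrees representable for the unsigned azimuth angles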
Example #13
    def add_easy_fcdr_variables(dataset, height, corr_dx=None, corr_dy=None, lut_size=None):
        # height is ignored - supplied just for interface compatibility tb 2017-02-05

        # reflectance
        default_array = DefaultData.create_default_array(FULL_SIZE, FULL_SIZE, np.float32, fill_value=np.NaN)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["standard_name"] = "toa_bidirectional_reflectance_vis"
        variable.attrs["long_name"] = "top of atmosphere bidirectional reflectance factor per pixel of the visible band with central wavelength 0.7"
        tu.add_units(variable, "1")
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 3.05176E-05, chunksizes=CHUNKSIZES)
        dataset["toa_bidirectional_reflectance_vis"] = variable

        # u_independent
        default_array = DefaultData.create_default_array(FULL_SIZE, FULL_SIZE, np.float32, fill_value=np.NaN)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["long_name"] = "independent uncertainty per pixel"
        tu.add_units(variable, "1")
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 3.05176E-05, chunksizes=CHUNKSIZES)
        dataset["u_independent_toa_bidirectional_reflectance"] = variable

        # u_structured
        default_array = DefaultData.create_default_array(FULL_SIZE, FULL_SIZE, np.float32, fill_value=np.NaN)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["long_name"] = "structured uncertainty per pixel"
        tu.add_units(variable, "1")
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 3.05176E-05, chunksizes=CHUNKSIZES)
        dataset["u_structured_toa_bidirectional_reflectance"] = variable

        # u_common
        dataset["u_common_toa_bidirectional_reflectance"] = tu.create_scalar_float_variable(long_name="common uncertainty per slot", units="1")

        dataset["sub_satellite_latitude_start"] = tu.create_scalar_float_variable(long_name="Latitude of the sub satellite point at image start", units="degrees_north")
        dataset["sub_satellite_longitude_start"] = tu.create_scalar_float_variable(long_name="Longitude of the sub satellite point at image start", units="degrees_east")
        dataset["sub_satellite_latitude_end"] = tu.create_scalar_float_variable(long_name="Latitude of the sub satellite point at image end", units="degrees_north")
        dataset["sub_satellite_longitude_end"] = tu.create_scalar_float_variable(long_name="Longitude of the sub satellite point at image end", units="degrees_east")

        tu.add_correlation_matrices(dataset, NUM_CHANNELS)

        if lut_size is not None:
            tu.add_lookup_tables(dataset, NUM_CHANNELS, lut_size=lut_size)

        if corr_dx is not None and corr_dy is not None:
            tu.add_correlation_coefficients(dataset, NUM_CHANNELS, corr_dx, corr_dy)

        tu.add_coordinates(dataset, ["vis", "wv", "ir"])
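For the reflectance packing above, a small round-trip sketch assuming add_offset defaults to 0 when it is not passed to add_encoding; the 0.37 reflectance factor is an example value:

import numpy as np
scale = 3.05176e-05                       # scale_factor used for the reflectance variables above
refl = 0.37                               # example reflectance factor
packed = np.uint16(round(refl / scale))   # -> 12124
print(packed, packed * scale)             # unpacks to ~0.369995, within half a scale step

The uint16 range with this scale factor spans roughly 0 to 2, so reflectance factors slightly above 1 remain representable.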
Example #14
    def _assert_line_uint32_variable(self,
                                     ds,
                                     name,
                                     standard_name=None,
                                     long_name=None,
                                     orig_name=None):
        variable = ds.variables[name]
        self.assertEqual((7, ), variable.shape)
        self.assertEqual(DefaultData.get_default_fill_value(np.float32),
                         variable.data[4])
        self.assertEqual(DefaultData.get_default_fill_value(np.uint32),
                         variable.encoding["_FillValue"])
        self.assertEqual(0.01, variable.encoding["scale_factor"])
        self.assertEqual(np.uint32, variable.encoding["dtype"])
        self._assert_name_attributes(variable, standard_name, long_name,
                                     orig_name)

        return variable
Example #15
 def _create_easy_fcdr_variable(height, long_name):
     default_array = DefaultData.create_default_array_3d(SWATH_WIDTH, height, NUM_CHANNELS, np.float32, np.NaN)
     variable = Variable(["channel", "y", "x"], default_array)
     tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.001, chunksizes=CHUNKING_BT)
     variable.attrs["long_name"] = long_name
     tu.add_units(variable, "K")
     tu.add_geolocation_attribute(variable)
     variable.attrs["valid_min"] = 1
     variable.attrs["valid_max"] = 65534
     return variable
Example #16
 def _create_overpass_counts_variable(height, width, description):
     fill_value = DefaultData.get_default_fill_value(np.uint8)
     default_array = DefaultData.create_default_array(width,
                                                      height,
                                                      np.uint8,
                                                      fill_value=fill_value)
     variable = Variable(["y", "x"], default_array)
     tu.add_fill_value(variable, fill_value)
     variable.attrs["description"] = description
     variable.attrs["coordinates"] = "lon lat"
     return variable
Example #17
 def _create_float32_vector(fill_value, height, long_name, orig_name):
     default_array = DefaultData.create_default_vector(height, np.float32, fill_value=fill_value)
     variable = Variable(["y"], default_array)
     if fill_value is None:
         tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.float32))
     else:
         tu.add_fill_value(variable, fill_value)
     variable.attrs["long_name"] = long_name
     if orig_name is not None:
         variable.attrs["orig_name"] = orig_name
     return variable
Example #18
    def _create_geo_angle_variable(standard_name, height, orig_name=None, chunking=None):
        default_array = DefaultData.create_default_array(SWATH_WIDTH, height, np.float32, fill_value=np.NaN)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["standard_name"] = standard_name
        if orig_name is not None:
            variable.attrs["orig_name"] = orig_name

        tu.add_units(variable, "degree")
        tu.add_geolocation_attribute(variable)
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01, -180.0, chunking)
        return variable
Example #19
 def _create_counts_variable(height, long_name):
     default_array = DefaultData.create_default_array(
         SWATH_WIDTH, height, np.int32)
     variable = Variable(["y", "x"], default_array)
     tu.add_fill_value(variable,
                       DefaultData.get_default_fill_value(np.int32))
     variable.attrs["long_name"] = long_name
     tu.add_units(variable, "count")
     tu.add_geolocation_attribute(variable)
     tu.add_chunking(variable, CHUNKS_2D)
     return variable
Example #20
 def test_get_default_fill_value(self):
     self.assertEqual(-127, DefaultData.get_default_fill_value(np.int8))
     self.assertEqual(-32767, DefaultData.get_default_fill_value(np.int16))
     self.assertEqual(np.uint16(-1),
                      DefaultData.get_default_fill_value(np.uint16))
     self.assertEqual(-2147483647,
                      DefaultData.get_default_fill_value(np.int32))
     self.assertEqual(-9223372036854775806,
                      DefaultData.get_default_fill_value(np.int64))
     self.assertEqual(np.float32(9.96921E36),
                      DefaultData.get_default_fill_value(np.float32))
     self.assertEqual(9.969209968386869E36,
                      DefaultData.get_default_fill_value(np.float64))
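These defaults appear to match the netCDF library's default fill values (NC_FILL_SHORT, NC_FILL_INT, NC_FILL_FLOAT and so on); the unsigned case relies on wrap-around, as a short sketch shows:

import numpy as np
# on the NumPy 1.x semantics relied on above, np.uint16(-1) wraps to the type maximum
print(np.iinfo(np.uint16).max)   # 65535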
Example #21
    def _assert_line_float_variable(self,
                                    ds,
                                    name,
                                    standard_name=None,
                                    long_name=None,
                                    orig_name=None,
                                    fill_value=None):
        variable = ds.variables[name]
        self.assertEqual((7, ), variable.shape)
        if fill_value is None:
            self.assertEqual(DefaultData.get_default_fill_value(np.float32),
                             variable.data[4])
            self.assertEqual(DefaultData.get_default_fill_value(np.float32),
                             variable.attrs["_FillValue"])
        elif np.isnan(fill_value):
            self.assertTrue(np.isnan(variable.data[4]))
            self.assertTrue(np.isnan(variable.attrs["_FillValue"]))
        else:
            self.assertEqual(fill_value, variable.data[4])
            self.assertEqual(fill_value, variable.attrs["_FillValue"])

        self._assert_name_attributes(variable, standard_name, long_name,
                                     orig_name)
        return variable
Example #22
    def _add_angle_variables(dataset, height):
        default_array = DefaultData.create_default_vector(height,
                                                          np.float32,
                                                          fill_value=np.NaN)
        variable = Variable(["y"], default_array)
        variable.attrs["standard_name"] = "platform_zenith_angle"
        tu.add_units(variable, "degree")
        tu.add_geolocation_attribute(variable)
        tu.add_encoding(variable, np.uint16,
                        DefaultData.get_default_fill_value(np.uint16), 0.01,
                        -180.0)
        dataset["satellite_zenith_angle"] = variable

        dataset["solar_azimuth_angle"] = HIRS._create_geo_angle_variable(
            "solar_azimuth_angle", height, chunking=CHUNKING_2D)
Example #23
 def _assert_correct_refl_variable(self, variable, long_name):
     self.assertEqual((5, 409), variable.shape)
     self.assertTrue(np.isnan(variable.data[0, 8]))
     self.assertEqual("toa_reflectance", variable.attrs["standard_name"])
     self.assertEqual(long_name, variable.attrs["long_name"])
     self.assertEqual("1", variable.attrs["units"])
     self.assertEqual(np.int16, variable.encoding['dtype'])
     self.assertEqual(DefaultData.get_default_fill_value(np.int16),
                      variable.encoding['_FillValue'])
     self.assertEqual(0.0001, variable.encoding['scale_factor'])
     self.assertEqual(0.0, variable.encoding['add_offset'])
     self.assertEqual(CHUNKING, variable.encoding["chunksizes"])
     self.assertEqual(15000, variable.attrs["valid_max"])
     self.assertEqual(0, variable.attrs["valid_min"])
     self.assertEqual("longitude latitude", variable.attrs["coordinates"])
Example #24
 def _create_bt_uncertainty_variable(height, long_name):
     default_array = DefaultData.create_default_array(SWATH_WIDTH,
                                                      height,
                                                      np.float32,
                                                      fill_value=np.NaN)
     variable = Variable(["y", "x"], default_array)
     tu.add_units(variable, "K")
     tu.add_geolocation_attribute(variable)
     tu.add_encoding(variable,
                     np.int16,
                     DefaultData.get_default_fill_value(np.int16),
                     0.001,
                     chunksizes=CHUNKS_2D)
     variable.attrs["valid_max"] = 15000
     variable.attrs["valid_min"] = 1
     variable.attrs["long_name"] = long_name
     return variable
Example #25
 def _create_channel_refl_variable(height, long_name):
     default_array = DefaultData.create_default_array(SWATH_WIDTH,
                                                      height,
                                                      np.float32,
                                                      fill_value=np.NaN)
     variable = Variable(["y", "x"], default_array)
     variable.attrs["standard_name"] = "toa_reflectance"
     variable.attrs["long_name"] = long_name
     tu.add_units(variable, "1")
     tu.add_encoding(variable,
                     np.int16,
                     DefaultData.get_default_fill_value(np.int16),
                     0.0001,
                     chunksizes=CHUNKS_2D)
     variable.attrs["valid_max"] = 15000
     variable.attrs["valid_min"] = 0
     tu.add_geolocation_attribute(variable)
     return variable
Example #26
 def _create_channel_bt_variable(height, long_name):
     default_array = DefaultData.create_default_array(SWATH_WIDTH,
                                                      height,
                                                      np.float32,
                                                      fill_value=np.NaN)
     variable = Variable(["y", "x"], default_array)
     variable.attrs["standard_name"] = "toa_brightness_temperature"
     variable.attrs["long_name"] = long_name
     tu.add_units(variable, "K")
     variable.attrs["valid_max"] = 10000
     variable.attrs["valid_min"] = -20000
     tu.add_geolocation_attribute(variable)
     tu.add_encoding(variable,
                     np.int16,
                     DefaultData.get_default_fill_value(np.int16),
                     0.01,
                     273.15,
                     chunksizes=CHUNKS_2D)
     return variable
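The encoding above packs Kelvin values relative to an offset of 273.15; a short round-trip sketch using the standard scale/offset arithmetic (the 293.57 K value is an example):

bt = 293.57                              # example brightness temperature in K
stored = round((bt - 273.15) / 0.01)     # -> 2042
print(stored, stored * 0.01 + 273.15)    # recovers 293.57 K to within half a scale step

With this scaling the valid_min/valid_max of -20000 and 10000 correspond to 73.15 K and 373.15 K.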
Example #27
    def _create_refl_uncertainty_variable(height,
                                          minmax,
                                          scale_factor,
                                          long_name=None,
                                          units=None):
        default_array = DefaultData.create_default_array(SWATH_WIDTH,
                                                         height,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["y", "x"], default_array)

        tu.add_units(variable, units)
        tu.add_geolocation_attribute(variable)
        variable.attrs["long_name"] = long_name

        tu.add_encoding(variable,
                        np.int16,
                        DefaultData.get_default_fill_value(np.int16),
                        scale_factor,
                        chunksizes=CHUNKS_2D)
        variable.attrs["valid_min"] = minmax[0]
        variable.attrs["valid_max"] = minmax[1]

        return variable
Example #28
    def add_original_variables(dataset, height, srf_size=None):
        # height is ignored - supplied just for interface compatibility tb 2017-02-05

        tu.add_quality_flags(dataset, FULL_SIZE, FULL_SIZE, chunksizes=CHUNKSIZES)

        # time
        default_array = DefaultData.create_default_array(IR_SIZE, IR_SIZE, np.uint32)
        variable = Variable([IR_Y_DIMENSION, IR_X_DIMENSION], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.uint32))
        variable.attrs["standard_name"] = "time"
        variable.attrs["long_name"] = "Acquisition time of pixel"
        tu.add_units(variable, "seconds since 1970-01-01 00:00:00")
        tu.add_offset(variable, TIME_FILL_VALUE)
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["time"] = variable

        dataset["solar_azimuth_angle"] = MVIRI._create_angle_variable_int(0.005493164, standard_name="solar_azimuth_angle", unsigned=True)
        dataset["solar_zenith_angle"] = MVIRI._create_angle_variable_int(0.005493248, standard_name="solar_zenith_angle")
        dataset["satellite_azimuth_angle"] = MVIRI._create_angle_variable_int(0.01, standard_name="sensor_azimuth_angle", long_name="sensor_azimuth_angle", unsigned=True)
        dataset["satellite_zenith_angle"] = MVIRI._create_angle_variable_int(0.01, standard_name="platform_zenith_angle", unsigned=True)

        # count_ir
        default_array = DefaultData.create_default_array(IR_SIZE, IR_SIZE, np.uint8)
        variable = Variable([IR_Y_DIMENSION, IR_X_DIMENSION], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.uint8))
        variable.attrs["long_name"] = "Infrared Image Counts"
        tu.add_units(variable, "count")
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["count_ir"] = variable

        # count_wv
        default_array = DefaultData.create_default_array(IR_SIZE, IR_SIZE, np.uint8)
        variable = Variable([IR_Y_DIMENSION, IR_X_DIMENSION], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.uint8))
        variable.attrs["long_name"] = "WV Image Counts"
        tu.add_units(variable, "count")
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["count_wv"] = variable

        default_array = DefaultData.create_default_array(FULL_SIZE, FULL_SIZE, np.uint8, fill_value=0)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["flag_masks"] = "1, 2, 4, 8, 16, 32"
        variable.attrs["flag_meanings"] = "uncertainty_suspicious uncertainty_too_large space_view_suspicious not_on_earth suspect_time suspect_geo"
        variable.attrs["standard_name"] = "status_flag"
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["data_quality_bitmask"] = variable

        # distance_sun_earth
        dataset["distance_sun_earth"] = tu.create_scalar_float_variable(long_name="Sun-Earth distance", units="au")

        # solar_irradiance_vis
        dataset["solar_irradiance_vis"] = tu.create_scalar_float_variable(standard_name="solar_irradiance_vis", long_name="Solar effective Irradiance", units="W*m-2")

        # u_solar_irradiance_vis
        default_array = np.full([], np.NaN, np.float32)
        variable = Variable([], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "Uncertainty in Solar effective Irradiance"
        tu.add_units(variable, "Wm^-2")
        variable.attrs[corr.PIX_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.PIX_CORR_UNIT] = corr.PIXEL
        variable.attrs[corr.PIX_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.SCAN_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.SCAN_CORR_UNIT] = corr.LINE
        variable.attrs[corr.SCAN_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.IMG_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.IMG_CORR_UNIT] = corr.DAYS
        variable.attrs[corr.IMG_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs["pdf_shape"] = "rectangle"
        dataset["u_solar_irradiance_vis"] = variable

        if srf_size is None:
            srf_size = SRF_SIZE

        default_array = DefaultData.create_default_array(srf_size, NUM_CHANNELS, np.float32, fill_value=np.NaN)
        variable = Variable(["channel", "n_frequencies"], default_array)
        variable.attrs["long_name"] = 'Spectral Response Function weights'
        variable.attrs["description"] = 'Per channel: weights for the relative spectral response function'
        tu.add_encoding(variable, np.int16, -32768, 0.000033)
        dataset['SRF_weights'] = variable

        default_array = DefaultData.create_default_array(srf_size, NUM_CHANNELS, np.float32, fill_value=np.NaN)
        variable = Variable(["channel", "n_frequencies"], default_array)
        variable.attrs["long_name"] = 'Spectral Response Function frequencies'
        variable.attrs["description"] = 'Per channel: frequencies for the relative spectral response function'
        tu.add_encoding(variable, np.int32, -2147483648, 0.0001)
        tu.add_units(variable, "nm")
        variable.attrs["source"] = "Filename of SRF"
        variable.attrs["Valid(YYYYDDD)"] = "datestring"
        dataset['SRF_frequencies'] = variable

        # srf covariance_
        default_array = DefaultData.create_default_array(srf_size, srf_size, np.float32, fill_value=np.NaN)
        variable = Variable([SRF_VIS_DIMENSION, SRF_VIS_DIMENSION], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "Covariance of the Visible Band Spectral Response Function"
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["covariance_spectral_response_function_vis"] = variable

        # u_srf_ir
        default_array = DefaultData.create_default_vector(srf_size, np.float32, fill_value=np.NaN)
        variable = Variable([SRF_IR_WV_DIMENSION], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "Uncertainty in Spectral Response Function for IR channel"
        dataset["u_spectral_response_function_ir"] = variable

        # u_srf_wv
        default_array = DefaultData.create_default_vector(srf_size, np.float32, fill_value=np.NaN)
        variable = Variable([SRF_IR_WV_DIMENSION], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "Uncertainty in Spectral Response Function for WV channel"
        dataset["u_spectral_response_function_wv"] = variable

        dataset["a_ir"] = tu.create_scalar_float_variable(long_name="Calibration parameter a for IR Band", units="mWm^-2sr^-1cm^-1")
        dataset["b_ir"] = tu.create_scalar_float_variable(long_name="Calibration parameter b for IR Band", units="mWm^-2sr^-1cm^-1/DC")
        dataset["u_a_ir"] = tu.create_scalar_float_variable(long_name="Uncertainty of calibration parameter a for IR Band", units="mWm^-2sr^-1cm^-1")
        dataset["u_b_ir"] = tu.create_scalar_float_variable(long_name="Uncertainty of calibration parameter b for IR Band", units="mWm^-2sr^-1cm^-1/DC")
        dataset["a_wv"] = tu.create_scalar_float_variable(long_name="Calibration parameter a for WV Band", units="mWm^-2sr^-1cm^-1")
        dataset["b_wv"] = tu.create_scalar_float_variable(long_name="Calibration parameter b for WV Band", units="mWm^-2sr^-1cm^-1/DC")
        dataset["u_a_wv"] = tu.create_scalar_float_variable(long_name="Uncertainty of calibration parameter a for WV Band", units="mWm^-2sr^-1cm^-1")
        dataset["u_b_wv"] = tu.create_scalar_float_variable(long_name="Uncertainty of calibration parameter b for WV Band", units="mWm^-2sr^-1cm^-1/DC")
        dataset["bt_a_ir"] = tu.create_scalar_float_variable(long_name="IR Band BT conversion parameter A", units="1")
        dataset["bt_b_ir"] = tu.create_scalar_float_variable(long_name="IR Band BT conversion parameter B", units="1")
        dataset["bt_a_wv"] = tu.create_scalar_float_variable(long_name="WV Band BT conversion parameter A", units="1")
        dataset["bt_b_wv"] = tu.create_scalar_float_variable(long_name="WV Band BT conversion parameter B", units="1")
        dataset["years_since_launch"] = tu.create_scalar_float_variable(long_name="Fractional year since launch of satellite", units="years")

        x_ir_wv_dim = dataset.dims["x_ir_wv"]
        dataset["x_ir_wv"] = Coordinate("x_ir_wv", np.arange(x_ir_wv_dim, dtype=np.uint16))

        y_ir_wv_dim = dataset.dims["y_ir_wv"]
        dataset["y_ir_wv"] = Coordinate("y_ir_wv", np.arange(y_ir_wv_dim, dtype=np.uint16))

        srf_size_dim = dataset.dims["srf_size"]
        dataset["srf_size"] = Coordinate("srf_size", np.arange(srf_size_dim, dtype=np.uint16))
Example #29
    def add_full_fcdr_variables(dataset, height):
        # height is ignored - supplied just for interface compatibility tb 2017-02-05

        # count_vis
        default_array = DefaultData.create_default_array(FULL_SIZE, FULL_SIZE, np.uint8)
        variable = Variable(["y", "x"], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.uint8))
        variable.attrs["long_name"] = "Image counts"
        tu.add_units(variable, "count")
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["count_vis"] = variable

        dataset["u_latitude"] = MVIRI._create_angle_variable_int(1.5E-05, long_name="Uncertainty in Latitude", unsigned=True)
        MVIRI._add_geo_correlation_attributes(dataset["u_latitude"])

        dataset["u_longitude"] = MVIRI._create_angle_variable_int(1.5E-05, long_name="Uncertainty in Longitude", unsigned=True)
        MVIRI._add_geo_correlation_attributes(dataset["u_longitude"])

        # u_time
        default_array = DefaultData.create_default_vector(IR_SIZE, np.float32, fill_value=np.NaN)
        variable = Variable([IR_Y_DIMENSION], default_array)
        variable.attrs["standard_name"] = "Uncertainty in Time"
        tu.add_units(variable, "s")
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.009155273)
        variable.attrs["pdf_shape"] = "rectangle"
        dataset["u_time"] = variable

        dataset["u_satellite_zenith_angle"] = MVIRI._create_angle_variable_int(7.62939E-05, long_name="Uncertainty in Satellite Zenith Angle", unsigned=True)
        dataset["u_satellite_azimuth_angle"] = MVIRI._create_angle_variable_int(7.62939E-05, long_name="Uncertainty in Satellite Azimuth Angle", unsigned=True)
        dataset["u_solar_zenith_angle"] = MVIRI._create_angle_variable_int(7.62939E-05, long_name="Uncertainty in Solar Zenith Angle", unsigned=True)
        dataset["u_solar_azimuth_angle"] = MVIRI._create_angle_variable_int(7.62939E-05, long_name="Uncertainty in Solar Azimuth Angle", unsigned=True)

        dataset["a0_vis"] = tu.create_scalar_float_variable("Calibration Coefficient at Launch", units="Wm^-2sr^-1/count")
        dataset["a1_vis"] = tu.create_scalar_float_variable("Time variation of a0", units="Wm^-2sr^-1/count day^-1 10^5")
        dataset["a2_vis"] = tu.create_scalar_float_variable("Time variation of a0, quadratic term", units="Wm^-2sr^-1/count year^-2")
        dataset["mean_count_space_vis"] = tu.create_scalar_float_variable("Space count", units="count")

        # u_a0_vis
        variable = tu.create_scalar_float_variable("Uncertainty in a0", units="Wm^-2sr^-1/count")
        MVIRI._add_calibration_coeff_correlation_attributes(variable)
        dataset["u_a0_vis"] = variable

        # u_a1_vis
        variable = tu.create_scalar_float_variable("Uncertainty in a1", units="Wm^-2sr^-1/count day^-1 10^5")
        MVIRI._add_calibration_coeff_correlation_attributes(variable)
        dataset["u_a1_vis"] = variable

        # u_a2_vis
        variable = tu.create_scalar_float_variable("Uncertainty in a2", units="Wm^-2sr^-1/count year^-2")
        MVIRI._add_calibration_coeff_correlation_attributes(variable)
        dataset["u_a2_vis"] = variable

        # u_zero_vis
        variable = tu.create_scalar_float_variable("Uncertainty zero term", units="Wm^-2sr^-1/count")
        MVIRI._add_calibration_coeff_correlation_attributes(variable, image_correlation_scale=[-np.inf, np.inf])
        dataset["u_zero_vis"] = variable

        # covariance_a_vis
        variable = tu.create_float_variable(COV_SIZE, COV_SIZE, long_name="Covariance of calibration coefficients from fit to calibration runs", dim_names=["cov_size", "cov_size"], fill_value=np.NaN)
        tu.add_fill_value(variable, np.NaN)
        tu.add_units(variable, "Wm^-2sr^-1/count")
        MVIRI._add_calibration_coeff_correlation_attributes(variable, image_correlation_scale=[-np.inf, np.inf])
        dataset["covariance_a_vis"] = variable

        dataset["u_electronics_counts_vis"] = tu.create_scalar_float_variable("Uncertainty due to Electronics noise", units="count")
        dataset["u_digitization_counts_vis"] = tu.create_scalar_float_variable("Uncertainty due to digitization", units="count")

        # allan_deviation_counts_space_vis
        variable = tu.create_scalar_float_variable("Uncertainty of space count", units="count")
        variable.attrs[corr.SCAN_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.SCAN_CORR_UNIT] = corr.LINE
        variable.attrs[corr.SCAN_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs["pdf_shape"] = "digitised_gaussian"
        dataset["allan_deviation_counts_space_vis"] = variable

        # u_mean_counts_space_vis
        variable = tu.create_scalar_float_variable("Uncertainty of space count", units="count")
        variable.attrs[corr.PIX_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.PIX_CORR_UNIT] = corr.PIXEL
        variable.attrs[corr.PIX_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.SCAN_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.SCAN_CORR_UNIT] = corr.LINE
        variable.attrs[corr.SCAN_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs["pdf_shape"] = "digitised_gaussian"
        dataset["u_mean_counts_space_vis"] = variable

        # sensitivity_solar_irradiance_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs["expression"] = "distance_sun_earth * distance_sun_earth * PI * (count_vis - mean_count_space_vis) * (a2_vis * years_since_launch * years_since_launch + a1_vis * years_since_launch + a0_vis) / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis * solar_irradiance_vis)"
        dataset["sensitivity_solar_irradiance_vis"] = variable

        # sensitivity_count_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs["expression"] = "distance_sun_earth * distance_sun_earth * PI * (a2_vis * years_since_launch * years_since_launch + a1_vis * years_since_launch + a0_vis) / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)"
        dataset["sensitivity_count_vis"] = variable

        # sensitivity_count_space
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs["expression"] = "-1.0 * distance_sun_earth * distance_sun_earth * PI * (a2_vis * years_since_launch * years_since_launch + a1_vis * years_since_launch + a0_vis) / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)"
        dataset["sensitivity_count_space"] = variable

        # sensitivity_a0_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs["expression"] = "distance_sun_earth * distance_sun_earth * PI * (count_vis - mean_count_space_vis) / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)"
        dataset["sensitivity_a0_vis"] = variable

        # sensitivity_a1_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs["expression"] = "distance_sun_earth * distance_sun_earth * PI * (count_vis - mean_count_space_vis) * years_since_launch / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)"
        dataset["sensitivity_a1_vis"] = variable

        # sensitivity_a2_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs["expression"] = "distance_sun_earth * distance_sun_earth * PI * (count_vis - mean_count_space_vis) * years_since_launch*years_since_launch / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)"
        dataset["sensitivity_a2_vis"] = variable

        effect_names = ["u_solar_irradiance_vis", "u_a0_vis", "u_a1_vis", "u_a2_vis", "u_zero_vis", "u_solar_zenith_angle", "u_mean_count_space_vis"]
        dataset["Ne"] = Coordinate("Ne", effect_names)

        num_effects = len(effect_names)
        default_array = DefaultData.create_default_array(num_effects, num_effects, np.float32, fill_value=np.NaN)
        variable = Variable(["Ne", "Ne"], default_array)
        tu.add_encoding(variable, np.int16, -32768, 3.05176E-05)
        variable.attrs["valid_min"] = -1
        variable.attrs["valid_max"] = 1
        variable.attrs["long_name"] = "Channel error correlation matrix for structured effects."
        variable.attrs["description"] = "Matrix_describing correlations between errors of the uncertainty_effects due to spectral response function errors (determined using Monte Carlo approach)"
        dataset["effect_correlation_matrix"] = variable
Example #30
    def add_original_variables(dataset,
                               height,
                               srf_size=None,
                               corr_dx=None,
                               corr_dy=None,
                               lut_size=None):
        tu.add_geolocation_variables(dataset, SWATH_WIDTH, height)
        tu.add_quality_flags(dataset, SWATH_WIDTH, height)

        # btemps
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, NUM_CHANNELS, np.float32, np.NaN)
        variable = Variable(["channel", "y", "x"], default_array)
        variable.attrs["standard_name"] = "toa_brightness_temperature"
        tu.add_encoding(variable, np.int32, -999999, scale_factor=0.01)
        tu.add_units(variable, "K")
        variable.attrs["ancillary_variables"] = "chanqual qualind scanqual"
        dataset["btemps"] = variable

        # chanqual
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.int32,
            dims_names=["channel", "y"],
            fill_value=0)
        variable = Variable(["channel", "y"], default_array)
        variable.attrs["standard_name"] = "status_flag"
        variable.attrs["flag_masks"] = "1, 2, 4, 8, 16, 32"
        variable.attrs["flag_meanings"] = "some_bad_prt_temps some_bad_space_view_counts some_bad_bb_counts no_good_prt_temps no_good_space_view_counts no_good_bb_counts"
        dataset["chanqual"] = variable

        # instrtemp
        default_array = DefaultData.create_default_vector(height,
                                                          np.float32,
                                                          fill_value=np.NaN)
        variable = Variable(["y"], default_array)
        tu.add_units(variable, "K")
        tu.add_encoding(variable,
                        np.int32,
                        DefaultData.get_default_fill_value(np.int32),
                        scale_factor=0.01)
        variable.attrs["long_name"] = "instrument_temperature"
        dataset["instrtemp"] = variable

        # qualind
        default_array = DefaultData.create_default_vector(height,
                                                          np.int32,
                                                          fill_value=0)
        variable = Variable(["y"], default_array)
        variable.attrs["standard_name"] = "status_flag"
        variable.attrs["flag_masks"] = "33554432, 67108864, 134217728, 268435456, 536870912, 1073741824, 2147483648"
        variable.attrs["flag_meanings"] = "instr_status_changed first_good_clock_update no_earth_loc no_calib data_gap_precedes time_seq_error not_use_scan"
        dataset["qualind"] = variable

        # scanqual
        default_array = DefaultData.create_default_vector(height,
                                                          np.int32,
                                                          fill_value=0)
        variable = Variable(["y"], default_array)
        variable.attrs["standard_name"] = "status_flag"
        variable.attrs["flag_masks"] = "8, 16, 32, 64, 128, 1024, 2048, 4096, 8192, 16384, 32768, 1048576, 2097152, 4194304, 8388608"
        variable.attrs["flag_meanings"] = "earth_loc_quest_ant_pos earth_loc_quest_reas earth_loc_quest_margin earth_loc_quest_time no_earth_loc_time uncalib_instr_mode uncalib_channels calib_marg_prt uncalib_bad_prt calib_few_scans uncalib_bad_time repeat_scan_times inconsistent_time time_field_bad time_field_inferred"
        dataset["scanqual"] = variable

        # scnlin
        default_array = DefaultData.create_default_vector(height, np.int32)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.int32))
        variable.attrs["long_name"] = "scanline"
        dataset["scnlin"] = variable

        # scnlindy
        default_array = DefaultData.create_default_vector(height, np.int32)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.int32))
        variable.attrs["long_name"] = "Acquisition day of year of scan"
        dataset["scnlindy"] = variable

        # scnlintime
        default_array = DefaultData.create_default_vector(height, np.int32)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.int32))
        variable.attrs["long_name"] = "Acquisition time of scan in milliseconds since beginning of the day"
        tu.add_units(variable, "ms")
        dataset["scnlintime"] = variable

        # scnlinyr
        default_array = DefaultData.create_default_vector(height, np.int32)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.int32))
        variable.attrs["long_name"] = "Acquisition year of scan"
        dataset["scnlinyr"] = variable

        # satellite_azimuth_angle
        variable = AMSUB_MHS.create_angle_variable(height,
                                                   "sensor_azimuth_angle")
        dataset["satellite_azimuth_angle"] = variable

        # satellite_zenith_angle
        variable = AMSUB_MHS.create_angle_variable(height,
                                                   "sensor_zenith_angle")
        dataset["satellite_zenith_angle"] = variable

        # solar_azimuth_angle
        variable = AMSUB_MHS.create_angle_variable(height,
                                                   "solar_azimuth_angle")
        dataset["solar_azimuth_angle"] = variable

        # solar_zenith_angle
        variable = AMSUB_MHS.create_angle_variable(height,
                                                   "solar_zenith_angle")
        dataset["solar_zenith_angle"] = variable

        # acquisition_time
        default_array = DefaultData.create_default_vector(height, np.int32)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.int32))
        variable.attrs["standard_name"] = "time"
        variable.attrs["long_name"] = "Acquisition time in seconds since 1970-01-01 00:00:00"
        tu.add_units(variable, "s")
        dataset["acquisition_time"] = variable