def add_gridded_geolocation_variables(dataset, width, height):
    default_array = DefaultData.create_default_vector(height, np.float32, fill_value=np.NaN)
    variable = Variable(["y"], default_array)
    TemplateUtil.add_fill_value(variable, np.NaN)
    variable.attrs["standard_name"] = LAT_NAME
    variable.attrs["long_name"] = LAT_NAME
    variable.attrs["bounds"] = "lat_bnds"
    TemplateUtil.add_units(variable, LATITUDE_UNIT)
    dataset["lat"] = variable

    default_array = DefaultData.create_default_array(2, height, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "bounds"], default_array)
    TemplateUtil.add_fill_value(variable, np.NaN)
    variable.attrs["long_name"] = "latitude cell boundaries"
    TemplateUtil.add_units(variable, LATITUDE_UNIT)
    dataset["lat_bnds"] = variable

    default_array = DefaultData.create_default_vector(width, np.float32, fill_value=np.NaN)
    variable = Variable(["x"], default_array)
    TemplateUtil.add_fill_value(variable, np.NaN)
    variable.attrs["standard_name"] = LON_NAME
    variable.attrs["long_name"] = LON_NAME
    TemplateUtil.add_units(variable, LONGITUDE_UNIT)
    variable.attrs["bounds"] = "lon_bnds"
    dataset["lon"] = variable

    default_array = DefaultData.create_default_array(2, width, np.float32, fill_value=np.NaN)
    variable = Variable(["x", "bounds"], default_array)
    TemplateUtil.add_fill_value(variable, np.NaN)
    TemplateUtil.add_units(variable, LONGITUDE_UNIT)
    variable.attrs["long_name"] = "longitude cell boundaries"
    dataset["lon_bnds"] = variable
def create_float_variable(width, height, standard_name=None, long_name=None, dim_names=None, fill_value=None):
    if fill_value is None:
        default_array = DefaultData.create_default_array(width, height, np.float32)
    else:
        default_array = DefaultData.create_default_array(width, height, np.float32, fill_value=fill_value)

    if dim_names is None:
        variable = Variable(["y", "x"], default_array)
    else:
        variable = Variable(dim_names, default_array)

    if fill_value is None:
        variable.attrs["_FillValue"] = DefaultData.get_default_fill_value(np.float32)
    else:
        variable.attrs["_FillValue"] = fill_value

    if standard_name is not None:
        variable.attrs["standard_name"] = standard_name

    if long_name is not None:
        variable.attrs["long_name"] = long_name

    return variable
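# Hedged usage sketch (illustrative only, not part of the template code): a template can
# build a named 2-D float variable in one call and attach it to an xarray.Dataset. The
# variable name "sst", the standard name and the 409 x 12000 raster size are hypothetical.
import xarray as xr

def _example_create_float_variable():
    dataset = xr.Dataset()
    dataset["sst"] = create_float_variable(409, 12000,
                                           standard_name="sea_surface_temperature",
                                           long_name="sea surface temperature",
                                           fill_value=np.NaN)
    return dataset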
def add_original_variables(dataset, height, srf_size=None):
    # height is ignored - supplied just for interface compatibility tb 2017-07-19

    # latitude_vis
    default_array = DefaultData.create_default_array(FULL_SIZE, FULL_SIZE, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    variable.attrs["standard_name"] = "latitude"
    tu.add_units(variable, "degrees_north")
    tu.add_encoding(variable, np.int16, -32768, scale_factor=0.0027466658)
    dataset["latitude_vis"] = variable

    # longitude_vis
    default_array = DefaultData.create_default_array(FULL_SIZE, FULL_SIZE, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    variable.attrs["standard_name"] = "longitude"
    tu.add_units(variable, "degrees_east")
    tu.add_encoding(variable, np.int16, -32768, scale_factor=0.0054933317)
    dataset["longitude_vis"] = variable

    # latitude_ir_wv
    default_array = DefaultData.create_default_array(IR_SIZE, IR_SIZE, np.float32, fill_value=np.NaN)
    variable = Variable([IR_X_DIMENSION, IR_X_DIMENSION], default_array)
    variable.attrs["standard_name"] = "latitude"
    tu.add_units(variable, "degrees_north")
    tu.add_encoding(variable, np.int16, -32768, scale_factor=0.0027466658)
    dataset["latitude_ir_wv"] = variable

    # longitude_ir_wv
    default_array = DefaultData.create_default_array(IR_SIZE, IR_SIZE, np.float32, fill_value=np.NaN)
    variable = Variable([IR_X_DIMENSION, IR_X_DIMENSION], default_array)
    variable.attrs["standard_name"] = "longitude"
    tu.add_units(variable, "degrees_east")
    tu.add_encoding(variable, np.int16, -32768, scale_factor=0.0054933317)
    dataset["longitude_ir_wv"] = variable
def add_geolocation_variables(dataset, width, height, chunksizes=None):
    default_array = DefaultData.create_default_array(width, height, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    variable.attrs["standard_name"] = LAT_NAME
    TemplateUtil.add_units(variable, LATITUDE_UNIT)
    TemplateUtil.add_encoding(variable, np.int16, -32768, scale_factor=0.0027466658, chunksizes=chunksizes)
    dataset[LAT_NAME] = variable

    default_array = DefaultData.create_default_array(width, height, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    variable.attrs["standard_name"] = LON_NAME
    TemplateUtil.add_units(variable, LONGITUDE_UNIT)
    TemplateUtil.add_encoding(variable, np.int16, -32768, scale_factor=0.0054933317, chunksizes=chunksizes)
    dataset[LON_NAME] = variable
def add_common_sensor_variables(dataset, height, srf_size):
    # scanline
    default_array = DefaultData.create_default_vector(height, np.int16)
    variable = Variable(["y"], default_array)
    tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.int16))
    variable.attrs["long_name"] = "scanline_number"
    tu.add_units(variable, "count")
    dataset["scanline"] = variable

    # time
    default_array = DefaultData.create_default_vector(height, np.datetime64)
    variable = Variable(["y"], default_array)
    tu.add_fill_value(variable, 4294967295)
    variable.attrs["standard_name"] = "time"
    variable.attrs["long_name"] = "Acquisition time in seconds since 1970-01-01 00:00:00"
    # do not set 'units' or '_FillValue' here, xarray sets this from encoding upon storing the file
    tu.add_encoding(variable, np.uint32, None, scale_factor=0.1)
    variable.encoding["units"] = "seconds since 1970-01-01 00:00:00"
    # encoding 'add_offset' varies per file and either needs to be set
    # by the user or intelligently in fiduceo.fcdr.writer.fcdr_writer.FCDRWriter.write
    dataset["time"] = variable

    # quality_scanline_bitmask
    default_array = DefaultData.create_default_vector(height, np.int32, fill_value=0)
    variable = Variable(["y"], default_array)
    variable.attrs["standard_name"] = "status_flag"
    variable.attrs["long_name"] = "quality_indicator_bitfield"
    variable.attrs["flag_masks"] = "1, 2, 4, 8, 16"
    variable.attrs["flag_meanings"] = "do_not_use_scan reduced_context bad_temp_no_rself suspect_geo suspect_time"
    dataset["quality_scanline_bitmask"] = variable

    # SRF_weights
    default_array = DefaultData.create_default_array(srf_size, NUM_CHANNELS, np.float32, fill_value=np.NaN)
    variable = Variable(["channel", "n_wavelengths"], default_array)
    variable.attrs["long_name"] = 'Spectral Response Function weights'
    variable.attrs["description"] = 'Per channel: weights for the relative spectral response function'
    tu.add_encoding(variable, np.int16, -32768, 0.000033)
    dataset['SRF_weights'] = variable

    # SRF_wavelengths
    default_array = DefaultData.create_default_array(srf_size, NUM_CHANNELS, np.float32, fill_value=np.NaN)
    variable = Variable(["channel", "n_wavelengths"], default_array)
    variable.attrs["long_name"] = 'Spectral Response Function wavelengths'
    variable.attrs["description"] = 'Per channel: wavelengths for the relative spectral response function'
    tu.add_encoding(variable, np.int32, -2147483648, 0.0001)
    tu.add_units(variable, "um")
    dataset['SRF_wavelengths'] = variable

    # scanline_map_to_origl1bfile
    default_vector = DefaultData.create_default_vector(height, np.uint8, fill_value=255)
    variable = Variable(["y"], default_vector)
    tu.add_fill_value(variable, 255)
    variable.attrs["long_name"] = 'Indicator of original file'
    variable.attrs["description"] = "Indicator for mapping each line to its corresponding original level 1b file. See global attribute 'source' for the filenames. 0 corresponds to 1st listed file, 1 to 2nd file."
    dataset["scanline_map_to_origl1bfile"] = variable

    # scanline_origl1b
    default_vector = DefaultData.create_default_vector(height, np.int16, fill_value=DefaultData.get_default_fill_value(np.int16))
    variable = Variable(["y"], default_vector)
    tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.int16))
    variable.attrs["long_name"] = 'Original_Scan_line_number'
    variable.attrs["description"] = 'Original scan line numbers from corresponding l1b records'
    dataset["scanline_origl1b"] = variable
def add_common_sensor_variables(dataset, height, srf_size):
    # scanline
    default_array = DefaultData.create_default_vector(height, np.int16)
    variable = Variable(["y"], default_array)
    tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.int16))
    variable.attrs["long_name"] = "scanline_number"
    tu.add_units(variable, "count")
    dataset["scanline"] = variable

    # time
    default_array = DefaultData.create_default_vector(height, np.uint32)
    variable = Variable(["y"], default_array)
    tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.uint32))
    variable.attrs["standard_name"] = "time"
    variable.attrs["long_name"] = "Acquisition time in seconds since 1970-01-01 00:00:00"
    tu.add_units(variable, "s")
    dataset["time"] = variable

    # quality_scanline_bitmask
    default_array = DefaultData.create_default_vector(height, np.int32, fill_value=0)
    variable = Variable(["y"], default_array)
    variable.attrs["standard_name"] = "status_flag"
    variable.attrs["long_name"] = "quality_indicator_bitfield"
    variable.attrs["flag_masks"] = "1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824"
    variable.attrs["flag_meanings"] = "do_not_use_scan time_sequence_error data_gap_preceding_scan no_calibration no_earth_location clock_update status_changed line_incomplete time_field_bad time_field_bad_not_inf inconsistent_sequence scan_time_repeat uncalib_bad_time calib_few_scans uncalib_bad_prt calib_marginal_prt uncalib_channels uncalib_inst_mode quest_ant_black_body zero_loc bad_loc_time bad_loc_marginal bad_loc_reason bad_loc_ant reduced_context bad_temp_no_rself"
    dataset["quality_scanline_bitmask"] = variable

    # SRF_weights
    default_array = DefaultData.create_default_array(srf_size, NUM_CHANNELS, np.float32, fill_value=np.NaN)
    variable = Variable(["channel", "n_frequencies"], default_array)
    variable.attrs["long_name"] = 'Spectral Response Function weights'
    variable.attrs["description"] = 'Per channel: weights for the relative spectral response function'
    tu.add_encoding(variable, np.int16, -32768, 0.000033)
    dataset['SRF_weights'] = variable

    # SRF_wavelengths
    default_array = DefaultData.create_default_array(srf_size, NUM_CHANNELS, np.float32, fill_value=np.NaN)
    variable = Variable(["channel", "n_frequencies"], default_array)
    variable.attrs["long_name"] = 'Spectral Response Function wavelengths'
    variable.attrs["description"] = 'Per channel: wavelengths for the relative spectral response function'
    tu.add_encoding(variable, np.int32, -2147483648, 0.0001)
    tu.add_units(variable, "um")
    dataset['SRF_wavelengths'] = variable

    # scanline_map_to_origl1bfile
    default_vector = DefaultData.create_default_vector(height, np.uint8, fill_value=255)
    variable = Variable(["y"], default_vector)
    tu.add_fill_value(variable, 255)
    variable.attrs["long_name"] = 'Indicator of original file'
    variable.attrs["description"] = "Indicator for mapping each line to its corresponding original level 1b file. See global attribute 'source' for the filenames. 0 corresponds to 1st listed file, 1 to 2nd file."
    dataset["scanline_map_to_origl1bfile"] = variable

    # scanline_origl1b
    default_vector = DefaultData.create_default_vector(height, np.int16, fill_value=DefaultData.get_default_fill_value(np.int16))
    variable = Variable(["y"], default_vector)
    tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.int16))
    variable.attrs["long_name"] = 'Original_Scan_line_number'
    variable.attrs["description"] = 'Original scan line numbers from corresponding l1b records'
    dataset["scanline_origl1b"] = variable
def add_lookup_tables(dataset, num_channels, lut_size):
    default_array = DefaultData.create_default_array(num_channels, lut_size, np.float32, fill_value=np.NaN)
    variable = Variable(["lut_size", "channel"], default_array)
    TemplateUtil.add_fill_value(variable, np.NaN)
    variable.attrs["description"] = "Lookup table to convert radiance to brightness temperatures"
    dataset['lookup_table_BT'] = variable

    default_array = DefaultData.create_default_array(num_channels, lut_size, np.float32, fill_value=np.NaN)
    variable = Variable(["lut_size", "channel"], default_array)
    TemplateUtil.add_fill_value(variable, np.NaN)
    variable.attrs["description"] = "Lookup table to convert brightness temperatures to radiance"
    dataset['lookup_table_radiance'] = variable
def add_correlation_coefficients(dataset, num_channels, delta_x, delta_y):
    default_array = DefaultData.create_default_array(num_channels, delta_x, np.float32, fill_value=np.NaN)
    variable = Variable(["delta_x", "channel"], default_array)
    TemplateUtil.add_fill_value(variable, np.NaN)
    variable.attrs["long_name"] = "cross_element_correlation_coefficients"
    variable.attrs["description"] = "Correlation coefficients per channel for scanline correlation"
    dataset['cross_element_correlation_coefficients'] = variable

    default_array = DefaultData.create_default_array(num_channels, delta_y, np.float32, fill_value=np.NaN)
    variable = Variable(["delta_y", "channel"], default_array)
    TemplateUtil.add_fill_value(variable, np.NaN)
    variable.attrs["long_name"] = "cross_line_correlation_coefficients"
    variable.attrs["description"] = "Correlation coefficients per channel for inter scanline correlation"
    dataset['cross_line_correlation_coefficients'] = variable
def add_variables(dataset, width, height):
    tu.add_geolocation_variables(dataset, width, height, chunksizes=CHUNKING)
    tu.add_quality_flags(dataset, width, height, chunksizes=CHUNKING)

    # time - uint32 so that the 4294967295 fill value is representable (int32 would overflow)
    default_array = DefaultData.create_default_vector(height, np.uint32, fill_value=4294967295)
    variable = Variable(["y"], default_array)
    tu.add_fill_value(variable, 4294967295)
    variable.attrs["standard_name"] = "time"
    variable.attrs["long_name"] = "Acquisition time in seconds since 1970-01-01 00:00:00"
    tu.add_units(variable, "s")
    dataset["time"] = variable

    # aot
    default_array = DefaultData.create_default_array(width, height, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    tu.add_fill_value(variable, np.NaN)
    variable.attrs["coordinates"] = "longitude latitude"
    dataset["aot"] = variable

    dataset["u_independent_aot"] = tu.create_CDR_uncertainty(width, height, "Uncertainty of aot due to independent effects")
    dataset["u_structured_aot"] = tu.create_CDR_uncertainty(width, height, "Uncertainty of aot due to structured effects")
    dataset["u_common_aot"] = tu.create_CDR_uncertainty(width, height, "Uncertainty of aot due to common effects")
def add_quality_flags(dataset, width, height, chunksizes=None, masks_append=None, meanings_append=None):
    default_array = DefaultData.create_default_array(width, height, np.uint8, fill_value=0)
    variable = Variable(["y", "x"], default_array)
    variable.attrs["standard_name"] = "status_flag"
    TemplateUtil.add_geolocation_attribute(variable)

    masks = "1, 2, 4, 8, 16, 32, 64, 128"
    if masks_append is not None:
        masks = masks + masks_append
    variable.attrs["flag_masks"] = masks

    meanings = "invalid use_with_caution invalid_input invalid_geoloc invalid_time sensor_error padded_data incomplete_channel_data"
    if meanings_append is not None:
        meanings = meanings + meanings_append
    variable.attrs["flag_meanings"] = meanings

    if chunksizes is not None:
        TemplateUtil.add_chunking(variable, chunksizes)

    dataset["quality_pixel_bitmask"] = variable
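# Hedged usage sketch (not part of the template code): sensor templates can extend the generic
# pixel bitmask by passing pre-formatted continuation strings. Because the strings are simply
# concatenated, the caller has to include the leading ", " / " " separators itself. The extra
# mask values and flag names below are illustrative only; 'tu' is the TemplateUtil alias used
# elsewhere in this code.
def _example_sensor_quality_flags(dataset, width, height):
    tu.add_quality_flags(dataset, width, height,
                         masks_append=", 256, 512",
                         meanings_append=" suspect_calibration suspect_navigation")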
def _create_angle_variable_int(scale_factor, standard_name=None, long_name=None, unsigned=False, fill_value=None):
    default_array = DefaultData.create_default_array(TIE_SIZE, TIE_SIZE, np.float32, fill_value=np.NaN)
    variable = Variable(["y_tie", "x_tie"], default_array)

    if unsigned is True:
        data_type = np.uint16
    else:
        data_type = np.int16

    if fill_value is None:
        fill_value = DefaultData.get_default_fill_value(data_type)

    if standard_name is not None:
        variable.attrs["standard_name"] = standard_name

    if long_name is not None:
        variable.attrs["long_name"] = long_name

    tu.add_units(variable, "degree")
    variable.attrs["tie_points"] = "true"
    tu.add_encoding(variable, data_type, fill_value, scale_factor, chunksizes=CHUNKSIZES)
    return variable
def _create_refl_uncertainty_variable(height, long_name=None, structured=False):
    default_array = DefaultData.create_default_array(SWATH_WIDTH, height, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    tu.add_units(variable, "percent")
    tu.add_geolocation_attribute(variable)
    variable.attrs["long_name"] = long_name

    if structured:
        tu.add_encoding(variable, np.int16, DefaultData.get_default_fill_value(np.int16), 0.01, chunksizes=CHUNKS_2D)
        variable.attrs["valid_min"] = 3
        variable.attrs["valid_max"] = 5
    else:
        tu.add_encoding(variable, np.int16, DefaultData.get_default_fill_value(np.int16), 0.00001, chunksizes=CHUNKS_2D)
        variable.attrs["valid_min"] = 10
        variable.attrs["valid_max"] = 1000

    return variable
def add_easy_fcdr_variables(dataset, height, corr_dx=None, corr_dy=None, lut_size=None):
    # height is ignored - supplied just for interface compatibility tb 2017-02-05

    # reflectance
    default_array = DefaultData.create_default_array(FULL_SIZE, FULL_SIZE, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    variable.attrs["standard_name"] = "toa_bidirectional_reflectance_vis"
    variable.attrs["long_name"] = "top of atmosphere bidirectional reflectance factor per pixel of the visible band with central wavelength 0.7"
    tu.add_units(variable, "1")
    tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 3.05176E-05, chunksizes=CHUNKSIZES)
    dataset["toa_bidirectional_reflectance_vis"] = variable

    # u_independent
    default_array = DefaultData.create_default_array(FULL_SIZE, FULL_SIZE, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    variable.attrs["long_name"] = "independent uncertainty per pixel"
    tu.add_units(variable, "1")
    tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 3.05176E-05, chunksizes=CHUNKSIZES)
    dataset["u_independent_toa_bidirectional_reflectance"] = variable

    # u_structured
    default_array = DefaultData.create_default_array(FULL_SIZE, FULL_SIZE, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    variable.attrs["long_name"] = "structured uncertainty per pixel"
    tu.add_units(variable, "1")
    tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 3.05176E-05, chunksizes=CHUNKSIZES)
    dataset["u_structured_toa_bidirectional_reflectance"] = variable

    # u_common
    dataset["u_common_toa_bidirectional_reflectance"] = tu.create_scalar_float_variable(long_name="common uncertainty per slot", units="1")

    dataset["sub_satellite_latitude_start"] = tu.create_scalar_float_variable(long_name="Latitude of the sub satellite point at image start", units="degrees_north")
    dataset["sub_satellite_longitude_start"] = tu.create_scalar_float_variable(long_name="Longitude of the sub satellite point at image start", units="degrees_east")
    dataset["sub_satellite_latitude_end"] = tu.create_scalar_float_variable(long_name="Latitude of the sub satellite point at image end", units="degrees_north")
    dataset["sub_satellite_longitude_end"] = tu.create_scalar_float_variable(long_name="Longitude of the sub satellite point at image end", units="degrees_east")

    tu.add_correlation_matrices(dataset, NUM_CHANNELS)

    if lut_size is not None:
        tu.add_lookup_tables(dataset, NUM_CHANNELS, lut_size=lut_size)

    if corr_dx is not None and corr_dy is not None:
        tu.add_correlation_coefficients(dataset, NUM_CHANNELS, corr_dx, corr_dy)

    tu.add_coordinates(dataset, ["vis", "wv", "ir"])
def add_extended_flag_variables(dataset, height):
    # quality_channel_bitmask
    default_array = DefaultData.create_default_array(NUM_CHANNELS, height, np.uint8, dims_names=["y", "channel"], fill_value=0)
    variable = Variable(["y", "channel"], default_array)
    variable.attrs["standard_name"] = "status_flag"
    variable.attrs["long_name"] = "channel_quality_flags_bitfield"
    variable.attrs["flag_masks"] = "1, 2, 4, 8, 16"
    variable.attrs["flag_meanings"] = "do_not_use uncertainty_suspicious self_emission_fails calibration_impossible calibration_suspect"
    dataset["quality_channel_bitmask"] = variable
def _create_geo_angle_uncertainty_variable(standard_name, height, fill_value, orig_name=None):
    default_array = DefaultData.create_default_array(SWATH_WIDTH, height, np.float32, fill_value=fill_value)
    variable = Variable(["y", "x"], default_array)
    tu.add_encoding(variable, np.uint16, fill_value, scale_factor=0.01)
    variable.attrs["standard_name"] = standard_name

    if orig_name is not None:
        variable.attrs["orig_name"] = orig_name

    tu.add_units(variable, "degree")
    return variable
def create_angle_variable(height, standard_name):
    default_array = DefaultData.create_default_array(SWATH_WIDTH, height, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    variable.attrs["standard_name"] = standard_name
    tu.add_units(variable, "degree")
    tu.add_encoding(variable, np.int32, -999999, scale_factor=0.01)
    return variable
def _create_counts_variable(height, long_name):
    default_array = DefaultData.create_default_array(SWATH_WIDTH, height, np.int32)
    variable = Variable(["y", "x"], default_array)
    tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.int32))
    variable.attrs["long_name"] = long_name
    tu.add_units(variable, "count")
    tu.add_geolocation_attribute(variable)
    tu.add_chunking(variable, CHUNKS_2D)
    return variable
def _create_geo_angle_variable(standard_name, height, orig_name=None, chunking=None):
    default_array = DefaultData.create_default_array(SWATH_WIDTH, height, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    variable.attrs["standard_name"] = standard_name

    if orig_name is not None:
        variable.attrs["orig_name"] = orig_name

    tu.add_units(variable, "degree")
    tu.add_geolocation_attribute(variable)
    tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01, -180.0, chunking)
    return variable
def _create_overpass_counts_variable(height, width, description):
    fill_value = DefaultData.get_default_fill_value(np.uint8)
    default_array = DefaultData.create_default_array(width, height, np.uint8, fill_value=fill_value)
    variable = Variable(["y", "x"], default_array)
    tu.add_fill_value(variable, fill_value)
    variable.attrs["description"] = description
    variable.attrs["coordinates"] = "lon lat"
    return variable
def setUp(self):
    self.dataset = xr.Dataset()
    self.dataset.attrs["template_key"] = "HIRS2"

    default_array = DefaultData.create_default_array(5, 5, np.uint16, fill_value=0)
    variable = Variable(["y", "x"], default_array)
    self.dataset["data_quality_bitmask"] = variable
    self.dataset["quality_pixel_bitmask"] = variable

    default_array = DefaultData.create_default_vector(5, np.int32, fill_value=0)
    variable = Variable(["y"], default_array)
    self.dataset["quality_scanline_bitmask"] = variable

    default_array = DefaultData.create_default_array(19, 5, np.uint8, fill_value=0)
    variable = Variable(["y", "channel"], default_array)
    self.dataset["quality_channel_bitmask"] = variable

    tempDir = tempfile.gettempdir()
    self.testDir = os.path.join(tempDir, 'fcdr')
    os.mkdir(self.testDir)
def test_check_scaling_ranges_int16_array_ok(self):
    default_array = DefaultData.create_default_array(2, 2, np.float32)
    default_array[0][0] = 75.534   # 32767
    default_array[0][1] = -55.536  # -32768
    default_array[1][0] = np.NaN
    default_array[1][1] = 14.06
    variable = Variable(["y", "x"], default_array)
    variable.encoding = dict([('dtype', np.int16), ('_FillValue', -32767), ('scale_factor', 0.002), ('add_offset', 10)])

    DataUtility.check_scaling_ranges(variable)
def test_check_scaling_ranges_uint16_array_ok(self):
    default_array = DefaultData.create_default_array(2, 2, np.float32)
    default_array[0][0] = 9        # 9
    default_array[0][1] = 205.605  # 65535
    default_array[1][0] = np.NaN
    default_array[1][1] = 14.06
    variable = Variable(["y", "x"], default_array)
    variable.encoding = dict([('dtype', np.uint16), ('_FillValue', 65535), ('scale_factor', 0.003), ('add_offset', 9)])

    DataUtility.check_scaling_ranges(variable)
def add_quality_flags(dataset, height):
    tu.add_quality_flags(dataset, SWATH_WIDTH, height, chunksizes=CHUNKING_2D)

    default_array = DefaultData.create_default_array(SWATH_WIDTH, height, np.uint16, fill_value=0)
    variable = Variable(["y", "x"], default_array)
    variable.attrs["standard_name"] = "status_flag"
    variable.attrs["flag_masks"] = "1, 2, 4, 8, 16"
    variable.attrs["flag_meanings"] = "suspect_mirror suspect_geo suspect_time outlier_nos uncertainty_too_large"
    tu.add_chunking(variable, CHUNKING_2D)
    tu.add_geolocation_attribute(variable)
    dataset["data_quality_bitmask"] = variable
def test_check_scaling_ranges_int16_valid_min_max_ok(self):
    default_array = DefaultData.create_default_array(2, 2, np.float32)
    default_array[0][0] = 60  # 25000
    default_array[0][1] = 10  # 0
    default_array[1][0] = np.NaN
    default_array[1][1] = 14.06
    variable = Variable(["y", "x"], default_array)
    variable.attrs["valid_max"] = 25000
    variable.attrs["valid_min"] = 0
    variable.encoding = dict([('dtype', np.int16), ('_FillValue', -32767), ('scale_factor', 0.002), ('add_offset', 10)])

    DataUtility.check_scaling_ranges(variable)
def _create_uth_variable(width, height, description=None):
    default_array = DefaultData.create_default_array(width, height, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    tu.add_fill_value(variable, np.NaN)
    variable.attrs["coordinates"] = "lon lat"
    variable.attrs["long_name"] = "upper_tropospheric_humidity"
    tu.add_units(variable, "%")

    if description is not None:
        variable.attrs["description"] = description

    return variable
def test_check_scaling_ranges_uint16_array_overflow(self):
    default_array = DefaultData.create_default_array(2, 2, np.float32)
    default_array[0][0] = 9        # 0
    default_array[0][1] = 205.705  # overflow
    default_array[1][0] = np.NaN
    default_array[1][1] = 14.06
    variable = Variable(["y", "x"], default_array)
    variable.encoding = dict([('dtype', np.uint16), ('_FillValue', 65535), ('scale_factor', 0.003), ('add_offset', 9)])

    try:
        DataUtility.check_scaling_ranges(variable)
        self.fail("ValueError expected")
    except ValueError:
        pass
def _create_bt_variable(width, height, description=None):
    default_array = DefaultData.create_default_array(width, height, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    tu.add_fill_value(variable, np.NaN)
    variable.attrs["coordinates"] = "lon lat"
    variable.attrs["standard_name"] = "toa_brightness_temperature"
    tu.add_units(variable, "K")

    if description is not None:
        variable.attrs["description"] = description

    return variable
def create_CDR_uncertainty(width, height, description, coordinates=None, units=None):
    default_array = DefaultData.create_default_array(width, height, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    TemplateUtil.add_fill_value(variable, np.NaN)
    variable.attrs["description"] = description

    if units is None:
        TemplateUtil.add_units(variable, "%")
    else:
        TemplateUtil.add_units(variable, units)

    if coordinates is None:
        variable.attrs["coordinates"] = "longitude latitude"
    else:
        variable.attrs["coordinates"] = coordinates

    return variable
def test_check_scaling_ranges_int16_valid_min_max_underflow(self):
    default_array = DefaultData.create_default_array(2, 2, np.float32)
    default_array[0][0] = 60  # 25000
    default_array[0][1] = 9   # underflow
    default_array[1][0] = np.NaN
    default_array[1][1] = 14.06
    variable = Variable(["y", "x"], default_array)
    variable.attrs["valid_max"] = 25000
    variable.attrs["valid_min"] = 0
    variable.encoding = dict([('dtype', np.int16), ('_FillValue', -32767), ('scale_factor', 0.002), ('add_offset', 10)])

    try:
        DataUtility.check_scaling_ranges(variable)
        self.fail("ValueError expected")
    except ValueError:
        pass
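# Hedged sketch (an assumption, not the DataUtility implementation): the tests above suggest
# that check_scaling_ranges inverts the CF packing
#     packed = (value - add_offset) / scale_factor
# and raises ValueError when any non-NaN packed value falls outside the target dtype's range,
# or outside the 'valid_min'/'valid_max' attributes when those are present.
import numpy as np

def _packed_values_in_range(values, scale_factor, add_offset, lower, upper):
    # pack the physical values and drop NaNs, which the check treats as fill
    packed = (np.asarray(values, dtype=np.float64) - add_offset) / scale_factor
    packed = packed[~np.isnan(packed)]
    return bool(np.all((packed >= lower) & (packed <= upper)))

# e.g. (205.705 - 9) / 0.003 ~= 65568 exceeds the uint16 maximum of 65535, which is why
# test_check_scaling_ranges_uint16_array_overflow above expects a ValueError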
def _create_bt_uncertainty_variable(height, long_name):
    default_array = DefaultData.create_default_array(SWATH_WIDTH, height, np.float32, fill_value=np.NaN)
    variable = Variable(["y", "x"], default_array)
    tu.add_units(variable, "K")
    tu.add_geolocation_attribute(variable)
    tu.add_encoding(variable, np.int16, DefaultData.get_default_fill_value(np.int16), 0.001, chunksizes=CHUNKS_2D)
    variable.attrs["valid_min"] = 1
    variable.attrs["valid_max"] = 15000
    variable.attrs["long_name"] = long_name
    return variable