def load_sbt_ancillary(group):
    """
    Load the SBT ancillary data retrieved during the workflow.
    """
    point_data = {DatasetName.DEWPOINT_TEMPERATURE.value: {},
                  DatasetName.SURFACE_GEOPOTENTIAL.value: {},
                  DatasetName.TEMPERATURE_2M.value: {},
                  DatasetName.SURFACE_RELATIVE_HUMIDITY.value: {},
                  DatasetName.GEOPOTENTIAL.value: {},
                  DatasetName.RELATIVE_HUMIDITY.value: {},
                  DatasetName.TEMPERATURE.value: {}}

    npoints = group[DatasetName.COORDINATOR.value].shape[0]

    for point in range(npoints):
        pnt_grp = group[POINT_FMT.format(p=point)]
        lonlat = tuple(pnt_grp.attrs['lonlat'])

        # scalars
        dname = DatasetName.DEWPOINT_TEMPERATURE.value
        point_data[dname][lonlat] = read_scalar(pnt_grp, dname)

        dname = DatasetName.SURFACE_GEOPOTENTIAL.value
        point_data[dname][lonlat] = read_scalar(pnt_grp, dname)

        dname = DatasetName.TEMPERATURE_2M.value
        point_data[dname][lonlat] = read_scalar(pnt_grp, dname)

        dname = DatasetName.SURFACE_RELATIVE_HUMIDITY.value
        point_data[dname][lonlat] = read_scalar(pnt_grp, dname)

        # tables
        dname = DatasetName.GEOPOTENTIAL.value
        dset = pnt_grp[dname]
        attrs = {k: v for k, v in dset.attrs.items()}
        df = read_h5_table(pnt_grp, dname)
        for column in df.columns:
            attrs[column] = df[column].values
        point_data[dname][lonlat] = attrs

        dname = DatasetName.RELATIVE_HUMIDITY.value
        dset = pnt_grp[dname]
        attrs = {k: v for k, v in dset.attrs.items()}
        df = read_h5_table(pnt_grp, dname)
        for column in df.columns:
            attrs[column] = df[column].values
        point_data[dname][lonlat] = attrs

        dname = DatasetName.TEMPERATURE.value
        dset = pnt_grp[dname]
        attrs = {k: v for k, v in dset.attrs.items()}
        df = read_h5_table(pnt_grp, dname)
        for column in df.columns:
            attrs[column] = df[column].values
        point_data[dname][lonlat] = attrs

    return point_data
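# A minimal usage sketch (an assumption, not part of the workflow code):
# `fid` is an open HDF5 file whose root contains the per-point groups
# (POINT_FMT) written by collect_sbt_ancillary further below.
#
#     with h5py.File('sbt-ancillary.h5', 'r') as fid:
#         point_data = load_sbt_ancillary(fid)
#         # scalars and tables are keyed by dataset name, then (lon, lat)
#         t2m = point_data[DatasetName.TEMPERATURE_2M.value]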
def run(self):
    container = acquisitions(self.level1, self.acq_parser_hint)
    acqs, group = container.get_highest_resolution(granule=self.granule)

    # output filename format
    json_fmt = pjoin(POINT_FMT, ALBEDO_FMT,
                     "".join([POINT_ALBEDO_FMT, ".json"]))

    # input filenames
    ancillary_fname = self.input()["ancillary"].path
    sat_sol_fname = self.input()[group]["sat_sol"].path
    lon_lat_fname = self.input()[group]["lon_lat"].path

    with self.output().temporary_path() as out_fname:
        json_data = _format_json(
            acqs,
            sat_sol_fname,
            lon_lat_fname,
            ancillary_fname,
            out_fname,
            self.workflow,
        )

        # keep this as an indented block, that way the target will remain
        # atomic and be moved upon closing
        for key in json_data:
            point, albedo = key

            json_fname = json_fmt.format(p=point, a=albedo.value)

            target = pjoin(dirname(out_fname), self.base_dir, json_fname)

            workdir = pjoin(
                dirname(out_fname),
                self.base_dir,
                POINT_FMT.format(p=point),
                ALBEDO_FMT.format(a=albedo.value),
            )

            with luigi.LocalTarget(target).open("w") as src:
                # Thermal processing has two input configurations
                for modtran_input in json_data[key]["MODTRAN"]:
                    modtran_input["MODTRANINPUT"]["SPECTRAL"]["FILTNM"] = pjoin(
                        workdir,
                        modtran_input["MODTRANINPUT"]["SPECTRAL"]["FILTNM"]
                    )

                json.dump(json_data[key], src, cls=JsonEncoder, indent=4)
def prepare_modtran(acquisitions, coordinate, albedos, basedir, modtran_exe):
    """
    Prepares the working directory for a MODTRAN execution.
    """
    data_dir = pjoin(dirname(modtran_exe), 'DATA')
    if not exists(data_dir):
        raise OSError('Cannot find MODTRAN')

    point_dir = pjoin(basedir, POINT_FMT.format(p=coordinate))

    for albedo in albedos:
        if albedo == Albedos.ALBEDO_TH:
            band_type = BandType.THERMAL
        else:
            band_type = BandType.REFLECTIVE

        acq = [acq for acq in acquisitions if acq.band_type == band_type][0]

        modtran_work = pjoin(point_dir, ALBEDO_FMT.format(a=albedo.value))

        if not exists(modtran_work):
            os.makedirs(modtran_work)

        out_fname = pjoin(modtran_work, 'mod5root.in')
        with open(out_fname, 'w') as src:
            src.write(POINT_ALBEDO_FMT.format(p=coordinate,
                                              a=albedo.value) + '\n')

        symlink_dir = pjoin(modtran_work, 'DATA')
        if exists(symlink_dir):
            os.unlink(symlink_dir)

        os.symlink(data_dir, symlink_dir)

        out_fname = pjoin(modtran_work, acq.spectral_filter_file)
        response = acq.spectral_response(as_list=True)
        with open(out_fname, 'wb') as src:
            src.writelines(response)
def prepare_modtran(acquisitions, coordinate, albedos, basedir):
    """
    Prepares the working directory for a MODTRAN execution.
    """
    point_dir = pjoin(basedir, POINT_FMT.format(p=coordinate))

    for albedo in albedos:
        if albedo == Albedos.ALBEDO_TH:
            band_type = BandType.THERMAL
        else:
            band_type = BandType.REFLECTIVE

        acq = [acq for acq in acquisitions if acq.band_type == band_type][0]

        modtran_work = pjoin(point_dir, ALBEDO_FMT.format(a=albedo.value))

        if not exists(modtran_work):
            os.makedirs(modtran_work)

        out_fname = pjoin(modtran_work, acq.spectral_filter_name)

        # Copy the spectral response filter file to the modtran workdir
        shutil.copy(acq.spectral_filter_filepath, out_fname)
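# A minimal usage sketch (the acquisitions list `acqs` is assumed to hold
# both reflective and thermal bands; the working directory is temporary):
#
#     with tempfile.TemporaryDirectory() as tmpdir:
#         prepare_modtran(acqs, 0, [Albedos.ALBEDO_0, Albedos.ALBEDO_TH],
#                         tmpdir)
#         # each point/albedo subdirectory now holds a copy of the
#         # matching band's spectral response filter file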
def test_modtran_run(self):
    """
    Tests that the interface to MODTRAN (run_modtran)
    works for known inputs.
    Used to validate environment configuration/setup.
    """
    band_names = ['BAND-1', 'BAND-2', 'BAND-3', 'BAND-4',
                  'BAND-5', 'BAND-6', 'BAND-7', 'BAND-8']
    point = 0
    albedo = Albedos.ALBEDO_0

    # setup mock acquisitions object
    acquisitions = []
    for bandn in band_names:
        acq = mock.MagicMock()
        acq.acquisition_datetime = datetime(2001, 1, 1)
        acq.band_type = BandType.REFLECTIVE
        acq.spectral_response = mock_spectral_response
        acquisitions.append(acq)

    # setup mock atmospherics group
    attrs = {'lonlat': 'TEST'}
    atmospherics = mock.MagicMock()
    atmospherics.attrs = attrs
    atmospherics_group = {POINT_FMT.format(p=point): atmospherics}

    # Compute base path -- prefix for hdf5 file
    base_path = ppjoin(GroupName.ATMOSPHERIC_RESULTS_GRP.value,
                       POINT_FMT.format(p=point))

    with tempfile.TemporaryDirectory() as workdir:
        run_dir = pjoin(workdir, POINT_FMT.format(p=point),
                        ALBEDO_FMT.format(a=albedo.value))
        os.makedirs(run_dir)

        # TODO replace json_input copy with json input generation
        with open(INPUT_JSON, 'r') as fd:
            json_data = json.load(fd)

        for mod_input in json_data['MODTRAN']:
            mod_input['MODTRANINPUT']['SPECTRAL']['FILTNM'] = \
                SPECTRAL_RESPONSE_LS8

        with open(pjoin(run_dir,
                        POINT_ALBEDO_FMT.format(p=point, a=albedo.value))
                  + ".json", 'w') as fd:
            json.dump(json_data, fd)

        fid = run_modtran(
            acquisitions,
            atmospherics_group,
            Workflow.STANDARD,
            npoints=12,  # number of track points
            point=point,
            albedos=[albedo],
            modtran_exe=MODTRAN_EXE,
            basedir=workdir,
            out_group=None
        )
        assert fid

        # Test base attrs
        assert fid[base_path].attrs['lonlat'] == 'TEST'
        assert fid[base_path].attrs['datetime'] == \
            datetime(2001, 1, 1).isoformat()
        # test albedo headers?

        # Summarise modtran results to surface reflectance coefficients
        test_grp = fid[base_path][ALBEDO_FMT.format(a=albedo.value)]
        nbar_coefficients, _ = coefficients(
            read_h5_table(fid, pjoin(base_path,
                                     ALBEDO_FMT.format(a=albedo.value),
                                     DatasetName.CHANNEL.value)),
            read_h5_table(fid, pjoin(base_path,
                                     ALBEDO_FMT.format(a=albedo.value),
                                     DatasetName.SOLAR_ZENITH_CHANNEL.value)))

        expected = pd.read_csv(EXPECTED_CSV, index_col='band_name')
        pd.testing.assert_frame_equal(nbar_coefficients, expected,
                                      check_less_precise=True)
def format_json(acquisitions, ancillary_group, satellite_solar_group,
                lon_lat_group, workflow, out_group):
    """
    Creates the JSON input configurations required by MODTRAN,
    for the albedo (0) and thermal (th) runs.
    """
    # angles data
    sat_view = satellite_solar_group[DatasetName.SATELLITE_VIEW.value]
    sat_azi = satellite_solar_group[DatasetName.SATELLITE_AZIMUTH.value]
    longitude = lon_lat_group[DatasetName.LON.value]
    latitude = lon_lat_group[DatasetName.LAT.value]

    # retrieve the averaged ancillary if available
    anc_grp = ancillary_group.get(GroupName.ANCILLARY_AVG_GROUP.value)
    if anc_grp is None:
        anc_grp = ancillary_group

    # ancillary data
    coordinator = ancillary_group[DatasetName.COORDINATOR.value]
    aerosol = anc_grp[DatasetName.AEROSOL.value][()]
    water_vapour = anc_grp[DatasetName.WATER_VAPOUR.value][()]
    ozone = anc_grp[DatasetName.OZONE.value][()]
    elevation = anc_grp[DatasetName.ELEVATION.value][()]

    npoints = coordinator.shape[0]
    view = numpy.zeros(npoints, dtype='float32')
    azi = numpy.zeros(npoints, dtype='float32')
    lat = numpy.zeros(npoints, dtype='float64')
    lon = numpy.zeros(npoints, dtype='float64')

    for i in range(npoints):
        yidx = coordinator['row_index'][i]
        xidx = coordinator['col_index'][i]
        view[i] = sat_view[yidx, xidx]
        azi[i] = sat_azi[yidx, xidx]
        lat[i] = latitude[yidx, xidx]
        lon[i] = longitude[yidx, xidx]

    view_corrected = 180 - view
    azi_corrected = azi + 180
    rlon = 360 - lon

    # check if in western hemisphere
    idx = rlon >= 360
    rlon[idx] -= 360

    idx = (180 - view_corrected) < 0.1
    view_corrected[idx] = 180
    azi_corrected[idx] = 0

    idx = azi_corrected > 360
    azi_corrected[idx] -= 360

    # get the modtran profiles to use based on the centre latitude
    _, centre_lat = acquisitions[0].gridded_geo_box().centre_lonlat

    if out_group is None:
        out_group = h5py.File('atmospheric-inputs.h5', 'w')

    if GroupName.ATMOSPHERIC_INPUTS_GRP.value not in out_group:
        out_group.create_group(GroupName.ATMOSPHERIC_INPUTS_GRP.value)

    group = out_group[GroupName.ATMOSPHERIC_INPUTS_GRP.value]
    iso_time = acquisitions[0].acquisition_datetime.isoformat()
    group.attrs['acquisition-datetime'] = iso_time

    json_data = {}

    # setup the json files required by MODTRAN
    if workflow in (Workflow.STANDARD, Workflow.NBAR):
        acqs = [a for a in acquisitions if a.band_type == BandType.REFLECTIVE]

        for p in range(npoints):
            for alb in Workflow.NBAR.albedos:
                input_data = {'name': POINT_ALBEDO_FMT.format(p=p, a=str(alb.value)),
                              'water': water_vapour,
                              'ozone': ozone,
                              'doy': acquisitions[0].julian_day(),
                              'visibility': -aerosol,
                              'lat': lat[p],
                              'lon': rlon[p],
                              'time': acquisitions[0].decimal_hour(),
                              'sat_azimuth': azi_corrected[p],
                              'sat_height': acquisitions[0].altitude / 1000.0,
                              'elevation': elevation,
                              'sat_view': view_corrected[p],
                              'albedo': float(alb.value),
                              'filter_function': acqs[0].spectral_filter_name,
                              'binary': False}

                if centre_lat < -23.0:
                    data = mpjson.midlat_summer_albedo(**input_data)
                else:
                    data = mpjson.tropical_albedo(**input_data)

                input_data['description'] = 'Input file for MODTRAN'
                input_data['file_format'] = 'json'
                input_data.pop('binary')

                json_data[(p, alb)] = data

                data = json.dumps(data, cls=JsonEncoder, indent=4)
                dname = ppjoin(POINT_FMT.format(p=p),
                               ALBEDO_FMT.format(a=alb.value),
                               DatasetName.MODTRAN_INPUT.value)
                write_scalar(data, dname, group, input_data)

    # create json for sbt if it has been collected
    if ancillary_group.attrs.get('sbt-ancillary'):
        dname = ppjoin(POINT_FMT, DatasetName.ATMOSPHERIC_PROFILE.value)
        acqs = [a for a in acquisitions if a.band_type == BandType.THERMAL]

        for p in range(npoints):
            atmos_profile = read_h5_table(ancillary_group, dname.format(p=p))

            n_layers = atmos_profile.shape[0] + 6
            elevation = atmos_profile.iloc[0]['GeoPotential_Height']

            input_data = {'name': POINT_ALBEDO_FMT.format(p=p, a='TH'),
                          'ozone': ozone,
                          'n': n_layers,
                          'prof_alt': list(atmos_profile['GeoPotential_Height']),
                          'prof_pres': list(atmos_profile['Pressure']),
                          'prof_temp': list(atmos_profile['Temperature']),
                          'prof_water': list(atmos_profile['Relative_Humidity']),
                          'visibility': -aerosol,
                          'sat_height': acquisitions[0].altitude / 1000.0,
                          'gpheight': elevation,
                          'sat_view': view_corrected[p],
                          'filter_function': acqs[0].spectral_filter_name,
                          'binary': False}

            data = mpjson.thermal_transmittance(**input_data)

            input_data['description'] = 'Input File for MODTRAN'
            input_data['file_format'] = 'json'
            input_data.pop('binary')

            json_data[(p, Albedos.ALBEDO_TH)] = data

            data = json.dumps(data, cls=JsonEncoder, indent=4)
            out_dname = ppjoin(POINT_FMT.format(p=p),
                               ALBEDO_FMT.format(a=Albedos.ALBEDO_TH.value),
                               DatasetName.MODTRAN_INPUT.value)
            write_scalar(data, out_dname, group, input_data)

    # attach location info to each point Group
    for p in range(npoints):
        lonlat = (coordinator['longitude'][p], coordinator['latitude'][p])
        group[POINT_FMT.format(p=p)].attrs['lonlat'] = lonlat

    return json_data, out_group
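# A minimal usage sketch (the HDF5 group handles are assumed to come from
# the earlier ancillary/angles stages of the workflow):
#
#     json_data, inputs = format_json(acqs, ancillary_grp, sat_sol_grp,
#                                     lon_lat_grp, Workflow.STANDARD,
#                                     out_group=None)
#     # json_data is keyed by (point, albedo); each value is the dict
#     # that is serialised as a MODTRAN JSON input configuration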
def calculate_coefficients(atmospheric_results_group, out_group,
                           compression=H5CompressionFilter.LZF,
                           filter_opts=None):
    """
    Calculate the atmospheric coefficients from the MODTRAN output,
    for use in the BRDF and atmospheric correction.
    Coefficients are computed for each band, for each coordinate,
    for each atmospheric coefficient. The atmospheric coefficients
    can be found in `Workflow.STANDARD.atmos_coefficients`.

    :param atmospheric_results_group:
        The root HDF5 `Group` that contains the atmospheric results
        from each MODTRAN run.

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.
        The datasets will be formatted to the HDF5 TABLE specification
        and the dataset names will be as follows:

        * DatasetName.NBAR_COEFFICIENTS (if Workflow.STANDARD or Workflow.NBAR)
        * DatasetName.SBT_COEFFICIENTS (if Workflow.STANDARD or Workflow.SBT)

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    nbar_coefficients = pd.DataFrame()
    sbt_coefficients = pd.DataFrame()

    channel_data = channel_solar_angle = upward = downward = None

    # Initialise the output group/file
    if out_group is None:
        fid = h5py.File('atmospheric-coefficients.h5', driver='core',
                        backing_store=False)
    else:
        fid = out_group

    res = atmospheric_results_group
    npoints = res.attrs['npoints']
    nbar_atmos = res.attrs['nbar_atmospherics']
    sbt_atmos = res.attrs['sbt_atmospherics']

    for point in range(npoints):
        point_grp = res[POINT_FMT.format(p=point)]
        lonlat = point_grp.attrs['lonlat']
        timestamp = pd.to_datetime(point_grp.attrs['datetime'])
        grp_path = ppjoin(POINT_FMT.format(p=point), ALBEDO_FMT)

        if nbar_atmos:
            channel_path = ppjoin(grp_path.format(a=Albedos.ALBEDO_0.value),
                                  DatasetName.CHANNEL.value)
            channel_data = read_h5_table(res, channel_path)

            channel_solar_angle_path = ppjoin(
                grp_path.format(a=Albedos.ALBEDO_0.value),
                DatasetName.SOLAR_ZENITH_CHANNEL.value
            )
            channel_solar_angle = read_h5_table(res, channel_solar_angle_path)

        if sbt_atmos:
            dname = ppjoin(grp_path.format(a=Albedos.ALBEDO_TH.value),
                           DatasetName.UPWARD_RADIATION_CHANNEL.value)
            upward = read_h5_table(res, dname)

            dname = ppjoin(grp_path.format(a=Albedos.ALBEDO_TH.value),
                           DatasetName.DOWNWARD_RADIATION_CHANNEL.value)
            downward = read_h5_table(res, dname)

        kwargs = {'channel_data': channel_data,
                  'solar_zenith_angle': channel_solar_angle,
                  'upward_radiation': upward,
                  'downward_radiation': downward}

        result = coefficients(**kwargs)

        # insert some datetime/geospatial fields
        if result[0] is not None:
            result[0].insert(0, 'POINT', point)
            result[0].insert(1, 'LONGITUDE', lonlat[0])
            result[0].insert(2, 'LATITUDE', lonlat[1])
            result[0].insert(3, 'DATETIME', timestamp)
            nbar_coefficients = nbar_coefficients.append(result[0])

        if result[1] is not None:
            result[1].insert(0, 'POINT', point)
            result[1].insert(1, 'LONGITUDE', lonlat[0])
            result[1].insert(2, 'LATITUDE', lonlat[1])
            result[1].insert(3, 'DATETIME', pd.to_datetime(timestamp))
            sbt_coefficients = sbt_coefficients.append(result[1])

    nbar_coefficients.reset_index(inplace=True)
    sbt_coefficients.reset_index(inplace=True)

    attrs = {'npoints': npoints}
    description = "Coefficients derived from the VNIR solar irradiation."
    attrs['description'] = description
    dname = DatasetName.NBAR_COEFFICIENTS.value

    if GroupName.COEFFICIENTS_GROUP.value not in fid:
        fid.create_group(GroupName.COEFFICIENTS_GROUP.value)

    group = fid[GroupName.COEFFICIENTS_GROUP.value]

    if nbar_atmos:
        write_dataframe(nbar_coefficients, dname, group, compression,
                        attrs=attrs, filter_opts=filter_opts)

    description = "Coefficients derived from the THERMAL solar irradiation."
    attrs['description'] = description
    dname = DatasetName.SBT_COEFFICIENTS.value

    if sbt_atmos:
        write_dataframe(sbt_coefficients, dname, group, compression,
                        attrs=attrs, filter_opts=filter_opts)

    if out_group is None:
        return fid
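# A minimal usage sketch (`results` is assumed to be the
# GroupName.ATMOSPHERIC_RESULTS_GRP group populated by run_modtran):
#
#     fid = calculate_coefficients(results, out_group=None)
#     nbar = read_h5_table(fid, ppjoin(GroupName.COEFFICIENTS_GROUP.value,
#                                      DatasetName.NBAR_COEFFICIENTS.value))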
def run_modtran(acquisitions, atmospherics_group, workflow, npoints, point,
                albedos, modtran_exe, basedir, out_group,
                compression=H5CompressionFilter.LZF, filter_opts=None):
    """
    Run MODTRAN and extract the channel results.
    """
    lonlat = atmospherics_group[POINT_FMT.format(p=point)].attrs['lonlat']

    # determine the output group/file
    if out_group is None:
        fid = h5py.File('atmospheric-results.h5', driver='core',
                        backing_store=False)
    else:
        fid = out_group

    # initial attributes
    base_attrs = {'Point': point,
                  'lonlat': lonlat,
                  'datetime': acquisitions[0].acquisition_datetime}

    base_path = ppjoin(GroupName.ATMOSPHERIC_RESULTS_GRP.value,
                       POINT_FMT.format(p=point))

    # what atmospheric calculations have been run and how many points
    group_name = GroupName.ATMOSPHERIC_RESULTS_GRP.value
    if group_name not in fid:
        fid.create_group(group_name)

    fid[group_name].attrs['npoints'] = npoints
    applied = workflow in (Workflow.STANDARD, Workflow.NBAR)
    fid[group_name].attrs['nbar_atmospherics'] = applied
    applied = workflow in (Workflow.STANDARD, Workflow.SBT)
    fid[group_name].attrs['sbt_atmospherics'] = applied

    acqs = acquisitions

    for albedo in albedos:
        base_attrs['Albedo'] = albedo.value
        workpath = pjoin(basedir, POINT_FMT.format(p=point),
                         ALBEDO_FMT.format(a=albedo.value))
        json_mod_infile = pjoin(workpath, ''.join(
            [POINT_ALBEDO_FMT.format(p=point, a=albedo.value), '.json']))

        group_path = ppjoin(base_path, ALBEDO_FMT.format(a=albedo.value))

        subprocess.check_call([modtran_exe, json_mod_infile], cwd=workpath)

        chn_fname = glob.glob(pjoin(workpath, '*.chn'))[0]
        tp6_fname = glob.glob(pjoin(workpath, '*.tp6'))[0]

        if albedo == Albedos.ALBEDO_TH:
            acq = [acq for acq in acqs if
                   acq.band_type == BandType.THERMAL][0]

            channel_data = read_modtran_channel(chn_fname, tp6_fname, acq,
                                                albedo)

            # upward radiation
            attrs = base_attrs.copy()
            dataset_name = DatasetName.UPWARD_RADIATION_CHANNEL.value
            attrs['description'] = ('Upward radiation channel output from '
                                    'MODTRAN')
            dset_name = ppjoin(group_path, dataset_name)
            write_dataframe(channel_data[0], dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)

            # downward radiation
            attrs = base_attrs.copy()
            dataset_name = DatasetName.DOWNWARD_RADIATION_CHANNEL.value
            attrs['description'] = ('Downward radiation channel output from '
                                    'MODTRAN')
            dset_name = ppjoin(group_path, dataset_name)
            write_dataframe(channel_data[1], dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)
        else:
            acq = [acq for acq in acqs if
                   acq.band_type == BandType.REFLECTIVE][0]

            # Will require updating to handle JSON output from modtran
            channel_data = read_modtran_channel(chn_fname, tp6_fname, acq,
                                                albedo)

            # channel output
            attrs = base_attrs.copy()
            dataset_name = DatasetName.CHANNEL.value
            attrs['description'] = 'Channel output from MODTRAN'
            dset_name = ppjoin(group_path, dataset_name)
            write_dataframe(channel_data[0], dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)

            # solar zenith angle at surface
            attrs = base_attrs.copy()
            dataset_name = DatasetName.SOLAR_ZENITH_CHANNEL.value
            attrs['description'] = ('Solar zenith angle at different '
                                    'atmosphere levels')
            dset_name = ppjoin(group_path, dataset_name)
            write_dataframe(channel_data[1], dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)

    # metadata for a given point
    alb_vals = [alb.value for alb in workflow.albedos]
    fid[base_path].attrs['lonlat'] = lonlat
    fid[base_path].attrs['datetime'] = acqs[0].acquisition_datetime.isoformat()
    fid[base_path].attrs.create('albedos', data=alb_vals, dtype=VLEN_STRING)

    if out_group is None:
        return fid
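# A minimal usage sketch (assumes the JSON inputs were already written to
# `tmpdir` by format_json/prepare_modtran, as the card4l workflow further
# below does; the MODTRAN executable path is a hypothetical placeholder):
#
#     fid = run_modtran(acqs, inputs_grp, Workflow.NBAR, npoints=25,
#                       point=0, albedos=[Albedos.ALBEDO_0],
#                       modtran_exe='/opt/modtran6/bin/mod6c_cons',
#                       basedir=tmpdir, out_group=None)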
def format_tp5(acquisitions, ancillary_group, satellite_solar_group,
               lon_lat_group, workflow, out_group):
    """
    Creates str formatted tp5 files for the albedo (0, 1) and
    transmittance (t).
    """
    # angles data
    sat_view = satellite_solar_group[DatasetName.SATELLITE_VIEW.value]
    sat_azi = satellite_solar_group[DatasetName.SATELLITE_AZIMUTH.value]
    longitude = lon_lat_group[DatasetName.LON.value]
    latitude = lon_lat_group[DatasetName.LAT.value]

    # retrieve the averaged ancillary if available
    anc_grp = ancillary_group.get(GroupName.ANCILLARY_AVG_GROUP.value)
    if anc_grp is None:
        anc_grp = ancillary_group

    # ancillary data
    coordinator = ancillary_group[DatasetName.COORDINATOR.value]
    aerosol = anc_grp[DatasetName.AEROSOL.value][()]
    water_vapour = anc_grp[DatasetName.WATER_VAPOUR.value][()]
    ozone = anc_grp[DatasetName.OZONE.value][()]
    elevation = anc_grp[DatasetName.ELEVATION.value][()]

    npoints = coordinator.shape[0]
    view = numpy.zeros(npoints, dtype='float32')
    azi = numpy.zeros(npoints, dtype='float32')
    lat = numpy.zeros(npoints, dtype='float64')
    lon = numpy.zeros(npoints, dtype='float64')

    for i in range(npoints):
        yidx = coordinator['row_index'][i]
        xidx = coordinator['col_index'][i]
        view[i] = sat_view[yidx, xidx]
        azi[i] = sat_azi[yidx, xidx]
        lat[i] = latitude[yidx, xidx]
        lon[i] = longitude[yidx, xidx]

    view_corrected = 180 - view
    azi_corrected = azi + 180
    rlon = 360 - lon

    # check if in western hemisphere
    idx = rlon >= 360
    rlon[idx] -= 360

    idx = (180 - view_corrected) < 0.1
    view_corrected[idx] = 180
    azi_corrected[idx] = 0

    idx = azi_corrected > 360
    azi_corrected[idx] -= 360

    # get the modtran profiles to use based on the centre latitude
    _, centre_lat = acquisitions[0].gridded_geo_box().centre_lonlat

    if centre_lat < -23.0:
        albedo_profile = MIDLAT_SUMMER_ALBEDO
        trans_profile = MIDLAT_SUMMER_TRANSMITTANCE
    else:
        albedo_profile = TROPICAL_ALBEDO
        trans_profile = TROPICAL_TRANSMITTANCE

    if out_group is None:
        out_group = h5py.File('atmospheric-inputs.h5', 'w')

    if GroupName.ATMOSPHERIC_INPUTS_GRP.value not in out_group:
        out_group.create_group(GroupName.ATMOSPHERIC_INPUTS_GRP.value)

    group = out_group[GroupName.ATMOSPHERIC_INPUTS_GRP.value]
    iso_time = acquisitions[0].acquisition_datetime.isoformat()
    group.attrs['acquisition-datetime'] = iso_time

    tp5_data = {}

    # setup the tp5 files required by MODTRAN
    if workflow == Workflow.STANDARD or workflow == Workflow.NBAR:
        acqs = [a for a in acquisitions if a.band_type == BandType.REFLECTIVE]

        for p in range(npoints):
            for alb in Workflow.NBAR.albedos:
                input_data = {'water': water_vapour,
                              'ozone': ozone,
                              'filter_function': acqs[0].spectral_filter_file,
                              'visibility': -aerosol,
                              'elevation': elevation,
                              'sat_height': acquisitions[0].altitude / 1000.0,
                              'sat_view': view_corrected[p],
                              'doy': acquisitions[0].julian_day(),
                              'binary': 'T'}
                if alb == Albedos.ALBEDO_T:
                    input_data['albedo'] = 0.0
                    input_data['sat_view_offset'] = 180.0 - view_corrected[p]
                    data = trans_profile.format(**input_data)
                else:
                    input_data['albedo'] = float(alb.value)
                    input_data['lat'] = lat[p]
                    input_data['lon'] = rlon[p]
                    input_data['time'] = acquisitions[0].decimal_hour()
                    input_data['sat_azimuth'] = azi_corrected[p]
                    data = albedo_profile.format(**input_data)

                tp5_data[(p, alb)] = data

                dname = ppjoin(POINT_FMT.format(p=p),
                               ALBEDO_FMT.format(a=alb.value),
                               DatasetName.TP5.value)
                write_scalar(numpy.string_(data), dname, group, input_data)

    # create tp5 for sbt if it has been collected
    if ancillary_group.attrs.get('sbt-ancillary'):
        dname = ppjoin(POINT_FMT, DatasetName.ATMOSPHERIC_PROFILE.value)
        acqs = [a for a in acquisitions if a.band_type == BandType.THERMAL]

        for p in range(npoints):
            atmospheric_profile = []
            atmos_profile = read_h5_table(ancillary_group, dname.format(p=p))
            n_layers = atmos_profile.shape[0] + 6
            elevation = atmos_profile.iloc[0]['GeoPotential_Height']

            for i, row in atmos_profile.iterrows():
                input_data = {'gpheight': row['GeoPotential_Height'],
                              'pressure': row['Pressure'],
                              'airtemp': row['Temperature'],
                              'humidity': row['Relative_Humidity'],
                              'zero': 0.0}
                atmospheric_profile.append(SBT_FORMAT.format(**input_data))

            input_data = {'ozone': ozone,
                          'filter_function': acqs[0].spectral_filter_file,
                          'visibility': -aerosol,
                          'gpheight': elevation,
                          'n': n_layers,
                          'sat_height': acquisitions[0].altitude / 1000.0,
                          'sat_view': view_corrected[p],
                          'binary': 'T',
                          'atmospheric_profile': ''.join(atmospheric_profile)}

            data = THERMAL_TRANSMITTANCE.format(**input_data)
            tp5_data[(p, Albedos.ALBEDO_TH)] = data
            out_dname = ppjoin(POINT_FMT.format(p=p),
                               ALBEDO_FMT.format(a=Albedos.ALBEDO_TH.value),
                               DatasetName.TP5.value)
            write_scalar(numpy.string_(data), out_dname, group, input_data)

    # attach location info to each point Group
    for p in range(npoints):
        lonlat = (coordinator['longitude'][p], coordinator['latitude'][p])
        group[POINT_FMT.format(p=p)].attrs['lonlat'] = lonlat

    return tp5_data, out_group
def run_modtran(acquisitions, atmospherics_group, workflow, npoints, point,
                albedos, modtran_exe, basedir, out_group,
                compression=H5CompressionFilter.LZF, filter_opts=None):
    """
    Run MODTRAN and return the flux and channel results.
    """
    lonlat = atmospherics_group[POINT_FMT.format(p=point)].attrs['lonlat']

    # determine the output group/file
    if out_group is None:
        fid = h5py.File('atmospheric-results.h5', driver='core',
                        backing_store=False)
    else:
        fid = out_group

    # initial attributes
    base_attrs = {'Point': point,
                  'lonlat': lonlat,
                  'datetime': acquisitions[0].acquisition_datetime}

    base_path = ppjoin(GroupName.ATMOSPHERIC_RESULTS_GRP.value,
                       POINT_FMT.format(p=point))

    # what atmospheric calculations have been run and how many points
    group_name = GroupName.ATMOSPHERIC_RESULTS_GRP.value
    if group_name not in fid:
        fid.create_group(group_name)

    fid[group_name].attrs['npoints'] = npoints
    applied = workflow == Workflow.STANDARD or workflow == Workflow.NBAR
    fid[group_name].attrs['nbar_atmospherics'] = applied
    applied = workflow == Workflow.STANDARD or workflow == Workflow.SBT
    fid[group_name].attrs['sbt_atmospherics'] = applied

    acqs = acquisitions

    for albedo in albedos:
        base_attrs['Albedo'] = albedo.value
        workpath = pjoin(basedir, POINT_FMT.format(p=point),
                         ALBEDO_FMT.format(a=albedo.value))
        group_path = ppjoin(base_path, ALBEDO_FMT.format(a=albedo.value))

        subprocess.check_call([modtran_exe], cwd=workpath)
        chn_fname = glob.glob(pjoin(workpath, '*.chn'))[0]

        if albedo == Albedos.ALBEDO_TH:
            acq = [acq for acq in acqs if
                   acq.band_type == BandType.THERMAL][0]
            channel_data = read_modtran_channel(chn_fname, acq, albedo)

            # upward radiation
            attrs = base_attrs.copy()
            dataset_name = DatasetName.UPWARD_RADIATION_CHANNEL.value
            attrs['description'] = ('Upward radiation channel output from '
                                    'MODTRAN')
            dset_name = ppjoin(group_path, dataset_name)
            write_dataframe(channel_data[0], dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)

            # downward radiation
            attrs = base_attrs.copy()
            dataset_name = DatasetName.DOWNWARD_RADIATION_CHANNEL.value
            attrs['description'] = ('Downward radiation channel output from '
                                    'MODTRAN')
            dset_name = ppjoin(group_path, dataset_name)
            write_dataframe(channel_data[1], dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)
        else:
            acq = [acq for acq in acqs if
                   acq.band_type == BandType.REFLECTIVE][0]
            flux_fname = glob.glob(pjoin(workpath, '*_b.flx'))[0]
            flux_data, altitudes = read_modtran_flux(flux_fname)
            channel_data = read_modtran_channel(chn_fname, acq, albedo)

            # output the flux data
            attrs = base_attrs.copy()
            dset_name = ppjoin(group_path, DatasetName.FLUX.value)
            attrs['description'] = 'Flux output from MODTRAN'
            write_dataframe(flux_data, dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)

            # output the altitude data
            attrs = base_attrs.copy()
            attrs['description'] = 'Altitudes output from MODTRAN'
            attrs['altitude_levels'] = altitudes.shape[0]
            attrs['units'] = 'km'
            dset_name = ppjoin(group_path, DatasetName.ALTITUDES.value)
            write_dataframe(altitudes, dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)

            # accumulate the solar irradiance
            transmittance = True if albedo == Albedos.ALBEDO_T else False
            response = acq.spectral_response()
            accumulated = calculate_solar_radiation(flux_data, response,
                                                    altitudes.shape[0],
                                                    transmittance)

            attrs = base_attrs.copy()
            dset_name = ppjoin(group_path, DatasetName.SOLAR_IRRADIANCE.value)
            description = ("Accumulated solar irradiation for point {} "
                           "and albedo {}.")
            attrs['description'] = description.format(point, albedo.value)
            write_dataframe(accumulated, dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)

            # channel output
            attrs = base_attrs.copy()
            dataset_name = DatasetName.CHANNEL.value
            attrs['description'] = 'Channel output from MODTRAN'
            dset_name = ppjoin(group_path, dataset_name)
            write_dataframe(channel_data, dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)

    # metadata for a given point
    alb_vals = [alb.value for alb in workflow.albedos]
    fid[base_path].attrs['lonlat'] = lonlat
    fid[base_path].attrs['datetime'] = acqs[0].acquisition_datetime.isoformat()
    fid[base_path].attrs.create('albedos', data=alb_vals, dtype=VLEN_STRING)

    if out_group is None:
        return fid
def collect_sbt_ancillary(acquisition, lonlats, ancillary_path,
                          invariant_fname=None, out_group=None,
                          compression=H5CompressionFilter.LZF,
                          filter_opts=None):
    """
    Collects the ancillary data required for surface brightness
    temperature.

    :param acquisition:
        An instance of an `Acquisition` object.

    :param lonlats:
        A `list` of tuples containing (longitude, latitude) coordinates.

    :param ancillary_path:
        A `str` containing the directory pathname to the ECMWF
        ancillary data.

    :param invariant_fname:
        A `str` containing the file pathname to the invariant
        geopotential data.

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    # Initialise the output files
    if out_group is None:
        fid = h5py.File("sbt-ancillary.h5", "w", driver="core",
                        backing_store=False)
    else:
        fid = out_group

    fid.attrs["sbt-ancillary"] = True

    dt = acquisition.acquisition_datetime

    description = ("Combined Surface and Pressure Layer data retrieved from "
                   "the ECMWF catalogue.")
    attrs = {"description": description,
             "Date used for querying ECMWF": dt}

    for i, lonlat in enumerate(lonlats):
        pnt = POINT_FMT.format(p=i)

        # get data located at the surface
        dew = ecwmf_dewpoint_temperature(ancillary_path, lonlat, dt)
        t2m = ecwmf_temperature_2metre(ancillary_path, lonlat, dt)
        sfc_prs = ecwmf_surface_pressure(ancillary_path, lonlat, dt)
        sfc_hgt = ecwmf_elevation(invariant_fname, lonlat)
        sfc_rh = relative_humdity(t2m[0], dew[0])

        # output the scalar data along with the attrs
        dname = ppjoin(pnt, DatasetName.DEWPOINT_TEMPERATURE.value)
        write_scalar(dew[0], dname, fid, dew[1])

        dname = ppjoin(pnt, DatasetName.TEMPERATURE_2M.value)
        write_scalar(t2m[0], dname, fid, t2m[1])

        dname = ppjoin(pnt, DatasetName.SURFACE_PRESSURE.value)
        write_scalar(sfc_prs[0], dname, fid, sfc_prs[1])

        dname = ppjoin(pnt, DatasetName.SURFACE_GEOPOTENTIAL.value)
        write_scalar(sfc_hgt[0], dname, fid, sfc_hgt[1])

        dname = ppjoin(pnt, DatasetName.SURFACE_RELATIVE_HUMIDITY.value)
        attrs = {"description": "Relative Humidity calculated at the surface"}
        write_scalar(sfc_rh, dname, fid, attrs)

        # get the data from each of the pressure levels (1 -> 1000 ISBL)
        gph = ecwmf_geo_potential(ancillary_path, lonlat, dt)
        tmp = ecwmf_temperature(ancillary_path, lonlat, dt)
        rh = ecwmf_relative_humidity(ancillary_path, lonlat, dt)

        dname = ppjoin(pnt, DatasetName.GEOPOTENTIAL.value)
        write_dataframe(gph[0], dname, fid, compression, attrs=gph[1],
                        filter_opts=filter_opts)

        dname = ppjoin(pnt, DatasetName.TEMPERATURE.value)
        write_dataframe(tmp[0], dname, fid, compression, attrs=tmp[1],
                        filter_opts=filter_opts)

        dname = ppjoin(pnt, DatasetName.RELATIVE_HUMIDITY.value)
        write_dataframe(rh[0], dname, fid, compression, attrs=rh[1],
                        filter_opts=filter_opts)

        # combine the surface and higher pressure layers into a single array
        cols = ["GeoPotential_Height", "Pressure", "Temperature",
                "Relative_Humidity"]
        layers = pandas.DataFrame(columns=cols, index=range(rh[0].shape[0]),
                                  dtype="float64")

        layers["GeoPotential_Height"] = gph[0]["GeoPotential_Height"].values
        layers["Pressure"] = ECWMF_LEVELS[::-1]
        layers["Temperature"] = tmp[0]["Temperature"].values
        layers["Relative_Humidity"] = rh[0]["Relative_Humidity"].values

        # define the surface level
        df = pandas.DataFrame(
            {
                "GeoPotential_Height": sfc_hgt[0],
                "Pressure": sfc_prs[0],
                "Temperature": kelvin_2_celcius(t2m[0]),
                "Relative_Humidity": sfc_rh,
            },
            index=[0],
        )

        # MODTRAN requires the height to be ascending
        # and the pressure to be descending
        wh = ((layers["GeoPotential_Height"] > sfc_hgt[0]) &
              (layers["Pressure"] < sfc_prs[0].round()))
        df = df.append(layers[wh])
        df.reset_index(drop=True, inplace=True)

        dname = ppjoin(pnt, DatasetName.ATMOSPHERIC_PROFILE.value)
        write_dataframe(df, dname, fid, compression, attrs=attrs,
                        filter_opts=filter_opts)

        fid[pnt].attrs["lonlat"] = lonlat

    if out_group is None:
        return fid
def card4l(level1, granule, workflow, vertices, method, pixel_quality,
           landsea, tle_path, aerosol, brdf, ozone_path, water_vapour,
           dem_path, dsm_fname, invariant_fname, modtran_exe, out_fname,
           ecmwf_path=None, rori=0.52, buffer_distance=8000,
           compression=H5CompressionFilter.LZF, filter_opts=None,
           h5_driver=None, acq_parser_hint=None, normalized_solar_zenith=45.):
    """
    CEOS Analysis Ready Data for Land.
    A workflow for producing standardised products that meet the
    CARD4L specification.

    :param level1:
        A string containing the full file pathname to the level1
        dataset.

    :param granule:
        A string containing the granule id to process.

    :param workflow:
        An enum from wagl.constants.Workflow representing which
        workflow to run.

    :param vertices:
        An integer 2-tuple indicating the number of rows and columns
        of sample-locations ("coordinator") to produce.
        The vertex columns should be an odd number.

    :param method:
        An enum from wagl.constants.Method representing the
        interpolation method to use during the interpolation
        of the atmospheric coefficients.

    :param pixel_quality:
        A bool indicating whether or not to run pixel quality.

    :param landsea:
        A string containing the full file pathname to the directory
        containing the land/sea mask datasets.

    :param tle_path:
        A string containing the full file pathname to the directory
        containing the two line element datasets.

    :param aerosol:
        A string containing the full file pathname to the HDF5 file
        containing the aerosol data.

    :param brdf:
        A dict containing either user-supplied BRDF values, or the
        full file pathname to the directory containing the BRDF data
        and the decadal averaged BRDF data used for acquisitions
        prior to TERRA/AQUA satellite operations.

    :param ozone_path:
        A string containing the full file pathname to the directory
        containing the ozone datasets.

    :param water_vapour:
        A string containing the full file pathname to the directory
        containing the water vapour datasets.

    :param dem_path:
        A string containing the full file pathname to the directory
        containing the reduced resolution DEM.

    :param dsm_fname:
        A string containing the full file pathname to the directory
        containing the Digital Surface Model for use in terrain
        illumination correction.

    :param invariant_fname:
        A string containing the full file pathname to the image
        file containing the invariant geo-potential data for use
        within the SBT process.

    :param modtran_exe:
        A string containing the full file pathname to the MODTRAN
        executable.

    :param out_fname:
        A string containing the full file pathname that will contain
        the output data from the data standardisation process.

    :param ecmwf_path:
        A string containing the full file pathname to the directory
        containing the data from the European Centre for Medium-Range
        Weather Forecasts, for use within the SBT process.

    :param rori:
        A floating point value for surface reflectance adjustment.
        TODO Fuqin to add additional documentation for this parameter.
        Default is 0.52.

    :param buffer_distance:
        A number representing the desired distance (in the same
        units as the acquisition) in which to calculate the extra
        number of pixels required to buffer an image.
        Default is 8000, which for an acquisition using metres would
        equate to 8000 metres.

    :param compression:
        An enum from hdf5.compression.H5CompressionFilter representing
        the desired compression filter to use for writing H5 IMAGE and
        TABLE class datasets to disk.
        Default is H5CompressionFilter.LZF.

    :param filter_opts:
        A dict containing any additional keyword arguments when
        generating the configuration for the given compression Filter.
        Default is None.

    :param h5_driver:
        The specific HDF5 file driver to use when creating the output
        HDF5 file.
        See http://docs.h5py.org/en/latest/high/file.html#file-drivers
        for more details.
        Default is None; which writes direct to disk using the
        appropriate driver for the underlying OS.

    :param acq_parser_hint:
        A string containing any hints to provide the acquisitions
        loader with.

    :param normalized_solar_zenith:
        Solar zenith angle to normalize for (in degrees).
        Default is 45 degrees.
    """
    json_fmt = pjoin(POINT_FMT, ALBEDO_FMT,
                     ''.join([POINT_ALBEDO_FMT, '.json']))
    nvertices = vertices[0] * vertices[1]

    container = acquisitions(level1, hint=acq_parser_hint)

    # TODO: pass through an acquisitions container rather than pathname
    with h5py.File(out_fname, 'w', driver=h5_driver) as fid:
        fid.attrs['level1_uri'] = level1

        for grp_name in container.supported_groups:
            log = STATUS_LOGGER.bind(level1=container.label, granule=granule,
                                     granule_group=grp_name)

            # root group for a given granule and resolution group
            root = fid.create_group(ppjoin(granule, grp_name))
            acqs = container.get_acquisitions(granule=granule, group=grp_name)

            # include the resolution as a group attribute
            root.attrs['resolution'] = acqs[0].resolution

            # longitude and latitude
            log.info('Latitude-Longitude')
            create_lon_lat_grids(acqs[0], root, compression, filter_opts)

            # satellite and solar angles
            log.info('Satellite-Solar-Angles')
            calculate_angles(acqs[0], root[GroupName.LON_LAT_GROUP.value],
                             root, compression, filter_opts, tle_path)

            if workflow in (Workflow.STANDARD, Workflow.NBAR):

                # DEM
                log.info('DEM-retrieval')
                get_dsm(acqs[0], dsm_fname, buffer_distance, root,
                        compression, filter_opts)

                # slope & aspect
                log.info('Slope-Aspect')
                slope_aspect_arrays(acqs[0],
                                    root[GroupName.ELEVATION_GROUP.value],
                                    buffer_distance, root, compression,
                                    filter_opts)

                # incident angles
                log.info('Incident-Angles')
                incident_angles(root[GroupName.SAT_SOL_GROUP.value],
                                root[GroupName.SLP_ASP_GROUP.value],
                                root, compression, filter_opts)

                # exiting angles
                log.info('Exiting-Angles')
                exiting_angles(root[GroupName.SAT_SOL_GROUP.value],
                               root[GroupName.SLP_ASP_GROUP.value],
                               root, compression, filter_opts)

                # relative azimuth slope
                log.info('Relative-Azimuth-Angles')
                incident_group_name = GroupName.INCIDENT_GROUP.value
                exiting_group_name = GroupName.EXITING_GROUP.value
                relative_azimuth_slope(root[incident_group_name],
                                       root[exiting_group_name],
                                       root, compression, filter_opts)

                # self shadow
                log.info('Self-Shadow')
                self_shadow(root[incident_group_name],
                            root[exiting_group_name],
                            root, compression, filter_opts)

                # cast shadow solar source direction
                log.info('Cast-Shadow-Solar-Direction')
                dsm_group_name = GroupName.ELEVATION_GROUP.value
                calculate_cast_shadow(acqs[0], root[dsm_group_name],
                                      root[GroupName.SAT_SOL_GROUP.value],
                                      buffer_distance, root, compression,
                                      filter_opts)

                # cast shadow satellite source direction
                log.info('Cast-Shadow-Satellite-Direction')
                calculate_cast_shadow(acqs[0], root[dsm_group_name],
                                      root[GroupName.SAT_SOL_GROUP.value],
                                      buffer_distance, root, compression,
                                      filter_opts, False)

                # combined shadow masks
                log.info('Combined-Shadow')
                combine_shadow_masks(root[GroupName.SHADOW_GROUP.value],
                                     root[GroupName.SHADOW_GROUP.value],
                                     root[GroupName.SHADOW_GROUP.value],
                                     root, compression, filter_opts)

        # nbar and sbt ancillary
        log = STATUS_LOGGER.bind(level1=container.label, granule=granule,
                                 granule_group=None)

        # granule root group
        root = fid[granule]

        # get the highest resolution group containing supported bands
        acqs, grp_name = container.get_highest_resolution(granule=granule)

        grn_con = container.get_granule(granule=granule, container=True)
        res_group = root[grp_name]

        log.info('Ancillary-Retrieval')
        nbar_paths = {'aerosol_dict': aerosol,
                      'water_vapour_dict': water_vapour,
                      'ozone_path': ozone_path,
                      'dem_path': dem_path,
                      'brdf_dict': brdf}
        collect_ancillary(grn_con, res_group[GroupName.SAT_SOL_GROUP.value],
                          nbar_paths, ecmwf_path, invariant_fname,
                          vertices, root, compression, filter_opts)

        # atmospherics
        log.info('Atmospherics')

        ancillary_group = root[GroupName.ANCILLARY_GROUP.value]

        # satellite/solar angles and lon/lat for a resolution group
        sat_sol_grp = res_group[GroupName.SAT_SOL_GROUP.value]
        lon_lat_grp = res_group[GroupName.LON_LAT_GROUP.value]

        # TODO: supported acqs in different groups pointing to different response funcs
        json_data, _ = format_json(acqs, ancillary_group, sat_sol_grp,
                                   lon_lat_grp, workflow, root)

        # atmospheric inputs group
        inputs_grp = root[GroupName.ATMOSPHERIC_INPUTS_GRP.value]

        # radiative transfer for each point and albedo
        for key in json_data:
            point, albedo = key

            log.info('Radiative-Transfer', point=point, albedo=albedo.value)

            with tempfile.TemporaryDirectory() as tmpdir:

                prepare_modtran(acqs, point, [albedo], tmpdir)

                point_dir = pjoin(tmpdir, POINT_FMT.format(p=point))
                workdir = pjoin(point_dir, ALBEDO_FMT.format(a=albedo.value))

                json_mod_infile = pjoin(tmpdir,
                                        json_fmt.format(p=point,
                                                        a=albedo.value))

                with open(json_mod_infile, 'w') as src:
                    json_dict = json_data[key]

                    if albedo == Albedos.ALBEDO_TH:

                        json_dict["MODTRAN"][0]["MODTRANINPUT"]["SPECTRAL"]["FILTNM"] = \
                            "%s/%s" % (workdir, json_dict["MODTRAN"][0]["MODTRANINPUT"]["SPECTRAL"]["FILTNM"])

                        json_dict["MODTRAN"][1]["MODTRANINPUT"]["SPECTRAL"]["FILTNM"] = \
                            "%s/%s" % (workdir, json_dict["MODTRAN"][1]["MODTRANINPUT"]["SPECTRAL"]["FILTNM"])

                    else:
                        json_dict["MODTRAN"][0]["MODTRANINPUT"]["SPECTRAL"]["FILTNM"] = \
                            "%s/%s" % (workdir, json_dict["MODTRAN"][0]["MODTRANINPUT"]["SPECTRAL"]["FILTNM"])

                    json.dump(json_dict, src, cls=JsonEncoder, indent=4)

                run_modtran(acqs, inputs_grp, workflow, nvertices, point,
                            [albedo], modtran_exe, tmpdir, root,
                            compression, filter_opts)

        # atmospheric coefficients
        log.info('Coefficients')
        results_group = root[GroupName.ATMOSPHERIC_RESULTS_GRP.value]
        calculate_coefficients(results_group, root, compression, filter_opts)
        esun_values = {}

        # interpolate coefficients
        for grp_name in container.supported_groups:
            log = STATUS_LOGGER.bind(level1=container.label, granule=granule,
                                     granule_group=grp_name)
            log.info('Interpolation')

            # acquisitions and available bands for the current group level
            acqs = container.get_acquisitions(granule=granule, group=grp_name)
            nbar_acqs = [acq for acq in acqs if
                         acq.band_type == BandType.REFLECTIVE]
            sbt_acqs = [acq for acq in acqs if
                        acq.band_type == BandType.THERMAL]

            res_group = root[grp_name]
            sat_sol_grp = res_group[GroupName.SAT_SOL_GROUP.value]
            comp_grp = root[GroupName.COEFFICIENTS_GROUP.value]

            for coefficient in workflow.atmos_coefficients:
                if coefficient is AtmosphericCoefficients.ESUN:
                    continue
                if coefficient in Workflow.NBAR.atmos_coefficients:
                    band_acqs = nbar_acqs
                else:
                    band_acqs = sbt_acqs

                for acq in band_acqs:
                    log.info('Interpolate', band_id=acq.band_id,
                             coefficient=coefficient.value)
                    interpolate(acq, coefficient, ancillary_group,
                                sat_sol_grp, comp_grp, res_group,
                                compression, filter_opts, method)

            # standardised products
            band_acqs = []
            if workflow in (Workflow.STANDARD, Workflow.NBAR):
                band_acqs.extend(nbar_acqs)

            if workflow in (Workflow.STANDARD, Workflow.SBT):
                band_acqs.extend(sbt_acqs)

            for acq in band_acqs:
                interp_grp = res_group[GroupName.INTERP_GROUP.value]

                if acq.band_type == BandType.THERMAL:
                    log.info('SBT', band_id=acq.band_id)
                    surface_brightness_temperature(acq, interp_grp,
                                                   res_group, compression,
                                                   filter_opts)
                else:
                    atmos_coefs = read_h5_table(
                        comp_grp, DatasetName.NBAR_COEFFICIENTS.value)
                    esun_values[acq.band_name] = (
                        atmos_coefs[atmos_coefs.band_name == acq.band_name][
                            AtmosphericCoefficients.ESUN.value]).values[0]

                    slp_asp_grp = res_group[GroupName.SLP_ASP_GROUP.value]
                    rel_slp_asp = res_group[GroupName.REL_SLP_GROUP.value]
                    incident_grp = res_group[GroupName.INCIDENT_GROUP.value]
                    exiting_grp = res_group[GroupName.EXITING_GROUP.value]
                    shadow_grp = res_group[GroupName.SHADOW_GROUP.value]

                    log.info('Surface-Reflectance', band_id=acq.band_id)
                    calculate_reflectance(
                        acq, interp_grp, sat_sol_grp, slp_asp_grp,
                        rel_slp_asp, incident_grp, exiting_grp, shadow_grp,
                        ancillary_group, rori, res_group, compression,
                        filter_opts, normalized_solar_zenith,
                        esun_values[acq.band_name])

            # pixel quality
            sbt_only = workflow == Workflow.SBT
            if pixel_quality and can_pq(level1,
                                        acq_parser_hint) and not sbt_only:
                run_pq(level1, res_group, landsea, res_group, compression,
                       filter_opts, AP.NBAR, acq_parser_hint)
                run_pq(level1, res_group, landsea, res_group, compression,
                       filter_opts, AP.NBART, acq_parser_hint)

        def get_band_acqs(grp_name):
            acqs = container.get_acquisitions(granule=granule, group=grp_name)
            nbar_acqs = [acq for acq in acqs if
                         acq.band_type == BandType.REFLECTIVE]
            sbt_acqs = [acq for acq in acqs if
                        acq.band_type == BandType.THERMAL]

            band_acqs = []
            if workflow in (Workflow.STANDARD, Workflow.NBAR):
                band_acqs.extend(nbar_acqs)

            if workflow in (Workflow.STANDARD, Workflow.SBT):
                band_acqs.extend(sbt_acqs)

            return band_acqs

        # wagl parameters
        parameters = {'vertices': list(vertices),
                      'method': method.value,
                      'rori': rori,
                      'buffer_distance': buffer_distance,
                      'normalized_solar_zenith': normalized_solar_zenith,
                      'esun': esun_values}

        # metadata yaml's
        metadata = root.create_group(DatasetName.METADATA.value)
        create_ard_yaml({grp_name: get_band_acqs(grp_name)
                         for grp_name in container.supported_groups},
                        ancillary_group, metadata, parameters, workflow)
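# A minimal usage sketch (every pathname is a hypothetical placeholder,
# and the Method member and brdf dict keys shown are illustrative only):
#
#     card4l(level1='/data/LC08_L1TP_scene', granule='LC08_L1TP_scene',
#            workflow=Workflow.STANDARD, vertices=(5, 5),
#            method=Method.SHEAR, pixel_quality=True,
#            landsea='/ancillary/land_sea', tle_path='/ancillary/tle',
#            aerosol='/ancillary/aerosol.h5',
#            brdf={'brdf_path': '/ancillary/brdf'},
#            ozone_path='/ancillary/ozone', water_vapour='/ancillary/wv',
#            dem_path='/ancillary/dem', dsm_fname='/ancillary/dsm.h5',
#            invariant_fname='/ancillary/invariant.h5',
#            modtran_exe='/opt/modtran6/bin/mod6c_cons',
#            out_fname='scene-standardised.h5')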