def all_netcdfs(self):
    """Collect every weather NetCDF for this product, inflating gzipped copies first.

    :return: the parent class's NetCDF list plus this product's existing files
    """
    weather_glob = Model.path() + "Weather/*/"
    # Decompress any gzipped product files so the plain copies exist on disk.
    compressed = [
        g for g in glob.glob(weather_glob + "{}.gz".format(TEMP_PRODUCT))
        if Path(g).is_file()
    ]
    util.expand_in_place(compressed)
    # Index the (now uncompressed) product files alongside the parent's results.
    candidates = glob.glob(weather_glob + "{}".format(TEMP_PRODUCT))
    existing = [nc for nc in candidates if Path(nc).is_file()]
    return super().all_netcdfs() + existing
def __init__(self):
    """Configure identifiers, metadata and reading locations for the Tmax model."""
    self.name = "TMAX"
    self.ident = "Temperature"
    self.code = "Tmax"
    self.crs = "EPSG:4326"
    self.path = os.path.abspath(Model.path() + 'Weather') + '/'

    # TODO - Proper metadata!
    author_list = [Author(name="", email="", organisation="")]
    published = dt.datetime(2015, 9, 9)
    summary = Abstracts("")
    self.metadata = ModelMetaData(
        authors=author_list,
        published_date=published,
        fuel_types=["surface"],
        doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
        abstract=summary)

    # Readings are daily surface maximum-temperature NetCDF files.
    self.outputs = {
        "type": "index",
        "readings": {
            "path": self.path,
            "url": "",
            "prefix": "MaxT_SFC",
            "suffix": ".nc"
        }
    }
def __init__(self):
    """Configure identifiers, metadata and reading locations for the KBDI model."""
    self.name = "kbdi"
    self.ident = "Keetch-Byram Drought"
    self.code = "KBDI"
    self.crs = "EPSG:3111"
    self.path = os.path.abspath(Model.path() + 'KBDI') + '/'

    # TODO - Proper metadata!
    author_list = [
        Author(name="Keetch", email="", organisation=""),
        Author(name="Byram", email="", organisation="")
    ]
    published = dt.datetime(2015, 9, 9)
    summary = Abstracts("")
    self.metadata = ModelMetaData(
        authors=author_list,
        published_date=published,
        fuel_types=["surface"],
        doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
        abstract=summary)

    # Readings are the computed drought-index NetCDF files.
    self.outputs = {
        "type": "index",
        "readings": {
            "path": self.path,
            "url": "",
            "prefix": "KBDI_SFC",
            "suffix": ".nc"
        }
    }
def consolidate_to_year_archive(self, year, file_name):
    """Merge a year's daily weather files into a single annual NetCDF archive.

    :param year: calendar year to consolidate
    :param file_name: product file name expected inside each dated directory
    :return: True when the archive was written, False otherwise
    """
    y_begin = dt.datetime(year, 1, 1)
    y_end = dt.datetime(year, 12, 31)

    # Candidate paths: one per dated directory under Weather/ for this year.
    fl = []
    for d in pd.date_range(y_begin, y_end):
        fl += [
            y + '/' + file_name
            for y in glob.glob(Model.path() +
                               "Weather/{}*".format(d.strftime("%Y%m%d")))
        ]
    # De-duplicate and keep only paths that actually exist.
    file_list = sorted(f for f in set(fl) if Path(f).is_file())

    if not file_list:
        return False

    xr1 = xr.open_dataset(file_list.pop(0))
    # BUG FIX: the loop previously ran `while len(file_list) > 1`, which
    # stopped with one file still queued and silently dropped the final
    # day's data from the archive.  Drain the whole list instead.
    while file_list:
        xr2 = xr.open_dataset(file_list.pop(0))
        xr1 = self.load_by_overwrite(xr1, xr2)

    xr1.attrs['var_name'] = self.outputs["readings"]["prefix"]
    logger.debug(xr1)

    if xr1['time'] is None:
        logger.debug('No temporal component to DataSet?!')
        return False

    # Kept for its side effect: raises if the dataset holds no entries
    # for `year`.  TODO: refine to require a full year's worth of *days*
    # (not just time entries) before committing the archive.
    xr1.sel(time=str(year))
    xr1.to_netcdf(self.archive_name(year), format='NETCDF4')
    return True
def __init__(self):
    """Configure identifiers, metadata and source locations for the JASMIN model."""
    self.name = "jasmin"
    self.ident = "JASMIN"
    self.code = "JASMIN"
    self.path = os.path.abspath(Model.path() + 'JASMIN') + '/'
    self.source = "http://opendap.bom.gov.au:8080/thredds/catalog/c35ee8d2a475e10ea06d0ad53b46ce2a/JASMIN_land_dryness/catalog.html"

    # TODO - Proper metadata!
    author_list = [
        Author(name="Imtiaz Dharssi", email="", organisation="Bureau of Meteorology, Australia"),
        Author(name="Vinodkumar", email="", organisation="Bureau of Meteorology, Australia")
    ]
    published = dt.datetime(2017, 10, 1)
    summary = Abstracts(
        "Accurate soil dryness information is essential for the calculation of accurate fire danger \ ratings, fire behavior prediction, flood forecasting and landslip warnings. Soil dryness \ also strongly influences temperatures and heatwave development by controlling the \ partitioning of net surface radiation into sensible, latent and ground heat fluxes. Rainfall \ forecasts are crucial for many applications and many studies suggest that soil dryness \ can significantly influence rainfall. Currently, soil dryness for fire danger prediction in \ Australia is estimated using very simple water balance models developed in the 1960s \ that ignore many important factors such as incident solar radiation, soil types, vegeta- \ tion height and root depth. This work presents a prototype high resolution soil moisture \ analysis system based around the Joint UK Land Environment System (JULES) land \ surface model. This prototype system is called the JULES based Australian Soil Mois- \ ture INformation (JASMIN) system. The JASMIN system can include data from many \ sources; such as surface observations of rainfall, temperature, dew-point temperature, \ wind speed, surface pressure as well as satellite derived measurements of rainfall, sur- \ face soil moisture, downward surface short-wave radiation, skin temperature, leaf area \ index and tree heights. The JASMIN system estimates soil moisture on four soil layers \ over the top 3 meters of soil, the surface layer has a thickness of 10 cm. The system \ takes into account the effect of different vegetation types, root depth, stomatal resis- \ tance and spatially varying soil texture. The analysis system has a one hour time-step \ with daily updating. For the surface soil layer, verification against ground based soil \ moisture observations from the OzNet, CosmOz and OzFlux networks shows that the \ JASMIN system is significantly more accurate than other soil moisture analysis sys- \ tem used at the Bureau of Meteorology. For the root-zone, the JASMIN system has \ similar skill to other commonly used soil moisture analysis systems. The Extended \ Triple Collocation (ETC) verification method also confirms the high skill of the JASMIN \ system.")
    self.metadata = ModelMetaData(
        authors=author_list,
        published_date=published,
        fuel_types=["surface"],
        doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
        abstract=summary)

    # Readings are soil-moisture NetCDF files.
    self.outputs = {
        "type": "index",
        "readings": {
            "path": self.path,
            "url": "",
            "prefix": "sm",
            "suffix": ".nc"
        }
    }
def __init__(self):
    """Configure identifiers, metadata and reading locations for the GFDI model."""
    self.name = "gfdi"
    self.ident = "Grass Fire Danger"
    self.code = "GFDI"
    self.crs = "EPSG:3111"
    self.path = os.path.abspath(Model.path() + 'GFDI') + '/'

    # TODO - Proper metadata!
    cfa = "Country Fire Authority"
    author_list = [
        Author(name="Danielle Martin", email="", organisation=cfa),
        Author(name="Alex Chen", email="", organisation=cfa),
        Author(name="David Nichols", email="", organisation=cfa),
        Author(name="Rachel Bessell", email="", organisation=cfa),
        Author(name="Susan Kiddie", email="", organisation=cfa),
        Author(name="Jude Alexander", email="", organisation=cfa)
    ]
    published = dt.datetime(2015, 9, 9)
    summary = Abstracts(
        "Depending on the growth stage of grass, certain physiological characteristics, such \ as water content and degree of curing (senescence), determine the susceptibility of \ grass to ignite or to propagate a fire. Grassland curing is an integral component of \ the Grassland Fire Danger Index (GFDI), which is used to determine the Fire Danger \ Ratings (FDRs). In providing input for the GFDI for the whole state of Victoria, this \ paper reports the development of two amalgamated products by the Country Fire \ Authority (CFA): (i) an automated web-based system which integrates weekly field \ observations with real time satellite data for operational grassland curing mapping, \ and (ii) a satellite model based on historical satellite data and historical field \ observations. Both products combined will provide an improved state-wide map of \ curing tailored for Victorian grasslands.")
    self.metadata = ModelMetaData(
        authors=author_list,
        published_date=published,
        fuel_types=["surface"],
        doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
        abstract=summary)

    # Readings are the computed grass-fire-danger NetCDF files.
    self.outputs = {
        "type": "index",
        "readings": {
            "path": self.path,
            "url": "",
            "prefix": "GFDI_SFC",
            "suffix": ".nc"
        }
    }
async def consolidate_year(self, y):
    """Mask a year's DFMC readings to the Australian landmass and rewrite the annual file.

    :param y: calendar year to consolidate
    :return: True on success, False if writing the consolidated file failed
    """
    # Build a region mask from the pickled Australia polygon.
    with open(Model.path() + 'australia.pickle', 'rb') as pickled_australia:
        australia = pickle.load(pickled_australia)
    AUmask = regionmask.Regions_cls(
        'AUmask', [0], ['Australia'], ['AU'], [australia.polygon])

    for year in range(y, y + 1):
        with xr.open_mfdataset("%s%s_%s*" % (self.outputs['readings']['path'],
                                             self.outputs['readings']['prefix'],
                                             year),
                               chunks={'time': 1}) as ds:
            # Drop the redundant 'observations' axis, then mask every cell
            # that falls outside the Australia polygon.
            ds['DFMC'] = ds['DFMC'].isel(observations=0, drop=True)
            dm = ds['DFMC'].isel(time=0)
            mask = AUmask.mask(dm['longitude'], dm['latitude'])
            mask_ma = np.ma.masked_invalid(mask)
            ds = ds.where(mask_ma == 0)

            logger.debug("--- Saving %s" % (year))
            # Reset attributes so stale metadata from the inputs doesn't leak through.
            ds.attrs = dict()
            ds.attrs['crs'] = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs "
            ds.attrs['var_name'] = 'DFMC'
            ds['time'].attrs['long_name'] = 'time'
            ds['time'].attrs['name'] = 'time'
            ds['time'].attrs['standard_name'] = 'time'
            ds['DFMC'].attrs['units'] = 'z-scores'
            ds['DFMC'].attrs[
                'long_name'] = 'Dead Fine Fuels Moisture Content - (Percentage wet over dry by weight)'
            ds['DFMC'].attrs['name'] = self.outputs['readings']['prefix']
            ds['DFMC'].attrs['standard_name'] = self.outputs['readings']['prefix']

            # The target file is still held open by open_mfdataset (we would
            # self-lock), so move it aside before writing the replacement.
            tfile = "%s%s_%s.nc" % (self.path,
                                    self.outputs['readings']['prefix'],
                                    str(year))
            try:
                os.rename(tfile, tfile + '.tmp')
            except OSError as e:
                # BUG FIX: was `except e:`, which raises NameError instead of
                # handling anything.  Missing file is best-effort: log and go on.
                logger.error(e)
            try:
                ds.to_netcdf(tfile, mode='w', format='NETCDF4')
                os.remove(tfile + '.tmp')
            except Exception as e:
                logger.error(e)
                # BUG FIX: was `return false` — a NameError, not a boolean.
                return False
    return True
def all_netcdfs(self):
    """
    Pattern matches potential paths where files could be stored to those
    that actually exist.
    Warning: Files outside this directory aren't indexed and won't get ingested.

    :return: list of existing JASMIN native NetCDF paths
    """
    pattern = Model.path() + "native/jasmin.vol.smc.*.nc"
    return [candidate for candidate in glob.glob(pattern) if Path(candidate).is_file()]
def __init__(self):
    """Configure identifiers, metadata and reading locations for the AWRA-L root-zone model."""
    self.name = "AWRA-L-R"
    self.ident = "Australian Landscape Water Balance (Root Zone)"
    self.code = "AWRA_ROOT"
    self.path = os.path.abspath(Model.path() + 'AWRA-L') + '/'

    # TODO - Proper metadata!
    author_list = [
        Author(name="BOM",
               email="*****@*****.**",
               organisation="Bureau of Meteorology, Australia")
    ]
    published = dt.datetime(2015, 9, 9)
    summary = Abstracts(
        "The information presented on the Australian Landscape Water Balance website is produced by \ the Bureau's operational Australian Water Resources Assessment Landscape model (AWRA-L). AWRA-L is a daily 0.05°\ grid-based, distributed water balance model, conceptualised as a small unimpaired catchment. It simulates the\ flow of water through the landscape from the rainfall entering the grid cell through the vegetation and soil\ moisture stores and then out of the grid cell through evapotranspiration, runoff or deep drainage to the groundwater.\n \ Each spatial unit (grid cell) in AWRA-L is divided into two hydrological response units (HRU) representing deep \ rooted vegetation (trees) and shallow rooted vegetation (grass). Hydrological processes are modelled separately \ for each HRU, then the resulting fluxes or stores are combined to give cell outputs. Hydrologically, these two \ HRUs differ in their aerodynamic control of evaporation and their interception capacities but the main difference\ is in their degree of access to different soil layers. The AWRA-L model has three soil layers (upper: 0–10 cm, \ lower: 10–100 cm, and deep: 1–6 m). The shallow rooted vegetation has access to subsurface soil moisture in the \ upper and lower soil stores only, while the deep rooted vegetation also has access to moisture in the deep store.\ Root Zone Soil Moisture is the sum of water in the AWRA-L Upper and Lower soil layers and represents the percentage \ of available water content in the top 1 m of the soil profile. The maximum storage within the soil layer is calculated \ from the depth of the soil and the relative soil water storage capacity. The soil properties that control the storage of\ water are derived from the continental scale mapping within Australian Soil Resources Information System (Johnston et al., 2003).\ The relative available water capacity of the soil layer is derived from ASRIS information as the available water capacity of a\ layer divided by its thickness. Pedotransfer functions are used to relate soil hydraulic properties to soil textural class.\ Soil drainage and moisture dynamics are then based on water balance considerations for each soil layer. The shallow and deep \ rooted vegetation can both draw on this combined layer. Actual soil moisture grids estimate the percentage of available water \ content rather than total soil water volume. Relative soil moisture grids, like the other grids, represent the long term deciles."
    )
    self.metadata = ModelMetaData(
        authors=author_list,
        published_date=published,
        fuel_types=["surface"],
        doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
        abstract=summary)

    # Readings are root-zone soil-moisture-percentage NetCDF files.
    self.outputs = {
        "type": "soil moisture",
        "readings": {
            "path": self.path,
            "url": "",
            "prefix": "sm_pct",
            "suffix": ".nc"
        }
    }
def netcdf_names_for_date(self, when, file_name):
    """Resolve the NetCDF file paths that may hold readings for `when`.

    Prefers a consolidated annual archive (creating one if possible);
    otherwise falls back to the daily files inside a +/- 7 day window
    around the requested date.

    :param when: the date being queried
    :param file_name: product file name expected inside each dated directory
    :return: list of existing file paths that may contain `when`
    """
    # First, check to see if an annual archive exists
    archival_file = self.archive_name(when.year)
    if Path(archival_file).is_file():
        return [archival_file]
    # Can we create a full years archive for this whole year?
    if self.consolidate_to_year_archive(when.year, file_name):
        return [archival_file]
    else:
        # Because some of the data is in 7 day observations,
        # we need to pad dates +/- 7 days to ensure we grab the correct nc files that might contain 'when'
        window_begin = when - dt.timedelta(7)
        window_end = when + dt.timedelta(7)
        cdf_list = []
        for d in pd.date_range(window_begin, window_end):
            # Compressed (.gz) copies -- NOTE: this and the next label were
            # previously swapped; the ".gz" paths are the compressed ones.
            cdf_list += [
                p + "/" + file_name + ".gz"
                for p in glob.glob(Model.path() +
                                   "Weather/{}*".format(d.strftime("%Y%m%d")))
            ]
            # Uncompressed NetCDF files
            cdf_list += [
                p + "/" + file_name
                for p in glob.glob(Model.path() +
                                   "Weather/{}*".format(d.strftime("%Y%m%d")))
            ]
        short_list = list(set(cdf_list))
        # Assemble the individual components that contain that date range
        # Auto-magically accepts only files that actually exist
        return [f for f in short_list if Path(f).is_file()]
def netcdf_names_for_dates(self, start, finish):
    """Return the existing JASMIN yearly NetCDF files covering [start, finish].

    The window is padded by +/- 7 days because some of the data is in
    7 day observations, so a date near a year boundary may live in the
    adjacent year's file.

    :param start: first date of interest
    :param finish: last date of interest
    :return: list of existing file paths
    """
    window_begin = start - dt.timedelta(7)
    window_end = finish + dt.timedelta(7)
    cdf_list = []
    # The source files are per-year, so glob once per distinct year rather
    # than once per day (the old loop repeated the identical glob ~365x/year).
    for year in range(window_begin.year, window_end.year + 1):
        cdf_list += glob.glob(
            Model.path() + "JASMIN/native/jasmin.vol.smc.{}.nc".format(year))
    return [f for f in set(cdf_list) if Path(f).is_file()]
def __init__(self):
    """Configure identifiers, metadata and MODIS parameters for the live-fuel model."""
    self.name = "live_fuel"
    self.ident = "Live Fuels"
    self.code = "LFMC"
    self.path = os.path.abspath(Model.path() + 'Live_FM') + '/'

    # TODO - Proper metadata!
    author_list = [
        Author(name="Rachel Nolan",
               email="*****@*****.**",
               organisation="Test Organisation"),
        Author(name="Victor Di Resco",
               email="*****@*****.**",
               organisation="Test Organisation")
    ]
    published = dt.datetime(2015, 9, 9)

    # Which products from NASA
    product = "MOD09A1"
    version = "6"
    # AIO bounding box lower left longitude, lower left latitude, upper right longitude, upper right latitude.
    bbox = "108.0000,-45.0000,155.0000,-10.0000"
    self.modis_meta = product, version, bbox

    summary = Abstracts("NYA")
    self.metadata = ModelMetaData(
        authors=author_list,
        published_date=published,
        fuel_types=["surface"],
        doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
        abstract=summary)

    # Readings are live fuel-moisture NetCDF files served under "LiveFM".
    self.outputs = {
        "type": "fuel moisture",
        "readings": {
            "path": self.path,
            "url": "LiveFM",
            "prefix": "LFMC",
            "suffix": ".nc",
        }
    }
def __init__(self):
    """Configure identifiers, metadata and output locations for the Yebra LVMC model."""
    self.name = "yebra"
    self.ident = "Yebra"
    self.code = "LVMC"
    self.mode = "wet"  # "wet" or "dry"
    self.path = os.path.abspath(Model.path() + 'Yebra') + '/'
    self.output_path = os.path.abspath(self.path + "c6") + '/'
    self.data_path = self.output_path

    author_list = [
        Author(name="Marta Yebra",
               email="*****@*****.**",
               organisation="Fenner School of Environment and Society, ANU, BNHCRC"),
        Author(name="Xingwen Quan",
               email="",
               organisation="School of Resources and Environment, \ University of Electronic Science and Technology of China"),
        Author(name="David Riaño",
               email="",
               organisation="Center for Spatial Technologies and Remote Sensing (CSTARS)"),
        Author(name="Pablo Rozas Larraondo",
               email="",
               organisation="National Computational Infrastructure"),
        Author(name="Albert I.J.M. van Dijk",
               email="",
               organisation="Fenner School of Environment and Society, ANU, BNHCRC")
    ]
    published = dt.datetime(2018, 6, 1)
    summary = Abstracts(
        "Fuel Moisture Content (FMC) is one of the primary drivers affecting fuel flammability that lead to fires. Satellite \ observations well-grounded with field data over the highly climatologically and ecologically diverse Australian \ region served to estimate FMC and flammability for the first time at a continental-scale. The methodology \ includes a physically-based retrieval model to estimate FMC from MODIS (Moderate Resolution Imaging \ Spectrometer) reflectance data using radiative transfer model inversion. The algorithm was evaluated using 360 \ observations at 32 locations around Australia with mean accuracy for the studied land cover classes (grassland, \ shrubland, and forest) close to those obtained elsewhere (r 2 = 0.58, RMSE = 40%) but without site-specific \ calibration. Logistic regression models were developed to generate a flammability index, trained on fire events \ mapped in the MODIS burned area product and four predictor variables calculated from the FMC estimates. The \ selected predictor variables were actual FMC corresponding to the 8-day and 16-day period before burning; the \ same but expressed as an anomaly from the long-term mean for that date; and the FMC change between the two \ successive 8-day periods before burning. Separate logistic regression models were developed for grassland, \ shrubland and forest. The models obtained an “Area Under the Curve” calculated from the Receiver Operating \ Characteristic plot method of 0.70, 0.78 and 0.71, respectively, indicating reasonable skill in fire risk prediction."
    )
    self.metadata = ModelMetaData(
        authors=author_list,
        published_date=published,
        fuel_types=["profile"],
        doi="http://dx.doi.org/10.1016/j.rse.2018.04.053",
        abstract=summary)

    # Metadata about initialisation for use in ModelSchema
    self.parameters = {}

    # Readings are the mean fuel-moisture NetCDF outputs under c6/.
    self.outputs = {
        "type": "fuel moisture",
        "readings": {
            "prefix": "fmc_mean",
            "path": self.output_path,
            "suffix": ".nc"
        }
    }
def __init__(self):
    """Configure identifiers, metadata, input sources and outputs for the dead-fuel model."""
    self.name = "dead_fuel"
    self.ident = "Dead Fuels"
    self.code = "DFMC"
    self.path = os.path.abspath(Model.path() + 'Dead_FM') + '/'

    # TODO - Proper metadata!
    # BUG FIX: "Matthias M. Boer" was accidentally listed twice in this list.
    hie = "Hawkesbury Institute for the Environment, Western Sydney University"
    authors = [
        Author(name="Rachel Nolan", email="", organisation=hie),
        Author(name="Víctor Resco de Dios", email="", organisation=hie),
        Author(name="Matthias M. Boer", email="", organisation=hie),
        Author(name="Gabriele Caccamo", email="", organisation=hie),
        Author(name="Michael L. Goulden", email="", organisation="Department of Earth System Science, University of California"),
        Author(name="Ross A. Bradstock", email="", organisation="Centre for Environmental Risk Management of Bushfires, Centre for Sustainable Ecosystem Solutions, University of Wollongong")
    ]
    pub_date = dt.datetime(2015, 12, 9)
    abstract = Abstracts("Spatially explicit predictions of fuel moisture content are crucial for quantifying fire danger indices and as inputs \ to fire behaviour models. Remotely sensed predictions of fuel moisture have typically focused on live fuels; but \ regional estimates of dead fuel moisture have been less common. Here we develop and test the spatial application \ of a recently developed dead fuel moisture model, which is based on the exponential decline of fine fuel moisture \ with increasing vapour pressure deficit (D). We first compare the performance of two existing approaches to pre- \ dict D from satellite observations. We then use remotely sensed D, as well as D estimated from gridded daily \ weather observations, to predict dead fuel moisture. We calibrate and test the model at a woodland site in \ South East Australia, and then test the model at a range of sites in South East Australia and Southern California \ that vary in vegetation type, mean annual precipitation (129–1404 mm year −1 ) and leaf area index (0.1–5.7). \ We found that D modelled from remotely sensed land surface temperature performed slightly better than a \ model which also included total precipitable water (MAE b 1.16 kPa and 1.62 kPa respectively). D calculated \ with observations from the Moderate Resolution Imaging Spectroradiometer (MODIS) on the Terra satellite \ was under-predicted in areas with low leaf area index. Both D from remotely sensed data and gridded weather \ station data were good predictors of the moisture content of dead suspended fuels at validation sites, with \ mean absolute errors less than 3.9% and 6.0% respectively. The occurrence of data gaps in remotely sensed \ time series presents an obstacle to this approach, and assimilated or extrapolated meteorological observations \ may offer better continuity.")
    self.metadata = ModelMetaData(authors=authors,
                                  published_date=pub_date,
                                  fuel_types=["surface"],
                                  doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
                                  abstract=abstract)

    # Prefixes for each gridded input product and for the model output.
    vapour_prefix = 'VP3pm'
    temp_prefix = 'Tmx'
    precipitation_prefix = 'P'
    dead_fuel_moisture_prefix = 'DFMC'

    # BOM AWAP source URLs for the three daily gridded inputs.
    vapour_url = "http://www.bom.gov.au/web03/ncc/www/awap/vprp/vprph15/daily/grid/0.05/history/nat/"
    max_avg_temp_url = "http://www.bom.gov.au/web03/ncc/www/awap/temperature/maxave/daily/grid/0.05/history/nat/"
    precipitation_url = "http://www.bom.gov.au/web03/ncc/www/awap/rainfall/totals/daily/grid/0.05/history/nat/"

    vapour_path = self.path + vapour_prefix + "/"
    max_avg_temp_path = self.path + temp_prefix + "/"
    precipitation_path = self.path + precipitation_prefix + "/"

    self.tolerance = 0.06  # As a percentage accuracy

    # One entry per gridded input: where it lives, where it comes from,
    # and how its files are named on disk.
    self.parameters = {
        "vapour pressure": {
            "var": "VP3pm",
            "path": vapour_path,
            "url": vapour_url,
            "prefix": vapour_prefix,
            "suffix": ".grid",
            "dataset": ".grid.nc",
            "compression_suffix": ".Z"
        },
        "maximum average temperature": {
            "var": "T",
            "path": max_avg_temp_path,
            "url": max_avg_temp_url,
            "prefix": temp_prefix,
            "suffix": ".grid",
            "dataset": ".grid.nc",
            "compression_suffix": ".Z"
        },
        "precipitation": {
            "var": "P",
            "path": precipitation_path,
            "url": precipitation_url,
            "prefix": precipitation_prefix,
            "suffix": ".grid",
            "dataset": ".grid.nc",
            "compression_suffix": ".Z"
        }
    }
    self.outputs = {
        "type": "fuel moisture",
        "readings": {
            "path": self.path + dead_fuel_moisture_prefix + "/",
            "url": "",
            "prefix": dead_fuel_moisture_prefix,
            "suffix": ".nc",
        }
    }
def __init__(self):
    """Configure identifiers, metadata, weather inputs and outputs for the Matthews PFMC model."""
    self.name = "matthews"
    self.ident = "Matthews"
    self.code = "PFMC"
    self.mode = "wet"  # "wet" or "dry"
    self.type = "broadscale"
    self.path = os.path.abspath(Model.path() + 'Matthews') + '/'
    self.output_path = os.path.abspath(self.path + "PFMC") + '/'
    self.data_path = os.path.abspath(Model.path() + "Weather") + '/'

    # TODO - Proper metadata!
    author_list = [
        Author(name="Stuart Matthews",
               email="*****@*****.**",
               organisation="RFS")
    ]
    published = dt.datetime(2015, 6, 1)
    summary = Abstracts(
        "This paper presents the first complete process-based model for fuel moisture in the litter layer. \ The model predicts fuel moisture by modelling the energy and water budgets of the litter, intercepted precipitation, \ and air spaces in the litter. The model was tested against measurements of fuel moisture from two sets of field \ observations, one made in Eucalyptus mallee-heath under dry conditions and the other during a rainy period in \ Eucalyptus obliqua forest. The model correctly predicted minimum and maximum fuel moisture content and the \ timing of minima and maxima in the mallee-heath. Under wet conditions, wetting and drying of the litter profile \ were correctly predicted but wetting of the surface litter was over-predicted. The structure of the model and the \ dependence of predictions on model parameters were examined using sensitivity and parameter estimation studies. \ The results indicated that it should be possible to adapt the model to any forest type by specifying a limited number \ of parameters. A need for further experimental research on the wetting of litter during rain was also identified."
    )
    self.metadata = ModelMetaData(
        authors=author_list,
        published_date=published,
        fuel_types=["profile"],
        doi="http://dx.doi.org/10.13140/RG.2.2.36184.70403",
        abstract=summary)

    # Helper to build the per-variable weather input descriptors, which all
    # share the same path/suffix conventions and differ only in the variable.
    def weather_input(var):
        return {
            "var": var,
            "path": self.data_path,
            "prefix": var,
            "suffix": ".nc",
            "dataset": ".nc",
            "compression_suffix": ".gz"
        }

    # Metadata about initialisation for use in ModelSchema
    self.parameters = {
        "mode": self.mode,
        "path": os.path.normpath("Data/"),  # TODO
        "data_path": self.data_path,  # TODO
        "relative humidity": weather_input("RH_SFC"),
        "temperature": weather_input("T_SFC"),
        "wind magnitude": weather_input("Wind_Mag_SFC"),
        "precipitation": weather_input("DailyPrecip50Pct_SFC"),
        "solar radiation": weather_input("Sky_SFC")
    }
    self.outputs = {
        "type": "fuel moisture",
        "readings": {
            "prefix": "MFMC",
            "path": self.output_path,
            "suffix": ".nc"
        },
        "grid": {
            "fmc_grid_output_file_name": os.path.join(self.data_path, "fmc_grid.pkl")
        }
    }
    self.model = LfmcDryness(self.parameters, self.outputs)