Example 1
    def netcdf_names_for_date(self, when, file_name):

        # First, check to see if an annual archive exists
        archival_file = self.archive_name(when.year)
        if Path(archival_file).is_file():
            return [archival_file]

        # Can we create a full year's archive for this year?
        if self.consolidate_to_year_archive(when.year, file_name):
            return [archival_file]
        else:

            # Because some of the data comes in 7-day observations,
            # pad the date +/- 7 days to ensure we grab any .nc files that might contain 'when'
            window_begin = when - dt.timedelta(7)
            window_end = when + dt.timedelta(7)
            cdf_list = []

            for d in pd.date_range(window_begin, window_end):
                cdf_list += [
                    p + "/" + file_name for p in
                    glob.glob(Model.path() +
                              "Weather/{}*".format(d.strftime("%Y%m%d")))
                ]

            short_list = list(set(cdf_list))
            # Assemble the individual components that contain that date range
            return [f for f in short_list if Path(f).is_file()]
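
Both this example and Example 7 rely on `self.archive_name(year)`, which isn't shown in these snippets. A minimal sketch of what such a helper might look like, assuming `os` is imported as in the other snippets and that the archive sits under the model's readings path with its configured prefix and suffix (the exact naming is an assumption, not taken from the source):

    def archive_name(self, year):
        # Hypothetical: "<readings path>/<prefix>_<year>_archive<suffix>",
        # e.g. ".../Weather/Tmx_2017_archive.nc"; the real convention isn't shown here.
        readings = self.outputs["readings"]
        return os.path.join(readings["path"],
                            "{}_{}_archive{}".format(readings["prefix"], year,
                                                     readings["suffix"]))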
Example 2
    def __init__(self):
        self.name = "df"

        # TODO - Proper metadata!
        authors = [Author(name="", email="", organisation="")]
        pub_date = dt.datetime(2015, 9, 9)
        abstract = Abstracts("")
        self.metadata = ModelMetaData(
            authors=authors,
            published_date=pub_date,
            fuel_types=["surface"],
            doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
            abstract=abstract)

        self.path = os.path.abspath(Model.path() + 'DF') + '/'
        self.ident = "Drought Factor"
        self.code = "DF"
        self.outputs = {
            "type": "index",
            "readings": {
                "path": self.path,
                "url": "",
                "prefix": "DF_SFC",
                "suffix": ".nc"
            }
        }
Example 3
    def __init__(self):
        self.name = "temperature"

        # TODO - Proper metadata!
        authors = [
            Author(name="Test1",
                   email="*****@*****.**",
                   organisation="Test Organisation"),
            Author(name="Test2",
                   email="*****@*****.**",
                   organisation="Test Organisation"),
            Author(name="Test3",
                   email="*****@*****.**",
                   organisation="Test Organisation")
        ]
        pub_date = dt.datetime(2015, 9, 9)

        self.metadata = ModelMetaData(
            authors=authors,
            published_date=pub_date,
            fuel_types=["surface"],
            doi="http://dx.doi.org/10.1016/j.rse.2015.12.010")

        self.path = os.path.abspath(Model.path() + 'Weather') + '/'
        self.crs = "EPSG:3111"
        self.outputs = {
            "type": "index",
            "readings": {
                "path": self.path,
                "url": "",
                "prefix": "Tmx",
                "suffix": ".nc"
            }
        }
Example 4
    def __init__(self):
        self.name = "live_fuel"

        # TODO - Proper metadata!
        authors = [
            Author(name="Test1",
                   email="*****@*****.**",
                   organisation="Test Organisation"),
            Author(name="Test2",
                   email="*****@*****.**",
                   organisation="Test Organisation"),
            Author(name="Test3",
                   email="*****@*****.**",
                   organisation="Test Organisation")
        ]
        pub_date = dt.datetime(2015, 9, 9)

        # Which products from NASA
        product = "MOD09A1"
        version = "6"

        # AOI bounding box: lower-left longitude, lower-left latitude, upper-right longitude, upper-right latitude.
        bbox = "108.0000,-45.0000,155.0000,-10.0000"

        self.modis_meta = product, version, bbox

        abstract = Abstracts("NYA")

        self.metadata = ModelMetaData(
            authors=authors,
            published_date=pub_date,
            fuel_types=["surface"],
            doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
            abstract=abstract)

        self.path = os.path.abspath(Model.path() + 'Live_FM') + '/'
        self.ident = "Live Fuels"
        self.code = "LFMC"
        self.parameters = {
            "surface relectance band": {
                "var": "SRB",
                "path": "",
                "url": "",
                "prefix": "SRB",
                "suffix": ".hdf",
                "dataset": ".hdf",
                "compression_suffix": ".gz"
            }
        }
        self.outputs = {
            "type": "fuel moisture",
            "readings": {
                "path": "LiveFM",
                "url": "LiveFM",
                "prefix": "LFMC",
                "suffix": "_lfmc.nc",
            }
        }

        self.storage_engine = SwiftStorage()
Example 5
    def __init__(self):
        self.name = "jasmin"

        # TODO - Proper metadata!
        authors = [
            Author(name="Imtiaz Dharssi",
                   email="",
                   organisation="Bureau of Meteorology, Australia"),
            Author(name="Vinodkumar",
                   email="",
                   organisation="Bureau of Meteorology, Australia")
        ]
        pub_date = dt.datetime(2017, 10, 1)
        abstract = Abstracts(
            "Accurate soil dryness information is essential for the calculation of accurate fire danger \
                ratings, fire behavior prediction, flood forecasting and landslip warnings. Soil dryness \
                also strongly influences temperatures and heatwave development by controlling the \
                partitioning of net surface radiation into sensible, latent and ground heat fluxes. Rainfall \
                forecasts are crucial for many applications and many studies suggest that soil dryness \
                can significantly influence rainfall. Currently, soil dryness for fire danger prediction in \
                Australia is estimated using very simple water balance models developed in the 1960s \
                that ignore many important factors such as incident solar radiation, soil types, \
                vegetation height and root depth. This work presents a prototype high resolution soil moisture \
                analysis system based around the Joint UK Land Environment System (JULES) land \
                surface model. This prototype system is called the JULES based Australian Soil \
                Moisture INformation (JASMIN) system. The JASMIN system can include data from many \
                sources; such as surface observations of rainfall, temperature, dew-point temperature, \
                wind speed, surface pressure as well as satellite derived measurements of rainfall, \
                surface soil moisture, downward surface short-wave radiation, skin temperature, leaf area \
                index and tree heights. The JASMIN system estimates soil moisture on four soil layers \
                over the top 3 meters of soil, the surface layer has a thickness of 10 cm. The system \
                takes into account the effect of different vegetation types, root depth, stomatal \
                resistance and spatially varying soil texture. The analysis system has a one hour time-step \
                with daily updating. For the surface soil layer, verification against ground based soil \
                moisture observations from the OzNet, CosmOz and OzFlux networks shows that the \
                JASMIN system is significantly more accurate than other soil moisture analysis \
                systems used at the Bureau of Meteorology. For the root-zone, the JASMIN system has \
                similar skill to other commonly used soil moisture analysis systems. The Extended \
                Triple Collocation (ETC) verification method also confirms the high skill of the JASMIN \
                system.")
        self.metadata = ModelMetaData(
            authors=authors,
            published_date=pub_date,
            fuel_types=["surface"],
            doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
            abstract=abstract)

        self.path = os.path.abspath(Model.path() + 'JASMIN') + '/'
        self.ident = "JASMIN"
        self.code = "JASMIN"
        self.outputs = {
            "type": "index",
            "readings": {
                "path": self.path,
                "url": "",
                "prefix": "smd",
                "suffix": ".nc"
            }
        }
Example 6
    def __init__(self):
        self.name = "gfdi"

        # TODO - Proper metadata!
        authors = [
            Author(name="Danielle Martin",
                   email="",
                   organisation="Country Fire Authority"),
            Author(name="Alex Chen",
                   email="",
                   organisation="Country Fire Authority"),
            Author(name="David Nichols",
                   email="",
                   organisation="Country Fire Authority"),
            Author(name="Rachel Bessell",
                   email="",
                   organisation="Country Fire Authority"),
            Author(name="Susan Kiddie",
                   email="",
                   organisation="Country Fire Authority"),
            Author(name="Jude Alexander",
                   email="",
                   organisation="Country Fire Authority")
        ]
        pub_date = dt.datetime(2015, 9, 9)
        abstract = Abstracts(
            "Depending on the growth stage of grass, certain physiological characteristics, such \
                            as water content and degree of curing (senescence), determine the susceptibility of \
                            grass to ignite or to propagate a fire. Grassland curing is an integral component of \
                            the Grassland Fire Danger Index (GFDI), which is used to determine the Fire Danger \
                            Ratings (FDRs). In providing input for the GFDI for the whole state of Victoria, this \
                            paper reports the development of two amalgamated products by the Country Fire \
                            Authority (CFA): (i) an automated web-based system which integrates weekly field \
                            observations with real time satellite data for operational grassland curing mapping, \
                            and (ii) a satellite model based on historical satellite data and historical field \
                            observations. Both products combined will provide an improved state-wide map of \
                            curing tailored for Victorian grasslands.")

        self.metadata = ModelMetaData(
            authors=authors,
            published_date=pub_date,
            fuel_types=["surface"],
            doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
            abstract=abstract)

        self.ident = "Grass Fire Danger"
        self.code = "GFDI"
        self.path = os.path.abspath(Model.path() + 'GFDI') + '/'
        self.crs = "EPSG:3111"
        self.outputs = {
            "type": "index",
            "readings": {
                "path": self.path,
                "url": "",
                "prefix": "GFDI_SFC",
                "suffix": ".nc"
            }
        }
Example 7
    def consolidate_to_year_archive(self, year, file_name):

        y_begin = dt.datetime(year, 1, 1)
        y_end = dt.datetime(year, 12, 31)
        fl = []

        # Don't even attempt if we're still in this year,
        # i.e., it must be a year in the past
        if y_end.year >= dt.datetime.now().year:
            return False

        for d in pd.date_range(y_begin, y_end):
            fl += [
                y + '/' + file_name
                for y in glob.glob(Model.path() +
                                   "Weather/{}*".format(d.strftime("%Y%m%d")))
            ]

        minimal_file_list = list(set(fl))
        files = [f for f in minimal_file_list if Path(f).is_file()]

        file_list = list(files)

        if len(file_list) > 0:
            file_list.sort()
            xr1 = xr.open_dataset(file_list.pop(0))
            while len(file_list) > 1:
                xr2 = xr.open_dataset(file_list.pop(0))
                # if dev.DEBUG:
                #     logger.debug("\n--> Loading BOM SFC TS by overwriting older data: %s" % fl[0])
                xr1 = self.load_by_overwrite(xr1, xr2)

            xr1.attrs['var_name'] = self.outputs["readings"]["prefix"]

            logger.debug(xr1)

            if xr1['time'] is None:
                logger.debug('No temporal component to DataSet?!')
                return False
            else:
                # This needs refinement to extract days worth of records instead of actual time entries
                time_records = xr1.sel(time=str(year))
                # len() on a Dataset counts data variables, not time steps,
                # so count the entries along the time coordinate instead
                num_records = time_records['time'].size
                if num_records >= 365:
                    # This could potentially give us 365 milliseconds/seconds/hours worth of data. TODO - Just days!!
                    xr1.to_netcdf(self.archive_name(year), format='NETCDF4')
                    return True
                else:
                    # Can't yet save the year as an archive; it's still incomplete
                    logger.debug('Attempted to create an annual archive for %s, '
                                 'but the year (%d) is incomplete, containing just %d days'
                                 % (self.code, year, num_records))
                    return False
        else:
            return False
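
Examples 7 and 11 merge the date-ordered files through `self.load_by_overwrite(xr1, xr2)`, which also isn't shown. A minimal sketch of what it might do with xarray's `combine_first`, assuming the newer dataset should win wherever the two overlap; the real implementation may differ:

    def load_by_overwrite(self, xr1, xr2):
        # Hypothetical: values from the newer dataset (xr2) take precedence
        # wherever both datasets carry data; xr1 only fills the remaining gaps.
        return xr2.combine_first(xr1)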
Example 8
    def all_netcdfs(self):
        """
        Pattern-matches potential paths where files could be stored against those that actually exist.
        Warning: Files outside this directory aren't indexed and won't get ingested.
        :return: a list of existing NetCDF file paths
        """
        possibles = glob.glob(
            Model.path() +
            "JASMIN/rescaled/21vls/jasmin.kbdi/temporal/jasmin.kbdi.cdf_temporal.2lvls.*.nc"
        )
        return [f for f in possibles if Path(f).is_file()]
Example 9
    def netcdf_names_for_date(self, when, fname):
        # Because some of the data comes in 7-day observations,
        # pad the date +/- 7 days to ensure we grab any .nc files that might contain 'when'
        window_begin = when - dt.timedelta(7)
        window_end = when + dt.timedelta(7)
        cdf_list = []

        for d in pd.date_range(window_begin, window_end):
            cdf_list += [
                p + "/" + fname
                for p in glob.glob(Model.path() +
                                   "Weather/{}*".format(d.strftime("%Y%m%d")))
            ]

        return [f for f in list(set(cdf_list)) if Path(f).is_file()]
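
A short usage sketch for the lookup above; `model` and the file name are hypothetical stand-ins, since neither the class name nor the real BOM file names appear in these snippets:

    import datetime as dt
    import xarray as xr

    # Hypothetical usage: 'model' is an instance of the class above and
    # "T_SFC.nc" is only an illustrative file name.
    when = dt.datetime(2017, 3, 15)
    for f in model.netcdf_names_for_date(when, "T_SFC.nc"):
        ds = xr.open_dataset(f)
        print(f, dict(ds.sizes))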
Example 10
    def netcdf_names_for_dates(self, start, finish):
        # Because some of the data comes in 7-day observations,
        # pad the range +/- 7 days to ensure we grab any .nc files that might overlap [start, finish]
        window_begin = start - dt.timedelta(7)
        window_end = finish + dt.timedelta(7)
        cdf_list = []

        for d in pd.date_range(window_begin, window_end):
            cdf_list += [
                p for p in glob.glob(
                    Model.path() +
                    "JASMIN/rescaled/21vls/jasmin.kbdi/temporal/jasmin.kbdi.cdf_temporal.2lvls.{}.nc"
                    .format(d.strftime("%Y")))
            ]

        return [f for f in list(set(cdf_list)) if Path(f).is_file()]
Example 11
    async def get_shaped_resultcube(self,
                                    shape_query: ShapeQuery) -> xr.DataArray:

        fs = set()
        for when in shape_query.temporal.dates():
            for file in self.netcdf_name_for_date(when):
                if Path(file).is_file():
                    fs.add(file)

        fl = list(fs)
        xr1 = xr.DataArray(())
        if dev.DEBUG:
            for f in fl:
                logger.debug("\n--> Will load: %s" % f)

        # Load these files in date order overwriting older data with the newer
        if len(fl) > 0:
            fl.sort()
            xr1 = xr.open_dataset(fl.pop(0))
            while len(fl) > 1:
                next_file = fl.pop(0)
                xr2 = xr.open_dataset(next_file)
                if dev.DEBUG:
                    logger.debug(
                        "\n--> Loading BOM SFC TS by overwriting older data: %s"
                        % next_file)
                xr1 = self.load_by_overwrite(xr1, xr2)

            xr1.attrs['var_name'] = self.outputs["readings"]["prefix"]
            xr1.to_netcdf(Model.path() +
                          'temp/latest_{}_query.nc'.format(self.name),
                          format='NETCDF4')

            if dev.DEBUG:
                # Include forecasts!
                logger.debug(xr1)
                ts = xr1.sel(time=slice(
                    shape_query.temporal.start.strftime("%Y-%m-%d"), None))
            else:
                ts = xr1.sel(time=slice(
                    shape_query.temporal.start.strftime("%Y-%m-%d"),
                    shape_query.temporal.finish.strftime("%Y-%m-%d")))

            return shape_query.apply_mask_to(ts)
        else:
            raise FileNotFoundError('No data exists for that date range')
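
Because `get_shaped_resultcube` is a coroutine, callers have to await it. A minimal driver sketch, assuming `model` and `query` (a `ShapeQuery`) have been constructed elsewhere, as their setup isn't shown in these snippets:

    import asyncio

    # Hypothetical driver: 'model' and 'query' come from project-specific setup.
    result_cube = asyncio.run(model.get_shaped_resultcube(query))
    print(result_cube)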
Example 12
    def __init__(self):
        self.name = "awra"

        # TODO - Proper metadata!
        authors = [
            Author(name="BOM",
                   email="*****@*****.**",
                   organisation="Bureau of Meteorology, Australia")
        ]
        pub_date = dt.datetime(2015, 9, 9)
        abstract = Abstracts(
            "The information presented on the Australian Landscape Water Balance website is produced by \
         the Bureau's operational Australian Water Resources Assessment Landscape model (AWRA-L). AWRA-L is a daily 0.05°\
          grid-based, distributed water balance model, conceptualised as a small unimpaired catchment. It simulates the\
           flow of water through the landscape from the rainfall entering the grid cell through the vegetation and soil\
            moisture stores and then out of the grid cell through evapotranspiration, runoff or deep drainage to the groundwater.\n \
        Each spatial unit (grid cell) in AWRA-L is divided into two hydrological response units (HRU) representing deep \
        rooted vegetation (trees) and shallow rooted vegetation (grass). Hydrological processes are modelled separately \
        for each HRU, then the resulting fluxes or stores are combined to give cell outputs. Hydrologically, these two \
        HRUs differ in their aerodynamic control of evaporation and their interception capacities but the main difference\
         is in their degree of access to different soil layers. The AWRA-L model has three soil layers (upper: 0–10 cm, \
         lower: 10–100 cm, and deep: 1–6 m). The shallow rooted vegetation has access to subsurface soil moisture in the \
         upper and lower soil stores only, while the deep rooted vegetation also has access to moisture in the deep store."
        )

        self.metadata = ModelMetaData(
            authors=authors,
            published_date=pub_date,
            fuel_types=["surface"],
            doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
            abstract=abstract)

        self.path = os.path.abspath(Model.path() + 'AWRA-L') + '/'
        self.ident = "AWRA-L"
        self.code = "AWRA"
        self.outputs = {
            "type": "soil moisture",
            "readings": {
                "path": self.path,
                "url": "",
                "prefix": "sm_pct",
                "suffix": ".nc"
            }
        }
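
Every model defines the same outputs["readings"] shape (path, prefix, suffix). A minimal sketch of how that block could be turned into a glob pattern for listing a model's result files; this usage is an assumption, not code taken from the source:

    import glob
    import os

    # Hypothetical helper: list result files for a model such as the AWRA-L
    # instance above, from its configured readings path, prefix and suffix.
    def list_readings(model):
        readings = model.outputs["readings"]
        pattern = os.path.join(readings["path"],
                               readings["prefix"] + "*" + readings["suffix"])
        return sorted(glob.glob(pattern))  # e.g. .../AWRA-L/sm_pct*.nc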
Example 13
    def __init__(self):

        self.name = "yebra"

        # TODO - Proper metadata!
        authors = [
            Author(name="Marta Yebra",
                   email="*****@*****.**",
                   organisation="ANU")
        ]

        pub_date = dt.datetime(2015, 6, 1)

        abstract = Abstracts("")

        self.metadata = ModelMetaData(
            authors=authors,
            published_date=pub_date,
            fuel_types=["profile"],
            doi="http://dx.doi.org/10.13140/RG.2.2.36184.70403",
            abstract=abstract)

        self.mode = "wet"  # "wet" or "dry"
        self.ident = "Yebra"
        self.code = "LVMC"
        self.path = os.path.abspath(Model.path() + 'Yebra') + '/'
        self.output_path = os.path.abspath(self.path + "LVMC") + '/'
        self.data_path = self.output_path

        # Metadata about initialisation for use in ModelSchema
        self.parameters = {}

        self.outputs = {
            "type": "fuel moisture",
            "readings": {
                "prefix": "LVMC",
                "path": self.output_path,
                "suffix": ".nc"
            }
        }
Example 14
    def __init__(self):

        self.name = "dead_fuel"

        # TODO - Proper metadata!
        authors = [
            Author(
                name="Rachel Nolan",
                email="",
                organisation=
                "Hawkesbury Institute for the Environment, Western Sydney University"
            ),
            Author(
                name="Víctor Resco de Dios",
                email="",
                organisation=
                "Hawkesbury Institute for the Environment, Western Sydney University"
            ),
            Author(
                name="Matthias M. Boer",
                email="",
                organisation=
                "Hawkesbury Institute for the Environment, Western Sydney University"
            ),
            Author(
                name="Gabriele Caccamo",
                email="",
                organisation=
                "Hawkesbury Institute for the Environment, Western Sydney University"
            ),
            Author(
                name="Michael L. Goulden",
                email="",
                organisation=
                "Department of Earth System Science, University of California"
            ),
            Author(
                name="Ross A. Bradstock",
                email="",
                organisation=
                "Centre for Environmental Risk Management of Bushfires, Centre for Sustainable Ecosystem Solutions, University of Wollongong"
            )
        ]

        pub_date = dt.datetime(2015, 12, 9)
        abstract = Abstracts(
            "Spatially explicit predictions of fuel moisture content are crucial for quantifying fire danger indices and as inputs \
        to fire behaviour models. Remotely sensed predictions of fuel moisture have typically focused on live fuels; but \
        regional estimates of dead fuel moisture have been less common. Here we develop and test the spatial application \
        of a recently developed dead fuel moisture model, which is based on the exponential decline of fine fuel moisture \
        with increasing vapour pressure deficit (D). We first compare the performance of two existing approaches to \
        predict D from satellite observations. We then use remotely sensed D, as well as D estimated from gridded daily \
        weather observations, to predict dead fuel moisture. We calibrate and test the model at a woodland site in \
        South East Australia, and then test the model at a range of sites in South East Australia and Southern California \
        that vary in vegetation type, mean annual precipitation (129–1404 mm year−1) and leaf area index (0.1–5.7). \
        We found that D modelled from remotely sensed land surface temperature performed slightly better than a \
        model which also included total precipitable water (MAE < 1.16 kPa and 1.62 kPa respectively). D calculated \
        with observations from the Moderate Resolution Imaging Spectroradiometer (MODIS) on the Terra satellite \
        was under-predicted in areas with low leaf area index. Both D from remotely sensed data and gridded weather \
        station data were good predictors of the moisture content of dead suspended fuels at validation sites, with \
        mean absolute errors less than 3.9% and 6.0% respectively. The occurrence of data gaps in remotely sensed \
        time series presents an obstacle to this approach, and assimilated or extrapolated meteorological observations \
        may offer better continuity.")
        self.metadata = ModelMetaData(
            authors=authors,
            published_date=pub_date,
            fuel_types=["surface"],
            doi="http://dx.doi.org/10.1016/j.rse.2015.12.010",
            abstract=abstract)

        # Prefixes
        vapour_prefix = 'VP3pm'
        temp_prefix = 'Tmx'
        precipitation_prefix = 'P'
        dead_fuel_moisture_prefix = 'DFMC'

        self.ident = "Dead Fuels"
        self.code = "DFMC"
        self.path = os.path.abspath(Model.path() + 'Dead_FM') + '/'

        vapour_url = "http://www.bom.gov.au/web03/ncc/www/awap/vprp/vprph15/daily/grid/0.05/history/nat/"
        max_avg_temp_url = "http://www.bom.gov.au/web03/ncc/www/awap/temperature/maxave/daily/grid/0.05/history/nat/"
        precipitation_url = "http://www.bom.gov.au/web03/ncc/www/awap/rainfall/totals/daily/grid/0.05/history/nat/"

        vapour_path = self.path + vapour_prefix + "/"
        max_avg_temp_path = self.path + temp_prefix + "/"
        precipitation_path = self.path + precipitation_prefix + "/"

        self.tolerance = 0.06  # As a percentage accuracy

        self.parameters = {
            "vapour pressure": {
                "var": "VP3pm",
                "path": vapour_path,
                "url": vapour_url,
                "prefix": vapour_prefix,
                "suffix": ".grid",
                "dataset": ".grid.nc",
                "compression_suffix": ".Z"
            },
            "maximum average temperature": {
                "var": "T",
                "path": max_avg_temp_path,
                "url": max_avg_temp_url,
                "prefix": temp_prefix,
                "suffix": ".grid",
                "dataset": ".grid.nc",
                "compression_suffix": ".Z"
            },
            "precipitation": {
                "var": "P",
                "path": precipitation_path,
                "url": precipitation_url,
                "prefix": precipitation_prefix,
                "suffix": ".grid",
                "dataset": ".grid.nc",
                "compression_suffix": ".Z"
            }
        }

        self.outputs = {
            "type": "fuel moisture",
            "readings": {
                "path": self.path + dead_fuel_moisture_prefix + "/",
                "url": "",
                "prefix": dead_fuel_moisture_prefix,
                "suffix": ".nc",
            }
        }
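
The `parameters` block above ties each input grid to a download URL, a local path and file suffixes. A minimal sketch of how those fields might be composed into a remote and a local file name for one day; the local name mirrors the `Tmx_{date}.grid.nc` pattern visible in the last snippet of this collection, while the remote naming is purely an assumption:

    import datetime as dt

    # Hypothetical composition from the fields defined above; the naming
    # convention actually used for the BOM AWAP grids may differ.
    def grid_names_for(param, when):
        stamp = when.strftime("%Y%m%d")
        remote = param["url"] + stamp + param["suffix"] + param["compression_suffix"]
        local = param["path"] + param["prefix"] + "_" + stamp + param["dataset"]
        return remote, local

    # e.g. grid_names_for(model.parameters["vapour pressure"], dt.datetime(2017, 3, 15))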
Example 15
    def __init__(self):

        self.name = "matthews"

        # TODO - Proper metadata!
        authors = [
            Author(name="Stuart Matthews",
                   email="*****@*****.**",
                   organisation="RFS")
        ]

        pub_date = dt.datetime(2015, 6, 1)

        abstract = Abstracts(
            "This paper presents the first complete process-based model for fuel moisture in the litter layer. \
        The model predicts fuel moisture by modelling the energy and water budgets of the litter, intercepted precipitation, \
        and air spaces in the litter. The model was tested against measurements of fuel moisture from two sets of field \
        observations, one made in Eucalyptus mallee-heath under dry conditions and the other during a rainy period in \
        Eucalyptus obliqua forest. The model correctly predicted minimum and maximum fuel moisture content and the \
        timing of minima and maxima in the mallee-heath. Under wet conditions, wetting and drying of the litter profile \
        were correctly predicted but wetting of the surface litter was over-predicted. The structure of the model and the \
        dependence of predictions on model parameters were examined using sensitivity and parameter estimation studies. \
        The results indicated that it should be possible to adapt the model to any forest type by specifying a limited number \
        of parameters. A need for further experimental research on the wetting of litter during rain was also identified."
        )

        self.metadata = ModelMetaData(
            authors=authors,
            published_date=pub_date,
            fuel_types=["profile"],
            doi="http://dx.doi.org/10.13140/RG.2.2.36184.70403",
            abstract=abstract)

        self.mode = "wet"  # "wet" or "dry"
        self.ident = "Matthews"
        self.code = "PFMC"
        self.path = os.path.abspath(Model.path() + 'Matthews') + '/'
        self.output_path = os.path.abspath(self.path + "PFMC") + '/'
        self.data_path = os.path.abspath(Model.path() + "Weather") + '/'

        self.type = "broadscale"

        # Metadata about initialisation for use in ModelSchema
        self.parameters = {
            "mode": self.mode,
            "path": os.path.normpath("Data/"),  # TODO
            "data_path": self.data_path,  # TODO
            "relative humidity": {
                "var": "RH_SFC",
                "path": self.data_path,
                "prefix": "RH_SFC",
                "suffix": ".nc",
                "dataset": ".nc",
                "compression_suffix": ".gz"
            },
            "temperature": {
                "var": "T_SFC",
                "path": self.data_path,
                "prefix": "T_SFC",
                "suffix": ".nc",
                "dataset": ".nc",
                "compression_suffix": ".gz"
            },
            "wind magnitude": {
                "var": "Wind_Mag_SFC",
                "path": self.data_path,
                "prefix": "Wind_Mag_SFC",
                "suffix": ".nc",
                "dataset": ".nc",
                "compression_suffix": ".gz"
            },
            "precipitation": {
                "var": "DailyPrecip50Pct_SFC",
                "path": self.data_path,
                "prefix": "DailyPrecip50Pct_SFC",
                "suffix": ".nc",
                "dataset": ".nc",
                "compression_suffix": ".gz"
            },
            "solar radiation": {
                "var": "Sky_SFC",
                "path": self.data_path,
                "prefix": "Sky_SFC",
                "suffix": ".nc",
                "dataset": ".nc",
                "compression_suffix": ".gz"
            }
        }

        self.outputs = {
            "type": "fuel moisture",
            "readings": {
                "prefix": "MFMC",
                "path": self.output_path,
                "suffix": ".nc"
            },
            "grid": {
                "fmc_grid_output_file_name":
                os.path.join(self.data_path, "fmc_grid.pkl")
            }
        }

        self.model = LfmcDryness(self.parameters, self.outputs)
    def netcdf_name_for_date(self, when):
        return os.path.abspath(
            Model.path() +
            "/Dead_FM/Tmx/Tmx_{}.grid.nc".format(when.strftime("%Y%m%d")))