def stdtoswmm5_cli(start_date=None, end_date=None, input_ts="-"):
    """Take the toolbox standard format and return SWMM5 format.

    Toolbox standard::

        Datetime, Column_Name
        2000-01-01 00:00:00 ,  45.6
        2000-01-01 01:00:00 ,  45.2
        ...

    SWMM5 format::

        ; comment line
        01/01/2000 00:00, 45.6
        01/01/2000 01:00, 45.2
        ...

    Parameters
    ----------
    {input_ts}
    {start_date}
    {end_date}
    """
    tsutils._printiso(
        stdtoswmm5(start_date=start_date, end_date=end_date, input_ts=input_ts)
    )
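
# A minimal usage sketch (illustrative, not from the original source): calling
# the underlying ``stdtoswmm5`` function used above directly on a file in the
# toolbox standard format.  The filename "standard_format.csv" is an assumed
# example.
#
#     swmm5_ts = stdtoswmm5(input_ts="standard_format.csv")
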
def extract_cli(filename, *labels):
    """Get the time series data for a particular object and variable.

    Parameters
    ----------
    {filename}
    {labels}
    """
    tsutils._printiso(extract(filename, *labels))
def listvariables_cli(filename, tablefmt="csv_nos", header="default"):
    """List variables available for each type.

    The types are "subcatchment", "node", "link", "pollutant", and "system".

    Parameters
    ----------
    {filename}
    {tablefmt}
    {header}
    """
    tsutils._printiso(listvariables(filename, header=header), tablefmt=tablefmt)
def catalog_cli(filename, itemtype="", tablefmt="csv_nos", header="default"):
    """List the catalog of objects in the output file.

    This catalog list is all of the labels that can be used in the extract
    routine.

    Parameters
    ----------
    {filename}
    {itemtype}
    {tablefmt}
    {header}
    """
    tsutils._printiso(
        catalog(filename, itemtype=itemtype, header=header), tablefmt=tablefmt
    )
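
# A small sketch tying ``catalog`` to ``extract`` (the filename "example.out"
# and the label are assumed examples): each catalog entry is a label, roughly
# of the form "itemtype,name,variable", that can be handed directly to
# ``extract``.
#
#     labels = catalog("example.out", itemtype="node")
#     ts = extract("example.out", "node,inlet,Depth_above_invert")
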
def listdsns_cli(wdmpath):
    """Print out a table describing all DSNs in the WDM.

    Parameters
    ----------
    wdmpath
        Path and WDM filename.
    """
    nvars = listdsns(wdmpath)
    collect = OrderedDict()
    for _, testv in nvars.items():
        for key in [
            "DSN",
            "SCENARIO",
            "LOCATION",
            "CONSTITUENT",
            "TSTYPE",
            "START_DATE",
            "END_DATE",
            "TCODE",
            "TSTEP",
        ]:
            collect.setdefault(key, []).append(testv[key.lower()])
    return tsutils._printiso(collect, tablefmt="plain")
def catalog_cli(hbnfilename, tablefmt="simple", header="default"):
    """Prints out a catalog of data sets in the binary file.

    The first four items of each line can be used as labels with the
    'extract' command to identify time-series in the binary file.

    Parameters
    ----------
    {hbnfilename}
    {tablefmt}
    {header}
    """
    if header == "default":
        header = ["LUE", "LC", "GROUP", "VAR", "TC", "START", "END", "TC"]
    tsutils._printiso(catalog(hbnfilename), tablefmt=tablefmt, headers=header)
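
# A small sketch (the filename "example.hbn" is an assumed example): the first
# four fields of each catalog entry form the
# 'OPERATIONTYPE,ID,VARIABLE_GROUP,VARIABLE' label accepted by ``extract``
# below.
#
#     entries = catalog("example.hbn")
#     ts = extract("example.hbn", "daily", "PERLND,101,PWATER,UZS")
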
def dump_cli(hbnfilename, time_stamp="begin"):
    """Prints out ALL data from a HSPF binary output file.

    Parameters
    ----------
    {hbnfilename}
    time_stamp
        [optional, default is 'begin']

        Defines the location of the time stamp within the interval.  If set
        to 'begin', the time stamp is at the beginning of the interval.  If
        set to any other string, the reported time stamp will represent the
        end of the interval.
    """
    tsutils._printiso(dump(hbnfilename, time_stamp=time_stamp))
def listdetail_cli(filename, itemtype, name="", tablefmt="simple", header="default"):
    """List nodes and metadata in output file.

    Parameters
    ----------
    {filename}
    {itemtype}
    name : str
        [optional, default is '']

        Specific name to print only that entry.  This can be looked up
        using 'listvariables'.
    {tablefmt}
    {header}
    """
    tsutils._printiso(
        listdetail(filename, itemtype, name=name, header=header), tablefmt=tablefmt
    )
def extract_cli(start_date=None, end_date=None, *wdmpath):
    """Print out DSN data to the screen with ISO-8601 dates.

    Parameters
    ----------
    wdmpath
        Path and WDM filename followed by space separated list of DSNs.
        For example::

            'file.wdm 234 345 456'

        OR `wdmpath` can be space separated sets of 'wdmpath,dsn'::

            'file.wdm,101 file2.wdm,104 file.wdm,227'

    {start_date}
    {end_date}
    """
    return tsutils._printiso(
        extract(*wdmpath, start_date=start_date, end_date=end_date)
    )
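
# A minimal sketch of the two ways of naming DSNs (filenames and DSN numbers
# are illustrative): either one WDM file followed by its DSNs, or repeated
# "wdmpath,dsn" pairs.
#
#     df = extract("file.wdm", 234, 345, 456)
#     df = extract("file.wdm,101", "file2.wdm,104", "file.wdm,227")
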
def temperature_cli(
    method,
    source_units,
    min_max_time="fix",
    mod_nighttime=False,
    input_ts="-",
    start_date=None,
    end_date=None,
    dropna="no",
    clean=False,
    round_index=None,
    skiprows=None,
    index_type="datetime",
    names=None,
    target_units=None,
    print_input=False,
    tablefmt="csv",
    temp_min_col=None,
    temp_max_col=None,
    temp_mean_col=None,
    lat=None,
    lon=None,
    hourly=None,
    max_delta=False,
):
    """Disaggregate daily temperature to hourly temperature.

    For straight disaggregation the temperature units are not relevant;
    however, other tools in mettoolbox require metric units.  You can use
    the `source_units` and `target_units` keywords to change units.

    +---------------+----------------------------+
    | Input Data    | Description                |
    +===============+============================+
    | temp_min_col  | Required column name or    |
    |               | number representing the    |
    |               | minimum daily temperature. |
    +---------------+----------------------------+
    | temp_max_col  | Required column name or    |
    |               | number representing the    |
    |               | maximum daily temperature. |
    +---------------+----------------------------+
    | temp_mean_col | Optional column name or    |
    |               | number representing the    |
    |               | average daily temperature. |
    |               | Default is None and if     |
    |               | None will be calculated as |
    |               | average of `temp_min_col`  |
    |               | and `temp_max_col`.        |
    +---------------+----------------------------+

    Parameters
    ----------
    method: str
        Disaggregation methods available for temperature.

        +---------------------+--------------------------------------+
        | `method`            | Description                          |
        +=====================+======================================+
        | sine_min_max        | Standard sine redistribution;        |
        |                     | preserves Tmin and Tmax but not      |
        |                     | Tmean.                               |
        +---------------------+--------------------------------------+
        | sine_mean           | Sine redistribution; preserves       |
        |                     | Tmean and the diurnal temperature    |
        |                     | range (Tmax - Tmin) but not Tmin     |
        |                     | and Tmax.                            |
        +---------------------+--------------------------------------+
        | mean_course_min_max | Redistribute following a prescribed  |
        |                     | temperature course calculated from   |
        |                     | hourly observations; preserves Tmin  |
        |                     | and Tmax.  Hourly CSV filename       |
        |                     | specified with the `hourly` keyword. |
        +---------------------+--------------------------------------+
        | mean_course_mean    | Redistribute following a prescribed  |
        |                     | temperature course calculated from   |
        |                     | hourly observations; preserves       |
        |                     | Tmean and the diurnal temperature    |
        |                     | range.  Hourly CSV filename          |
        |                     | specified with the `hourly` keyword. |
        +---------------------+--------------------------------------+

    {psource_units}
    min_max_time: str
        +----------------+------------------------------------------+
        | `min_max_time` | Description                              |
        +================+==========================================+
        | fix            | The diurnal course of temperature is     |
        |                | fixed without any seasonal variations.   |
        +----------------+------------------------------------------+
        | sun_loc        | The diurnal course of temperature is     |
        |                | modelled based on sunrise, noon and      |
        |                | sunset calculations.                     |
        +----------------+------------------------------------------+
        | sun_loc_shift  | This option activates empirical          |
        |                | corrections of the ideal course modelled |
        |                | by sun_loc.                              |
        +----------------+------------------------------------------+

    mod_nighttime: bool
        Allows one to apply a linear interpolation of night time values,
        which proves preferable during polar nights.
    {input_ts}
    {start_date}
    {end_date}
    {dropna}
    {clean}
    {round_index}
    {skiprows}
    {index_type}
    {names}
    {target_units}
    {print_input}
    {tablefmt}
    temp_min_col: str, int
        The column name or number (data columns start numbering at 1) in
        the input data that represents the daily minimum temperature.
    temp_max_col: str, int
        The column name or number (data columns start numbering at 1) in
        the input data that represents the daily maximum temperature.
    temp_mean_col: str, int
        The column name or number (data columns start numbering at 1) in
        the input data that represents the daily mean temperature.  If
        None will be estimated by the average of `temp_min_col` and
        `temp_max_col`.
    lat: float
        The latitude of the station.  Required if `min_max_time` is
        "sun_loc" or "sun_loc_shift".
    lon: float
        The longitude of the station.  Required if `min_max_time` is
        "sun_loc" or "sun_loc_shift".
    hourly: str
        File name that contains the hourly time series of temperatures to
        use when `method` is "mean_course_min_max" or "mean_course_mean"
        or when `max_delta` is True.
    max_delta: bool
        Uses maximum delta of hourly values for each month to constrain
        the disaggregated hourly temperature values.  If set to True
        requires an hourly time-series filename specified with the
        `hourly` keyword.
    """
    tsutils._printiso(
        disaggregate.temperature(
            method,
            source_units,
            input_ts=input_ts,
            start_date=start_date,
            end_date=end_date,
            dropna=dropna,
            clean=clean,
            round_index=round_index,
            skiprows=skiprows,
            index_type=index_type,
            names=names,
            target_units=target_units,
            print_input=print_input,
            min_max_time=min_max_time,
            mod_nighttime=mod_nighttime,
            temp_min_col=temp_min_col,
            temp_max_col=temp_max_col,
            temp_mean_col=temp_mean_col,
            lat=lat,
            lon=lon,
            hourly=hourly,
            max_delta=max_delta,
        ),
        tablefmt=tablefmt,
    )
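
# A minimal sketch (not from the original source; the filename and column
# numbers are assumed): disaggregating daily minimum/maximum temperatures to
# hourly values with the "sine_min_max" method by calling
# ``disaggregate.temperature`` directly.
#
#     hourly_temp = disaggregate.temperature(
#         "sine_min_max",
#         ["degC", "degC"],
#         input_ts="tmin_tmax_data.csv",
#         temp_min_col=1,
#         temp_max_col=2,
#     )
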
def wind_speed_cli(
    method,
    source_units,
    input_ts="-",
    columns=None,
    start_date=None,
    end_date=None,
    dropna="no",
    clean=False,
    round_index=None,
    skiprows=None,
    index_type="datetime",
    names=None,
    target_units=None,
    print_input=False,
    tablefmt="csv",
    a=None,
    b=None,
    t_shift=None,
):
    """Disaggregate daily wind speed to hourly wind speed.

    Parameters
    ----------
    method: str
        Disaggregation methods available for wind speed.

        +----------+------------------------------------------------+
        | `method` | Description                                    |
        +==========+================================================+
        | equal    | If this method is chosen, the daily average    |
        |          | wind speed is assumed to be valid for each     |
        |          | hour on that day.                              |
        +----------+------------------------------------------------+
        | cosine   | The cosine function option simulates a diurnal |
        |          | course of wind speed and requires calibration  |
        |          | (calc_wind_stats()).                           |
        +----------+------------------------------------------------+
        | random   | This option is a stochastic method that draws  |
        |          | random numbers to disaggregate wind speed      |
        |          | taking into account the daily average (no      |
        |          | parameter estimation required).                |
        +----------+------------------------------------------------+

    {psource_units}
    {input_ts}
    {columns}
    {start_date}
    {end_date}
    {dropna}
    {clean}
    {round_index}
    {skiprows}
    {index_type}
    {names}
    {target_units}
    {print_input}
    {tablefmt}
    a: float
        Parameter `a` when method is equal to "cosine".
    b: float
        Parameter `b` when method is equal to "cosine".
    t_shift: float
        Parameter `t_shift` when method is equal to "cosine".
    """
    tsutils._printiso(
        disaggregate.wind_speed(
            method,
            input_ts=input_ts,
            columns=columns,
            start_date=start_date,
            end_date=end_date,
            dropna=dropna,
            clean=clean,
            round_index=round_index,
            skiprows=skiprows,
            index_type=index_type,
            names=names,
            source_units=source_units,
            target_units=target_units,
            print_input=print_input,
            a=a,
            b=b,
            t_shift=t_shift,
        ),
        tablefmt=tablefmt,
    )
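
# A minimal sketch (the filename "wind.csv" is an assumed example): the
# "equal" method simply spreads the daily mean wind speed over all 24 hours.
#
#     hourly_wind = disaggregate.wind_speed(
#         "equal",
#         source_units="m/s",
#         input_ts="wind.csv",
#     )
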
def extract_cli(hbnfilename, interval, *labels, **kwds):
    r"""Prints out data to the screen from a HSPF binary output file.

    Parameters
    ----------
    {hbnfilename}
    interval: str
        One of 'yearly', 'monthly', 'daily', or 'BIVL'.  The 'BIVL' option
        is a sub-daily interval defined in the UCI file.  Typically 'BIVL'
        is used for hourly output, but can be set to any value that evenly
        divides into a day.
    labels: str
        The remaining arguments uniquely identify a time-series in the
        binary file.  The format is
        'OPERATIONTYPE,ID,VARIABLE_GROUP,VARIABLE'.

        For example: 'PERLND,101,PWATER,UZS IMPLND,101,IWATER,RETS'

        Leaving a section without an entry will wildcard that
        specification.  To get all the PWATER variables for PERLND 101 the
        label would read:

        'PERLND,101,PWATER,'

        To get TAET for all PERLNDs:

        'PERLND,,,TAET'

        Note that there are spaces ONLY between label specifications.

        OPERATIONTYPE can be PERLND, IMPLND, RCHRES, and BMPRAC.

        ID is specified in the UCI file.

        VARIABLE_GROUP depends on OPERATIONTYPE where::

            if OPERATIONTYPE is PERLND then VARIABLE_GROUP can be one of
                'ATEMP', 'SNOW', 'PWATER', 'SEDMNT', 'PSTEMP', 'PWTGAS',
                'PQUAL', 'MSTLAY', 'PEST', 'NITR', 'PHOS', 'TRACER'

            if OPERATIONTYPE is IMPLND then VARIABLE_GROUP can be one of
                'ATEMP', 'SNOW', 'IWATER', 'SOLIDS', 'IWTGAS', 'IQUAL'

            if OPERATIONTYPE is RCHRES then VARIABLE_GROUP can be one of
                'HYDR', 'CONS', 'HTRCH', 'SEDTRN', 'GQUAL', 'OXRX',
                'NUTRX', 'PLANK', 'PHCARB', 'INFLOW', 'OFLOW', 'ROFLOW'

            if OPERATIONTYPE is BMPRAC then there is no VARIABLE_GROUP and
                you have to leave VARIABLE_GROUP as a wild card.  For
                example, 'BMPRAC,875,,RMVOL'.

    kwds:
        Currently the allowable keywords are 'time_stamp' and 'sorted'.

        time_stamp:
            [optional, default is 'begin']

            Defines the location of the time stamp within the interval.
            If set to 'begin', the time stamp is at the beginning of the
            interval.  If set to any other string, the reported time stamp
            will represent the end of the interval.  Place after ALL
            labels.

        sorted:
            [optional, default is False]

            Should ALL columns be sorted?  Place after ALL labels.
    """
    tsutils._printiso(extract(hbnfilename, interval, *labels, **kwds))
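
# A minimal sketch of the wildcard label syntax documented above (the filename
# "example.hbn" is an assumed example): monthly values of every PWATER
# variable for PERLND 101, plus TAET for every PERLND.
#
#     df = extract(
#         "example.hbn", "monthly", "PERLND,101,PWATER,", "PERLND,,,TAET"
#     )
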
def oudin_form_cli(
    lat,
    temp_min_col=None,
    temp_max_col=None,
    temp_mean_col=None,
    k1=100,
    k2=5,
    source_units=None,
    input_ts="-",
    start_date=None,
    end_date=None,
    dropna="no",
    clean=False,
    round_index=None,
    skiprows=None,
    index_type="datetime",
    names=None,
    target_units=None,
    print_input=False,
    tablefmt="csv",
):
    """Oudin PET: f(Tavg, latitude)

    This model uses daily mean temperature to estimate PET based on the
    Julian day of year and latitude.  The latter are used to estimate
    extraterrestrial solar radiation.

    Average daily temperature can be supplied or if not, calculated by
    (Tmax+Tmin)/2.

    The constants `k1` and `k2` are used in the generic form of the
    equation to adjust the PET.  The defaults for `k1` and `k2` for this
    function are from Oudin with k1=100 and k2=5.  Jensen-Haise presented
    k1=40 and k2=0, and McGuinness presented k1=68 and k2=5.

    The `k2` parameter offsets the mean temperature; potential evaporation
    is 0 whenever the mean temperature is at or below -k2 degrees C.  The
    `k1` parameter is a scaling parameter.

    Reference:

        Ludovic Oudin et al., "Which potential evapotranspiration input for
        a lumped rainfall–runoff model?: Part 2—Towards a simple and
        efficient potential evapotranspiration model for rainfall–runoff
        modelling", Journal of Hydrology, Volume 303, Issues 1–4, 1 March
        2005, Pages 290-306, ISSN 0022-1694,
        http://dx.doi.org/10.1016/j.jhydrol.2004.08.026.
        (http://www.sciencedirect.com/science/article/pii/S0022169404004056)

    Parameters
    ----------
    lat: float
        The latitude of the station.  Positive specifies the Northern
        Hemisphere, and negative values represent the Southern Hemisphere.
    temp_min_col: str, int
        The column name or number (data columns start numbering at 1) in
        the input data that represents the daily minimum temperature.
    temp_max_col: str, int
        The column name or number (data columns start numbering at 1) in
        the input data that represents the daily maximum temperature.
    source_units
        If a unit is specified for the column as the second field of a ':'
        delimited column name, then the specified units and the
        'source_units' must match exactly.

        Any unit string compatible with the 'pint' library can be used.

        Since there are two required input columns ("temp_min_col" and
        "temp_max_col") and one optional input column ("temp_mean_col")
        you need to supply units for each input column in `source_units`.

        Command line::

            mettoolbox pet oudin_form 24 1 2 degF,degF < tmin_tmax_data.csv

        Python::

            from mettoolbox import mettoolbox as mt
            df = mt.pet.oudin_form(24,
                                   1,
                                   2,
                                   ["degF", "degF"],
                                   input_ts="tmin_tmax_data.csv")
    {input_ts}
    {start_date}
    {end_date}
    {dropna}
    {clean}
    {round_index}
    {skiprows}
    {index_type}
    {names}
    {target_units}
    {print_input}
    {tablefmt}
    temp_mean_col: str, int
        The column name or number (data columns start numbering at 1) in
        the input data that represents the daily mean temperature.  If
        None will be estimated by the average of `temp_min_col` and
        `temp_max_col`.
    """
    tsutils._printiso(
        pet.oudin_form(
            lat,
            source_units=source_units,
            temp_min_col=temp_min_col,
            temp_max_col=temp_max_col,
            temp_mean_col=temp_mean_col,
            k1=k1,
            k2=k2,
            input_ts=input_ts,
            start_date=start_date,
            end_date=end_date,
            dropna=dropna,
            clean=clean,
            round_index=round_index,
            skiprows=skiprows,
            index_type=index_type,
            names=names,
            target_units=target_units,
            print_input=print_input,
        ),
        tablefmt=tablefmt,
    )
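
# A sketch of the generic Oudin-form relationship that the `k1`/`k2` text
# above refers to (my reading of Oudin et al. 2005, not necessarily the exact
# implementation inside ``pet.oudin_form``): with ``re`` the extraterrestrial
# radiation in MJ/m**2/day, a latent heat of vaporization of 2.45 MJ/kg, and
# a water density of 1000 kg/m**3, the PET in mm/day is
#
#     pet = 1000.0 * re / (2.45 * 1000.0) * (tmean + k2) / k1 if tmean + k2 > 0 else 0.0
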
def humidity_cli(
    method,
    source_units,
    input_ts="-",
    columns=None,
    start_date=None,
    end_date=None,
    dropna="no",
    clean=False,
    round_index=None,
    skiprows=None,
    index_type="datetime",
    names=None,
    target_units=None,
    print_input=False,
    tablefmt="csv",
    hum_min_col=None,
    hum_max_col=None,
    hum_mean_col=None,
    a0=None,
    a1=None,
    kr=None,
    hourly_temp=None,
    preserve_daily_mean=None,
):
    """Disaggregate daily relative humidity to hourly humidity.

    Relative humidity disaggregation requires the following input data.

    +--------------+---------------------------------------------+
    | Input data   | Description                                 |
    +==============+=============================================+
    | hum_min_col  | Required column name or number representing |
    |              | the minimum daily relative humidity.        |
    +--------------+---------------------------------------------+
    | hum_max_col  | Required column name or number representing |
    |              | the maximum daily relative humidity.        |
    +--------------+---------------------------------------------+
    | hum_mean_col | Optional column name or number representing |
    |              | the average daily relative humidity.        |
    |              | Default is None and if None will be         |
    |              | calculated as average of `hum_min_col` and  |
    |              | `hum_max_col`.                              |
    +--------------+---------------------------------------------+

    Parameters
    ----------
    method: str
        Available disaggregation methods for humidity.

        +---------------------------+-------------------------------+
        | `method`                  | Description                   |
        +===========================+===============================+
        | equal                     | Duplicate mean daily humidity |
        |                           | for the 24 hours of the day.  |
        +---------------------------+-------------------------------+
        | minimal                   | The dew point temperature is  |
        |                           | set to the minimum            |
        |                           | temperature on that day.      |
        +---------------------------+-------------------------------+
        | dewpoint_regression       | Using hourly observations, a  |
        |                           | regression approach is        |
        |                           | applied to calculate daily    |
        |                           | dew point temperature.        |
        |                           | Regression parameters must be |
        |                           | specified.                    |
        +---------------------------+-------------------------------+
        | linear_dewpoint_variation | This method extends through   |
        |                           | linearly varying dew point    |
        |                           | temperature between           |
        |                           | consecutive days.  The        |
        |                           | parameter kr needs to be      |
        |                           | specified (kr=6 if monthly    |
        |                           | radiation exceeds 100 W/m2    |
        |                           | else kr=12).                  |
        +---------------------------+-------------------------------+
        | min_max                   | This method requires minimum  |
        |                           | and maximum relative humidity |
        |                           | for each day.                 |
        +---------------------------+-------------------------------+
        | month_hour_precip_mean    | Calculate hourly humidity     |
        |                           | from categorical              |
        |                           | [month, hour, precip(y/n)]    |
        |                           | mean values derived from      |
        |                           | observations.                 |
        +---------------------------+-------------------------------+

        Required keywords for each method.  The "Column Name/Index
        Keywords" represent the column name or index (data columns start
        numbering at 1) in the input dataset.

        +---------------------------+----------------+---------------+
        | `method`                  | Column Name/   | Other         |
        |                           | Index Keywords | Keywords      |
        +---------------------------+----------------+---------------+
        | equal                     | `hum_mean_col` |               |
        +---------------------------+----------------+---------------+
        | minimal                   | `temp_min_col` | `hourly_temp` |
        +---------------------------+----------------+---------------+
        | dewpoint_regression       | `temp_min_col` | `a0`          |
        |                           |                | `a1`          |
        |                           |                | `hourly_temp` |
        +---------------------------+----------------+---------------+
        | linear_dewpoint_variation | `temp_min_col` | `a0`          |
        |                           |                | `a1`          |
        |                           |                | `kr`          |
        |                           |                | `hourly_temp` |
        +---------------------------+----------------+---------------+
        | min_max                   | `hum_min_col`  | `hourly_temp` |
        |                           | `hum_max_col`  |               |
        |                           | `temp_min_col` |               |
        |                           | `temp_max_col` |               |
        +---------------------------+----------------+---------------+
        | month_hour_precip_mean    | `precip_col`   |               |
        +---------------------------+----------------+---------------+

    {psource_units}
    {input_ts}
    {columns}
    {start_date}
    {end_date}
    {dropna}
    {clean}
    {round_index}
    {skiprows}
    {index_type}
    {names}
    {target_units}
    {print_input}
    {tablefmt}
    hum_min_col:
        Column index (data columns start numbering at 1) or column name
        from the input data that contains the daily minimum humidity.
    hum_max_col:
        Column index (data columns start numbering at 1) or column name
        from the input data that contains the daily maximum humidity.
    hum_mean_col:
        Column index (data columns start numbering at 1) or column name
        from the input data that contains the daily mean humidity.
    a0: float
        The "a0" parameter.
    a1: float
        The "a1" parameter.
    kr: int
        Parameter for the "linear_dewpoint_variation" method.
    hourly_temp: str
        Filename of a CSV file that contains an hourly time series of
        temperatures.
    preserve_daily_mean: str
        Column name or index (data columns start numbering at 1) that
        identifies the observed daily mean humidity.  If not None will
        correct the daily mean values of the disaggregated data with the
        observed daily mean humidity.
    """
    tsutils._printiso(
        disaggregate.humidity(
            method,
            input_ts=input_ts,
            columns=columns,
            start_date=start_date,
            end_date=end_date,
            dropna=dropna,
            clean=clean,
            round_index=round_index,
            skiprows=skiprows,
            index_type=index_type,
            names=names,
            source_units=source_units,
            target_units=target_units,
            print_input=print_input,
            hum_min_col=hum_min_col,
            hum_max_col=hum_max_col,
            hum_mean_col=hum_mean_col,
            a0=a0,
            a1=a1,
            kr=kr,
            hourly_temp=hourly_temp,
            preserve_daily_mean=preserve_daily_mean,
        ),
        tablefmt=tablefmt,
    )
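
# A minimal sketch (the filename "humidity.csv" and column number are assumed
# examples) of the simplest method, which repeats the daily mean humidity for
# every hour of the day.
#
#     hourly_hum = disaggregate.humidity(
#         "equal",
#         source_units="percent",
#         input_ts="humidity.csv",
#         hum_mean_col=1,
#     )
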
def evaporation_cli(
    method,
    source_units,
    input_ts="-",
    columns=None,
    start_date=None,
    end_date=None,
    dropna="no",
    clean=False,
    round_index=None,
    skiprows=None,
    index_type="datetime",
    names=None,
    target_units=None,
    print_input=False,
    tablefmt="csv",
    lat=None,
):
    """Disaggregate daily evaporation to hourly evaporation.

    Parameters
    ----------
    method: str
        This is the method that will be used to disaggregate the daily
        evaporation data.

        There are two methods, a trapezoidal shape from sunrise to sunset
        called "trap" and a fixed, smooth curve starting at 0700 (7 am)
        and stopping at 1900 (7 pm) called "fixed".
    {source_units}
    {input_ts}
    {columns}
    {start_date}
    {end_date}
    {dropna}
    {clean}
    {round_index}
    {skiprows}
    {index_type}
    {names}
    {target_units}
    {print_input}
    {tablefmt}
    lat: float
        The latitude of the station.  Positive specifies the Northern
        Hemisphere, and negative values represent the Southern Hemisphere.
    """
    tsutils._printiso(
        disaggregate.evaporation(
            method,
            input_ts=input_ts,
            columns=columns,
            start_date=start_date,
            end_date=end_date,
            dropna=dropna,
            clean=clean,
            round_index=round_index,
            skiprows=skiprows,
            index_type=index_type,
            names=names,
            source_units=source_units,
            target_units=target_units,
            print_input=print_input,
            lat=lat,
        ),
        tablefmt=tablefmt,
    )
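
# A minimal sketch (the filename "evap.csv" is an assumed example): the "trap"
# method needs the station latitude to place sunrise and sunset.
#
#     hourly_evap = disaggregate.evaporation(
#         "trap",
#         source_units="mm",
#         input_ts="evap.csv",
#         lat=28.0,
#     )
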
def allen_cli(
    lat,
    temp_min_col=None,
    temp_max_col=None,
    temp_mean_col=None,
    source_units=None,
    input_ts="-",
    start_date=None,
    end_date=None,
    dropna="no",
    clean=False,
    round_index=None,
    skiprows=None,
    index_type="datetime",
    names=None,
    target_units=None,
    print_input=False,
    tablefmt="csv",
):
    """Allen PET: f(Tmin, Tmax, Tavg, latitude)

    Average daily temperature can be supplied or if not, calculated by
    (Tmax+Tmin)/2.

    Parameters
    ----------
    lat: float
        The latitude of the station.  Positive specifies the Northern
        Hemisphere, and negative values represent the Southern Hemisphere.
    temp_min_col: str, int
        The column name or number (data columns start numbering at 1) in
        the input data that represents the daily minimum temperature.
    temp_max_col: str, int
        The column name or number (data columns start numbering at 1) in
        the input data that represents the daily maximum temperature.
    source_units
        If a unit is specified for the column as the second field of a ':'
        delimited column name, then the specified units and the
        'source_units' must match exactly.

        Any unit string compatible with the 'pint' library can be used.

        Since there are two required input columns ("temp_min_col" and
        "temp_max_col") and one optional input column ("temp_mean_col")
        you need to supply units for each input column in `source_units`.

        Command line::

            mettoolbox pet allen 24 1 2 degF,degF < tmin_tmax_data.csv

        Python::

            from mettoolbox import mettoolbox as mt
            df = mt.pet.allen(24,
                              1,
                              2,
                              ["degF", "degF"],
                              input_ts="tmin_tmax_data.csv")
    {input_ts}
    {start_date}
    {end_date}
    {dropna}
    {clean}
    {round_index}
    {skiprows}
    {index_type}
    {names}
    {target_units}
    {print_input}
    {tablefmt}
    temp_mean_col: str, int
        The column name or number (data columns start numbering at 1) in
        the input data that represents the daily mean temperature.  If
        None will be estimated by the average of `temp_min_col` and
        `temp_max_col`.
    """
    tsutils._printiso(
        pet.allen(
            lat,
            temp_min_col=temp_min_col,
            temp_max_col=temp_max_col,
            temp_mean_col=temp_mean_col,
            source_units=source_units,
            input_ts=input_ts,
            start_date=start_date,
            end_date=end_date,
            dropna=dropna,
            clean=clean,
            round_index=round_index,
            skiprows=skiprows,
            index_type=index_type,
            names=names,
            target_units=target_units,
            print_input=print_input,
        ),
        tablefmt=tablefmt,
    )
def hamon_cli(
    lat,
    temp_min_col=None,
    temp_max_col=None,
    temp_mean_col=None,
    k=1.2,
    source_units=None,
    input_ts="-",
    start_date=None,
    end_date=None,
    dropna="no",
    clean=False,
    round_index=None,
    skiprows=None,
    index_type="datetime",
    names=None,
    target_units=None,
    print_input=False,
    tablefmt="csv",
):
    """Hamon PET: f(Tavg, latitude)

    Average daily temperature can be supplied or if not, calculated by
    (Tmax+Tmin)/2.

    Parameters
    ----------
    lat: float
        The latitude of the station.  Positive specifies the Northern
        Hemisphere, and negative values represent the Southern Hemisphere.
    temp_min_col: str, int
        The column name or number (data columns start numbering at 1) in
        the input data that represents the daily minimum temperature.
    temp_max_col: str, int
        The column name or number (data columns start numbering at 1) in
        the input data that represents the daily maximum temperature.
    k: float
        A scaling factor, defaults to 1.2.  This is an adjustment for
        local conditions, for example, Lu, 2005 found that k=1.2 was a
        better fit for the southeastern United States.
    source_units
        If a unit is specified for the column as the second field of a ':'
        delimited column name, then the specified units and the
        'source_units' must match exactly.

        Any unit string compatible with the 'pint' library can be used.

        Since there are two required input columns ("temp_min_col" and
        "temp_max_col") and one optional input column ("temp_mean_col")
        you need to supply units for each input column in `source_units`.

        Command line::

            mettoolbox pet hamon 24 1 2 degF,degF < tmin_tmax_data.csv

        Python::

            from mettoolbox import mettoolbox as mt
            df = mt.pet.hamon(24,
                              1,
                              2,
                              ["degF", "degF"],
                              input_ts="tmin_tmax_data.csv")
    {input_ts}
    {start_date}
    {end_date}
    {dropna}
    {clean}
    {round_index}
    {skiprows}
    {index_type}
    {names}
    {target_units}
    {print_input}
    {tablefmt}
    temp_mean_col: str, int
        The column name or number (data columns start numbering at 1) in
        the input data that represents the daily mean temperature.  If
        None will be estimated by the average of `temp_min_col` and
        `temp_max_col`.

    References
    ----------
    Lu et al. (2005).  A comparison of six potential evapotranspiration
    methods for regional use in the southeastern United States.  Journal of
    the American Water Resources Association, 41, 621-633.
    """
    tsutils._printiso(
        pet.hamon(
            lat,
            temp_min_col=temp_min_col,
            temp_max_col=temp_max_col,
            temp_mean_col=temp_mean_col,
            k=k,
            source_units=source_units,
            input_ts=input_ts,
            start_date=start_date,
            end_date=end_date,
            dropna=dropna,
            clean=clean,
            round_index=round_index,
            skiprows=skiprows,
            index_type=index_type,
            names=names,
            target_units=target_units,
            print_input=print_input,
        ),
        tablefmt=tablefmt,
    )
def precipitation_cli(
    method,
    source_units,
    input_ts="-",
    start_date=None,
    end_date=None,
    dropna="no",
    clean=False,
    round_index=None,
    skiprows=None,
    index_type="datetime",
    names=None,
    target_units=None,
    print_input=False,
    tablefmt="csv",
    columns=None,
    masterstation_hour_col=None,
):
    """Disaggregate daily precipitation to hourly precipitation.

    Parameters
    ----------
    method: str
        Disaggregation methods available for precipitation.

        +---------------+--------------------------------------------+
        | `method`      | Description                                |
        +===============+============================================+
        | equal         | In order to derive hourly from daily       |
        |               | values, the daily total is simply divided  |
        |               | by 24 resulting in an equal distribution.  |
        +---------------+--------------------------------------------+
        | cascade       | The cascade model is more complex and      |
        |               | requires a parameter estimation method.    |
        +---------------+--------------------------------------------+
        | masterstation | If hourly values are available for another |
        |               | site in the vicinity of the station        |
        |               | considered, the cumulative sub-daily mass  |
        |               | curve can be transferred from the station  |
        |               | that provides hourly values to the station |
        |               | of interest.                               |
        +---------------+--------------------------------------------+

    {input_ts}
    {start_date}
    {end_date}
    {dropna}
    {clean}
    {round_index}
    {skiprows}
    {index_type}
    {names}
    {psource_units}
    {target_units}
    {print_input}
    {tablefmt}
    {columns}
    masterstation_hour_col
        The column number or name that contains the hourly data used as
        the reference station.
    """
    tsutils._printiso(
        disaggregate.precipitation(
            method,
            input_ts=input_ts,
            columns=columns,
            start_date=start_date,
            end_date=end_date,
            dropna=dropna,
            clean=clean,
            round_index=round_index,
            skiprows=skiprows,
            index_type=index_type,
            names=names,
            source_units=source_units,
            target_units=target_units,
            print_input=print_input,
            masterstation_hour_col=masterstation_hour_col,
        ),
        tablefmt=tablefmt,
    )
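
# A minimal sketch (the filename "precip.csv" is an assumed example):
# spreading each daily total evenly across the 24 hours of the day.
#
#     hourly_precip = disaggregate.precipitation(
#         "equal",
#         source_units="mm",
#         input_ts="precip.csv",
#     )
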
def radiation_cli(
    method,
    source_units,
    input_ts="-",
    columns=None,
    start_date=None,
    end_date=None,
    dropna="no",
    clean=False,
    round_index=None,
    skiprows=None,
    index_type="datetime",
    names=None,
    target_units=None,
    print_input=False,
    tablefmt="csv",
    pot_rad=None,
    angstr_a=None,
    angstr_b=None,
    bristcamp_a=None,
    bristcamp_c=None,
    mean_course=None,
    lat=None,
    lon=None,
    hourly_rad=None,
):
    """Disaggregate daily radiation to hourly radiation.

    Parameters
    ----------
    method: str
        Disaggregation methods available for radiation.

        +-----------------+-----------------------------------------+
        | `method`        | Description                             |
        +=================+=========================================+
        | pot_rad         | This method allows one to disaggregate  |
        |                 | daily averages of shortwave radiation   |
        |                 | using hourly values of potential        |
        |                 | (clear-sky) radiation calculated for    |
        |                 | the location of the station.            |
        +-----------------+-----------------------------------------+
        | pot_rad_via_ssd | If daily sunshine recordings are        |
        |                 | available, the Angstrom model is        |
        |                 | applied to transform sunshine duration  |
        |                 | to shortwave radiation.                 |
        +-----------------+-----------------------------------------+
        | pot_rad_via_bc  | In this case, the Bristow-Campbell      |
        |                 | model is applied which relates minimum  |
        |                 | and maximum temperature to shortwave    |
        |                 | radiation.                              |
        +-----------------+-----------------------------------------+
        | mean_course     | Hourly radiation follows an observed    |
        |                 | average course (calculated for each     |
        |                 | month) while preserving the daily mean. |
        +-----------------+-----------------------------------------+

    {input_ts}
    {columns}
    {start_date}
    {end_date}
    {dropna}
    {clean}
    {round_index}
    {skiprows}
    {index_type}
    {names}
    {psource_units}
    {target_units}
    {print_input}
    {tablefmt}
    pot_rad: str
        Hourly dataframe including potential radiation.
    angstr_a: float
        Parameter a of the Angstrom model (intercept).
    angstr_b: float
        Parameter b of the Angstrom model (slope).
    bristcamp_a: float
        Parameter a for the Bristow-Campbell model.
    bristcamp_c: float
        Parameter c for the Bristow-Campbell model.
    hourly_rad: str
        Monthly values of the mean hourly radiation course.
    lat: float
        Latitude.
    lon: float
        Longitude.
    mean_course:
        Filename of HOURLY CSV file that contains radiation values to be
        used with the "mean_course" method.
    """
    tsutils._printiso(
        disaggregate.radiation(
            method,
            input_ts=input_ts,
            columns=columns,
            start_date=start_date,
            end_date=end_date,
            dropna=dropna,
            clean=clean,
            round_index=round_index,
            skiprows=skiprows,
            index_type=index_type,
            names=names,
            source_units=source_units,
            target_units=target_units,
            print_input=print_input,
            pot_rad=pot_rad,
            angstr_a=angstr_a,
            angstr_b=angstr_b,
            bristcamp_a=bristcamp_a,
            bristcamp_c=bristcamp_c,
            hourly_rad=hourly_rad,
            lat=lat,
            lon=lon,
        ),
        tablefmt=tablefmt,
    )
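
# A minimal sketch (the filename "radiation.csv" and the coordinates are
# assumed examples): the "pot_rad" method needs the station location so that
# clear-sky radiation can be computed for the site.
#
#     hourly_rad = disaggregate.radiation(
#         "pot_rad",
#         source_units="W/m**2",
#         input_ts="radiation.csv",
#         lat=28.0,
#         lon=-82.0,
#     )
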