def listvariables(filename, tablefmt='csv_nos', header='default'):
    """List variables available for each type.

    The types are "subcatchment", "node", "link", "pollutant", "system".

    Parameters
    ----------
    {filename}
    {tablefmt}
    {header}

    """
    obj = SwmmExtract(filename)
    if header == 'default':
        header = ['TYPE', 'DESCRIPTION', 'VARINDEX']
    # 'pollutant' really isn't its own itemtype,
    # but is part of subcatchment, node, and link...
    collect = []
    for itemtype in ['subcatchment', 'node', 'link', 'system']:
        typenumber = obj.type_check(itemtype)
        obj.update_var_code(typenumber)
        for i in obj.vars[typenumber]:
            try:
                collect.append(
                    [itemtype, obj.varcode[typenumber][i].decode(), i])
            except (TypeError, AttributeError):
                collect.append(
                    [itemtype, str(obj.varcode[typenumber][i]), str(i)])
    return tsutils.printiso(collect, tablefmt=tablefmt, headers=header)
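# A minimal usage sketch for listvariables() above, assuming the module-level
# imports (SwmmExtract, tsutils) are available.  'example.out' is a
# hypothetical SWMM 5 binary output file name, not part of the source.
def _example_listvariables(outfile='example.out'):
    # Print the TYPE/DESCRIPTION/VARINDEX table in the default 'csv_nos' format.
    return listvariables(outfile, tablefmt='csv_nos')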
def listdsns(wdmpath):
    """Print out a table describing all DSNs in the WDM.

    Parameters
    ----------
    wdmpath
        Path and WDM filename.

    """
    if not os.path.exists(wdmpath):
        raise ValueError("""
*
*   File {0} does not exist.
*
""".format(wdmpath))

    collect = OrderedDict()
    for i in range(1, 32001):
        try:
            testv = _describedsn(wdmpath, i)
        except wdmutil.WDMError:
            continue
        for key in ['DSN', 'SCENARIO', 'LOCATION', 'CONSTITUENT', 'TSTYPE',
                    'START_DATE', 'END_DATE', 'TCODE', 'TSTEP']:
            collect.setdefault(key, []).append(testv[key.lower()])
    return tsutils.printiso(collect, tablefmt='plain')
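# A minimal usage sketch for listdsns() above, assuming the module-level
# imports (os, OrderedDict, wdmutil, _describedsn, tsutils) are available.
# 'data.wdm' is a hypothetical WDM file name; the function scans DSNs 1
# through 32000 and prints one row per DSN that exists.
def _example_listdsns(wdmfile='data.wdm'):
    return listdsns(wdmfile)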
def dump(hbnfilename, time_stamp='begin'):
    '''Print out ALL data from a HSPF binary output file.

    :param hbnfilename: The HSPF binary output file.
    :param time_stamp: Defines the location of the time stamp for each
        interval.  If set to 'begin', the time stamp is at the beginning of
        the interval.  If set to any other string, the reported time stamp
        represents the end of the interval.  Default is 'begin'.
    '''
    if time_stamp not in ['begin', 'end']:
        raise ValueError('''
*
*   The "time_stamp" optional keyword must be either
*   "begin" or "end".  You gave {0}.
*
'''.format(time_stamp))

    index, data = _get_data(hbnfilename, None, [',,,'], catalog_only=False)
    skeys = list(data.keys())
    skeys.sort()

    result = pd.concat([pd.Series(data[i], index=index) for i in skeys],
                       axis=1,
                       join_axes=[pd.Index(index)])
    columns = ['{0}_{1}_{2}_{3}'.format(i[1], i[2], i[4], i[5])
               for i in skeys]
    result.columns = columns
    if time_stamp == 'begin':
        result = tsutils.asbestfreq(result)
        result = result.tshift(-1)
    return tsutils.printiso(result)
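# A minimal usage sketch for dump() above, assuming the module-level imports
# (pd, _get_data, tsutils) are available.  'hspf_output.hbn' is a
# hypothetical HSPF binary output file name.  Passing time_stamp='end'
# reports each interval at its end instead of its beginning.
def _example_dump(hbnfile='hspf_output.hbn'):
    return dump(hbnfile, time_stamp='end')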
def listdetail(filename, itemtype, name='', tablefmt='simple',
               header='default'):
    """List nodes and metadata in output file.

    Parameters
    ----------
    {filename}
    {itemtype}
    name : str
        [optional, default is '']

        Specific name to print only that entry.  This can be looked up
        using 'listvariables'.
    {tablefmt}
    {header}

    """
    obj = SwmmExtract(filename)
    typenumber = obj.type_check(itemtype)
    if name:
        objectlist = [obj.name_check(itemtype, name)[0]]
    else:
        objectlist = obj.names[typenumber]

    propnumbers = obj.propcode[typenumber]
    if header == 'default':
        header = ['#Name'] + [PROPCODE[typenumber][i] for i in propnumbers]
    collect = []
    for i, oname in enumerate(objectlist):
        printvar = [oname]
        for j in obj.prop[typenumber][i]:
            if j[0] == 0:
                printvar.append(TYPECODE[typenumber][j[1]])
            else:
                printvar.append(j[1])
        collect.append(printvar)
    df = pd.DataFrame(collect)
    cheader = []
    for head in header:
        if head not in cheader:
            cheader.append(head)
        else:
            cnt = cheader.count(head)
            cheader.append('{0}.{1}'.format(head, cnt))
    df.columns = cheader
    return tsutils.printiso(df, tablefmt=tablefmt, headers=header)
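# A minimal usage sketch for listdetail() above, assuming the module-level
# imports (SwmmExtract, PROPCODE, TYPECODE, pd, tsutils) are available.
# 'example.out' and the node name 'J1' are hypothetical; the name argument
# narrows the table to a single object.
def _example_listdetail(outfile='example.out'):
    # All nodes...
    listdetail(outfile, 'node')
    # ...or just one node.
    return listdetail(outfile, 'node', name='J1')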
def extract(*wdmpath, **kwds):
    """Print out DSN data to the screen with ISO-8601 dates.

    This is the API version also used by 'extract_cli'.
    """
    # Adapt to both forms of presenting wdm files and DSNs
    # Old form: '... file.wdm 101 102 103 ...'
    # New form: '... file.wdm,101 adifferentfile.wdm,101 ...'
    try:
        start_date = kwds.pop('start_date')
    except KeyError:
        start_date = None
    try:
        end_date = kwds.pop('end_date')
    except KeyError:
        end_date = None
    if len(kwds) > 0:
        raise ValueError("""
*
*   The only allowed keywords are start_date and end_date.  You
*   have given {0}.
*
""".format(kwds))

    labels = []
    for lab in wdmpath:
        if ',' in str(lab):
            labels.append(lab.split(','))
        else:
            if lab == wdmpath[0]:
                continue
            labels.append([wdmpath[0], lab])

    for index, lab in enumerate(labels):
        wdmpath = lab[0]
        dsn = lab[1]
        nts = WDM.read_dsn(wdmpath,
                           int(dsn),
                           start_date=start_date,
                           end_date=end_date)
        if index == 0:
            result = nts
        else:
            try:
                result = result.join(nts, how='outer')
            except Exception:
                raise ValueError("""
*
*   The column {0} is duplicated.  Dataset names must be unique.
*
""".format(nts.columns[0]))
    return tsutils.printiso(result)
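# A minimal usage sketch for the WDM extract() above, assuming it is the
# extract() in scope and that the module-level imports (WDM, tsutils) are
# available.  'data.wdm' and DSNs 101/102 are hypothetical.
def _example_wdm_extract(wdmfile='data.wdm'):
    # Old form: one WDM file followed by bare DSN numbers.
    extract(wdmfile, 101, 102, start_date='2000-01-01')
    # New form: 'file,dsn' pairs, possibly from different WDM files.
    return extract('{0},101'.format(wdmfile), '{0},102'.format(wdmfile))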
def catalog(filename, itemtype='', tablefmt='csv_nos', header='default',
            retval=False):
    """List the catalog of objects in output file.

    This catalog lists all of the labels that can be used in the extract
    routine.

    Parameters
    ----------
    {filename}
    {itemtype}
    {tablefmt}
    {header}
    retval
        [optional, default is False]

        Whether to just return the catalog as a Python list.  Only useful
        when using the Python API.

    """
    obj = SwmmExtract(filename)
    if itemtype:
        typenumber = obj.type_check(itemtype)
        plist = [typenumber]
    else:
        plist = list(range(len(obj.itemlist)))
    if header == 'default':
        header = ['TYPE', 'NAME', 'VARIABLE']
    collect = []
    for itemtype in ['subcatchment', 'node', 'link', 'system']:
        typenumber = obj.type_check(itemtype)
        obj.update_var_code(typenumber)

    for i in plist:
        typenumber = obj.type_check(obj.itemlist[i])
        for oname in obj.names[i]:
            if obj.itemlist[i] == 'pollutant':
                continue
            if obj.itemlist[i] == 'system':
                collect.append(['system', oname, oname])
                continue
            for j in obj.vars[typenumber]:
                collect.append([obj.itemlist[i],
                                oname,
                                obj.varcode[typenumber][j]])
    if retval is True:
        return collect
    return tsutils.printiso(collect, tablefmt=tablefmt, headers=header)
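# A minimal usage sketch for catalog() above, assuming the module-level
# imports (SwmmExtract, tsutils) are available.  'example.out' is a
# hypothetical SWMM output file.  With retval=True the labels come back as a
# Python list instead of being printed, which is handy for feeding a
# label-based extract() programmatically.
def _example_catalog(outfile='example.out'):
    labels = catalog(outfile, retval=True)
    # Each entry is [TYPE, NAME, VARIABLE].
    return labels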
def extract(filename, itemtype, name, variableindex):
    """Get the time series data for a particular object and variable.

    Requires:
        filename:  Filename of SWMM output file.
        itemtype:  A SWMM variable type, such as `node`.
        name:  The variable name in SWMM, such as the node name.
        variableindex:  int, corresponds to a variable number.  Can be
            retrieved with swmmtoolbox.listvariables.
    """
    obj = _SwmmExtract(filename)
    typenumber = obj.TypeCheck(itemtype)
    if itemtype != 'system':
        name = obj.NameCheck(itemtype, name)[0]

    # This is the band-aid for correctly reading in
    # pollutants... replace with something cleaner...
    start = len(VARCODE[typenumber])
    end = start + len(obj.names[3])
    nlabels = list(range(start, end))
    ndict = dict(list(zip(nlabels, obj.names[3])))
    VARCODE[typenumber].update(ndict)

    begindate = datetime.datetime(1899, 12, 30)
    dates = []
    values = []
    for time in range(obj.nperiods):
        date, value = obj.GetSwmmResults(
            typenumber, name, int(variableindex), time)
        days = int(date)
        seconds = (date - days) * 86400
        date = begindate + datetime.timedelta(
            days=days, seconds=seconds)
        dates.append(date)
        values.append(value)
    jtsd = pd.DataFrame(
        pd.Series(values, index=dates),
        columns=['{0}_{1}_{2}'.format(
            itemtype, name, VARCODE[typenumber][int(variableindex)])])
    try:
        result = result.join(jtsd)
    except NameError:
        result = jtsd
    return tsutils.printiso(result)
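# A minimal usage sketch for this older extract() signature, assuming it is
# the extract() in scope and that the module-level imports (_SwmmExtract,
# VARCODE, datetime, pd, tsutils) are available.  The file name
# 'example.out', node name 'J1', and variable index 0 are hypothetical;
# valid indexes come from listvariables().
def _example_extract_single(outfile='example.out'):
    return extract(outfile, 'node', 'J1', 0)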
def extract(filename, *labels):
    """Get the time series data for a particular object and variable.

    Parameters
    ----------
    {filename}
    {labels}

    """
    obj = SwmmExtract(filename)
    jtsd = []
    for label in labels:
        itemtype, name, variableindex = label.split(',')
        typenumber = obj.type_check(itemtype)
        # if itemtype != 'system':
        name = obj.name_check(itemtype, name)[0]
        obj.update_var_code(typenumber)

        begindate = datetime.datetime(1899, 12, 30)
        dates = []
        values = []
        for time in range(obj.swmm_nperiods):
            date, value = obj.get_swmm_results(typenumber,
                                               name,
                                               int(variableindex),
                                               time)
            days = int(date)
            seconds = int((date - days) * 86400)
            extra = seconds % 10
            if extra != 0:
                if extra == 9:
                    seconds = seconds + 1
                if extra == 1:
                    seconds = seconds - 1
            date = begindate + datetime.timedelta(days=days,
                                                  seconds=seconds)
            dates.append(date)
            values.append(value)
        jtsd.append(
            pd.DataFrame(
                pd.Series(values, index=dates),
                columns=['{0}_{1}_{2}'.format(
                    itemtype,
                    name,
                    obj.varcode[typenumber][int(variableindex)])]))
    result = pd.concat(jtsd, axis=1, join_axes=[jtsd[0].index])
    return tsutils.printiso(result)
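# A minimal usage sketch for the label-based extract() above, assuming it is
# the extract() in scope and that the module-level imports (SwmmExtract,
# datetime, pd, tsutils) are available.  'example.out', node 'J1', and the
# variable indexes are hypothetical; each label is an
# 'itemtype,name,variableindex' triple.
def _example_extract_labels(outfile='example.out'):
    return extract(outfile, 'node,J1,0', 'node,J1,1')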
def listvariables(filename, tablefmt='csv_nos', header='default',
                  retval=False):
    """List variables available for each type.

    The types are "subcatchment", "node", "link", "pollutant", "system".

    Parameters
    ----------
    {filename}
    {tablefmt}
    {header}
    retval
        [optional, default is False]

        Whether to just return the collected rows as a Python list.  Only
        useful when using the Python API.

    """
    obj = SwmmExtract(filename)
    if header == 'default':
        header = ['TYPE', 'DESCRIPTION', 'VARINDEX']
    # 'pollutant' really isn't its own itemtype,
    # but is part of subcatchment, node, and link...
    collect = []
    for itemtype in ['subcatchment', 'node', 'link', 'system']:
        typenumber = obj.type_check(itemtype)
        obj.update_var_code(typenumber)
        for i in obj.vars[typenumber]:
            try:
                collect.append([itemtype,
                                obj.varcode[typenumber][i].decode(),
                                i])
            except (TypeError, AttributeError):
                collect.append([itemtype,
                                str(obj.varcode[typenumber][i]),
                                str(i)])
    if retval is True:
        return collect
    return tsutils.printiso(collect, tablefmt=tablefmt, headers=header)
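# A minimal usage sketch for the retval form of listvariables() above,
# assuming the module-level imports (SwmmExtract, tsutils) are available.
# 'example.out' is a hypothetical SWMM output file; with retval=True the
# [TYPE, DESCRIPTION, VARINDEX] rows are returned as a list instead of
# printed.
def _example_listvariables_retval(outfile='example.out'):
    return listvariables(outfile, retval=True)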
def catalog(filename, itemtype='', tablefmt='simple', header='default'):
    """List the catalog of objects in output file.

    Parameters
    ----------
    {filename}
    {itemtype}
    {tablefmt}
    {header}

    """
    obj = SwmmExtract(filename)
    if itemtype:
        typenumber = obj.type_check(itemtype)
        plist = [typenumber]
    else:
        plist = list(range(len(obj.itemlist)))
    if header == 'default':
        header = ['TYPE', 'NAME']
    collect = []
    for i in plist:
        for oname in obj.names[i]:
            collect.append([obj.itemlist[i], oname])
    return tsutils.printiso(collect, tablefmt=tablefmt, headers=header)
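# A minimal usage sketch for this simpler catalog() variant, assuming it is
# the catalog() in scope and that the module-level imports (SwmmExtract,
# tsutils) are available.  'example.out' is a hypothetical SWMM output file;
# the printed table has only TYPE and NAME columns.
def _example_catalog_names(outfile='example.out'):
    return catalog(outfile, tablefmt='simple')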
def extract(hbnfilename, interval, *labels, **kwds):
    '''Print out data to the screen from a HSPF binary output file.

    :param hbnfilename: The HSPF binary output file.
    :param interval: One of 'yearly', 'monthly', 'daily', or 'BIVL'.
        The 'BIVL' option is a sub-daily interval defined in the UCI file.
        Typically 'BIVL' is used for hourly output, but can be set to any
        value that evenly divides into a day.
    :param labels: The remaining arguments uniquely identify a time series
        in the binary file.  The format is
        'OPERATIONTYPE,ID,SECTION,VARIABLE'.

        For example: 'PERLND,101,PWATER,UZS IMPLND,101,IWATER,RETS'

        Leaving a section without an entry will wildcard that
        specification.  To get all the PWATER variables for PERLND 101 the
        label would read:

        'PERLND,101,PWATER,'

        To get TAET for all PERLNDs:

        'PERLND,,,TAET'

        Note that there are spaces ONLY between label specifications.
    :param time_stamp: Defines the location of the time stamp for each
        interval.  If set to 'begin', the time stamp is at the beginning of
        the interval.  If set to any other string, the reported time stamp
        represents the end of the interval.  Default is 'begin'.  Place
        after ALL labels.
    :param sorted: Should ALL columns be sorted?  Default is False.  Place
        after ALL labels.
    '''
    try:
        time_stamp = kwds.pop('time_stamp')
    except KeyError:
        time_stamp = 'begin'
    if time_stamp not in ['begin', 'end']:
        raise ValueError('''
*
*   The "time_stamp" optional keyword must be either
*   "begin" or "end".  You gave {0}.
*
'''.format(time_stamp))

    try:
        sortall = bool(kwds.pop('sorted'))
    except KeyError:
        sortall = False
    if not (sortall is True or sortall is False):
        raise ValueError('''
*
*   The "sorted" optional keyword must be either
*   True or False.  You gave {0}.
*
'''.format(sortall))

    if len(kwds) > 0:
        raise ValueError('''
*
*   The extract command only accepts optional keywords 'time_stamp' and
*   'sorted'.  You gave {0}.
*
'''.format(list(kwds.keys())))

    interval = interval.lower()
    if interval not in ['bivl', 'daily', 'monthly', 'yearly']:
        raise ValueError('''
*
*   The "interval" argument must be one of "bivl",
*   "daily", "monthly", or "yearly".  You supplied
*   "{0}".
*
'''.format(interval))

    index, data = _get_data(hbnfilename, interval, labels,
                            catalog_only=False)
    index = index[interval2codemap[interval]]
    index = list(index.keys())
    index.sort()
    skeys = list(data.keys())
    if sortall is True:
        skeys.sort(key=lambda tup: tup[1:])
    else:
        skeys.sort()

    result = pd.concat([pd.Series(data[i], index=index) for i in skeys],
                       axis=1,
                       join_axes=[pd.Index(index)])
    columns = ['{0}_{1}_{2}_{3}'.format(i[1], i[2], i[4], i[5])
               for i in skeys]
    result.columns = columns
    if time_stamp == 'begin':
        result = tsutils.asbestfreq(result)
        result = result.tshift(-1)
    return tsutils.printiso(result)
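# A minimal usage sketch for the HSPF binary extract() above, assuming it is
# the extract() in scope and that the module-level imports (pd, _get_data,
# interval2codemap, tsutils) are available.  The file name 'hspf_output.hbn'
# and the labels are hypothetical; an empty field in a label acts as a
# wildcard, as described in the docstring above.
def _example_hbn_extract(hbnfile='hspf_output.hbn'):
    # All PWATER variables for PERLND 101, plus TAET for every PERLND,
    # reported at the end of each daily interval.
    return extract(hbnfile, 'daily',
                   'PERLND,101,PWATER,', 'PERLND,,,TAET',
                   time_stamp='end')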