def daytomonth(inputfile):
    d = pyhecdss.DSSFile(inputfile)
    outputfile = os.getcwd() + "/" + inputfile.split(".")[0] + "_mon.dss"
    do = pyhecdss.DSSFile(outputfile, create_new=True)
    plist = d.get_pathnames()
    for p in plist:
        df, u, p = d.read_rts(p)
        # Resample daily data to monthly means and store as period averages
        do.write_rts(df.columns[0].replace('1DAY', '1MON'),
                     df.resample('M').mean(), u, 'PER-AVER')
    d.close()
    do.close()
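
# Usage sketch for daytomonth (the filename is a hypothetical example):
# given a DSS file of 1DAY records, this writes "daily_mon.dss" into the
# current working directory, holding monthly PER-AVER means.
daytomonth("daily.dss")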
def test_set_message_level(self):
    fname = "test1.dss"
    print("No easy way to check automatically. Just look at the screen "
          "and see if a lot of messages are printed.")
    pyhecdss.set_message_level(10)
    d1 = pyhecdss.DSSFile(fname)
    d1.close()
    print("No easy way to check automatically. Just look at the screen; "
          "no DSS messages should be printed.")
    pyhecdss.set_message_level(0)
    d1 = pyhecdss.DSSFile(fname)
    d1.close()
def test_missing_dir(self):
    '''
    A missing directory in the filename causes a crash, so check that the
    directory exists before trying to open.
    '''
    fname = 'testnew.dss'
    if os.path.exists(fname):
        os.remove(fname)
    d = pyhecdss.DSSFile(fname)
    d.close()
    assert os.path.exists(fname)
    with pytest.raises(FileNotFoundError):
        fname2 = 'no_such_dir/testnew.dss'
        d = pyhecdss.DSSFile(fname2)
        d.close()
def PP_DSS():
    inputfile = "./RO_island.dss"
    outputfile = "./DP_island.dss"
    pyhecdss.set_message_level(0)
    dssfh = pyhecdss.DSSFile(inputfile)
    dssofh = pyhecdss.DSSFile(outputfile, create_new=True)
    plist = dssfh.get_pathnames()
    for p in plist:
        df, u, p = dssfh.read_rts(p)
        # Scale all values by 0.25
        df.values[:] = df.values[:] * 0.25
        dssofh.write_rts(df.columns[0].replace("RO-FLOW", "DP-FLOW"), df, u, p)
    dssfh.close()
    dssofh.close()
def test_write_its_series(self):
    fname = "test1.dss"
    pathname = '/TEST/ITS1/VANILLA//IR-YEAR/RANDOM/'
    ta = pd.to_datetime(['01apr1990', '05nov1991', '07apr1997'])
    df = pd.Series([0.5, 0.6, 0.7], index=ta)
    cunits, ctype = 'CCC', 'INST-VAL'
    with pyhecdss.DSSFile(fname) as dssfile2:
        dssfile2.write_its(pathname, df, cunits, ctype)
    with pyhecdss.DSSFile(fname) as dssfile1:
        df2, cunits2, ctype2 = dssfile1.read_its(pathname, "01JAN1990", "01JAN1998")
    self.assertEqual(ctype, ctype2)
    self.assertEqual(cunits, cunits2)
    self.assertEqual(df2.iloc[0, 0], df.iloc[0])
    self.assertEqual(df2.iloc[1, 0], df.iloc[1])
    self.assertEqual(df2.iloc[2, 0], df.iloc[2])
def test_store_as_instval(tsdaily):
    dssfilename = 'test_offset.dss'
    cleanup(dssfilename)
    with pyhecdss.DSSFile(dssfilename, create_new=True) as dssfh:
        pathname = tsdaily.columns[0]
        dssfh.write_rts(pathname, tsdaily, 'XXX', 'INST-VAL')
    with pyhecdss.DSSFile(dssfilename) as dssfh:
        dfcat = dssfh.read_catalog()
        plist = dssfh.get_pathnames(dfcat[dfcat.F == 'TEST-OFFSET'])
        assert len(plist) == 1
        df, cunits, ctype = dssfh.read_rts(plist[0])
        assert cunits == 'XXX'
        assert ctype == 'INST-VAL'
        pd.testing.assert_series_equal(tsdaily.iloc[:, 0], df.iloc[:, 0],
                                       check_names=False)
def test_write_its(self):
    fname = "test1.dss"
    dssfile1 = pyhecdss.DSSFile(fname)
    pathname = '/TEST/ITS1/VANILLA//IR-YEAR/RANDOM/'
    ta = pd.to_datetime(['01apr1990', '05nov1991', '07apr1997'])
    df = pd.DataFrame([0.5, 0.6, 0.7], index=ta, columns=["random"])
    cunits, ctype = 'CCC', 'INST-VAL'
    dssfile2 = pyhecdss.DSSFile(fname)
    dssfile2.write_its(pathname, df, cunits, ctype)
    df2, cunits2, ctype2 = dssfile1.read_its(pathname, "01JAN1990", "01JAN1998")
    self.assertEqual(ctype, ctype2)
    self.assertEqual(cunits, cunits2)
    self.assertEqual(df2.iloc[0, 0], df.iloc[0, 0])
    self.assertEqual(df2.iloc[1, 0], df.iloc[1, 0])
    self.assertEqual(df2.iloc[2, 0], df.iloc[2, 0])
def test_write_ts(self):
    fname = "test_rts1.dss"
    dssfile1 = pyhecdss.DSSFile(fname)
    pathname = '/TEST1/ONLY1/VANILLA//1DAY/JUST-ONES/'
    startDateStr, endDateStr = '01JAN1990 0100', '01JAN1991 0100'
    dtr = pd.date_range(startDateStr, endDateStr, freq='1D')
    df = pd.DataFrame(np.ones(len(dtr), 'd'), index=dtr)
    cunits, ctype = 'CCC', 'INST-VAL'
    dssfile2 = pyhecdss.DSSFile(fname)
    dssfile2.write_rts(pathname, df, cunits, ctype)
    startDateStr = "01JAN1990"
    endDateStr = "01JAN1991"
    df2, cunits2, ctype2 = dssfile1.read_rts(pathname, startDateStr, endDateStr)
    self.assertEqual(ctype, ctype2)
    self.assertEqual(cunits, cunits2)
    # Verify the read-back values, not the frame that was written
    self.assertEqual(1, df2.iloc[0, 0])
def test_store_and_read():
    dssfilename = 'testbug1.dss'
    cleanup(dssfilename)
    arr = np.array([1.0, 2.0, 3.0])
    dfr = pd.DataFrame(arr, index=pd.period_range('NOV1990', periods=len(arr), freq='1M'))
    with pyhecdss.DSSFile(dssfilename, create_new=True) as d:
        d.write_rts('/SAMPLE0/ARR/OP//1MON//', dfr, '', 'PER-AVER')
    with pyhecdss.DSSFile(dssfilename) as d:
        catdf = d.read_catalog()
        plist = d.get_pathnames(catdf)
        dfr2, units, ctype = d.read_rts(plist[0])
        pd.testing.assert_series_equal(dfr.iloc[:, 0], dfr2.iloc[:, 0],
                                       check_names=False)
def do_catalog_with_lock(dssfile):
    '''
    Does the catalog after first acquiring a system-wide lock on a lockfile.
    This is needed because HEC-DSS version 6 uses a single temporary catalog
    file shared across all processes attempting to catalog a DSS file.
    '''
    with pyhecdss.DSSFile(dssfile) as dssh:
        dssh.do_catalog()
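
# The body above does not itself show the lock acquisition the docstring
# describes. A minimal sketch of the cross-process locking, assuming the
# third-party `filelock` package and a hypothetical lockfile path:
from filelock import FileLock

def do_catalog_with_filelock(dssfile, lockfile='dss_catalog.lock'):
    # Block until no other process holds the lock, then catalog.
    with FileLock(lockfile):
        with pyhecdss.DSSFile(dssfile) as dssh:
            dssh.do_catalog()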
def test_except_on_bad_path(self):
    fname = "test1.dss"
    dssfile = pyhecdss.DSSFile(fname)
    pathname = '/SAMPLE/INVALID_MARKER/WAVE/01JAN1990/15MIN/SAMPLE1/'
    sdate = '01JAN1990'
    edate = '31JAN1990'
    # values, units, periodtype = dssfile.read_rts(pathname, sdate, edate)
    self.assertRaises(RuntimeError, dssfile.read_rts, pathname, sdate, edate)
def test_num_values_in_interval(self):
    fname = 'testnew.dss'
    if os.path.exists(fname):
        os.remove(fname)
    d = pyhecdss.DSSFile(fname)
    # Only checking that the counts are at least as large as expected;
    # HECLIB will return the exact number of values found.
    assert d.num_values_in_interval('01JAN2000', '01FEB2000', '1DAY') > 31
    assert d.num_values_in_interval('01JAN2000', '01FEB2000', '1MON') > 1
    assert d.num_values_in_interval('01JAN2000', '01FEB2000', '1YEAR') > 0
    d.close()
def test_store_as_perval(tsdaily):
    dssfilename = 'test_offset.dss'
    cleanup(dssfilename)
    with pyhecdss.DSSFile(dssfilename, create_new=True) as dssfh:
        pathname = tsdaily.columns[0]
        dssfh.write_rts(pathname, tsdaily, 'XXX', 'PER-VAL')
    with pyhecdss.DSSFile(dssfilename) as dssfh:
        dfcat = dssfh.read_catalog()
        plist = dssfh.get_pathnames(dfcat[dfcat.F == 'TEST-OFFSET'])
        assert len(plist) == 1
        df, cunits, ctype = dssfh.read_rts(plist[0])
        assert cunits == 'XXX'
        assert ctype == 'PER-VAL'
        # FIXME: this is asserting as a failure; see issue
        # https://github.com/CADWRDeltaModeling/pyhecdss/issues/12
        with pytest.raises(AssertionError):
            pd.testing.assert_frame_equal(tsdaily, df, check_names=False,
                                          check_column_type=False)
def changepaths(inDSSfile, pathfile, outDSSfile, EPART):
    # Each pathfile line is comma-separated: B part (island), output A part, output F part
    islands = []
    with open(pathfile, 'r') as f0:
        for line in f0:
            if line:
                islands.append(line)
    dssifh = pyhecdss.DSSFile(inDSSfile)
    dssofh = pyhecdss.DSSFile(outDSSfile, create_new=True)
    for templ in islands:
        pathin = "//" + templ.split(",")[0].strip() + "/////"
        cpart = get_pathname(dssifh, pathin).split("/")[3]
        tdss, cunits, ctype = dssifh.read_rts(get_pathname(dssifh, pathin))
        pathout = ("/" + templ.split(",")[1].strip() + "/" +
                   templ.split(",")[0].strip() + "/" + cpart + "//" +
                   EPART + "/" + templ.split(",")[2].strip() + "/")
        dssofh.write_rts(pathout, tdss.shift(freq='D'), cunits, ctype)
    dssifh.close()
    dssofh.close()
def test_write_rts_series(self):
    '''
    write_rts should work with pandas.Series as well.
    '''
    fname = "test_rts1.dss"
    pathname = '/TEST1/ONLY1/VANILLA//1DAY/JUST-ONES-SERIES/'
    startDateStr, endDateStr = '01JAN1990 0100', '01JAN1991 0100'
    dtr = pd.date_range(startDateStr, endDateStr, freq='1D')
    s = pd.Series(np.ones(len(dtr), 'd'), index=dtr)
    cunits, ctype = 'CCC', 'INST-VAL'
    with pyhecdss.DSSFile(fname, create_new=True) as dssfile2:
        dssfile2.write_rts(pathname, s, cunits, ctype)
    startDateStr = "01JAN1990"
    endDateStr = "01JAN1991"
    with pyhecdss.DSSFile(fname) as dssfile1:
        df2, cunits2, ctype2 = dssfile1.read_rts(pathname, startDateStr, endDateStr)
    self.assertEqual(ctype, ctype2)
    self.assertEqual(cunits, cunits2)
    # Verify the read-back values, not the series that was written
    self.assertEqual(1, df2.iloc[0, 0])
def test_read_its(self):
    fname = "test1.dss"
    dssfile = pyhecdss.DSSFile(fname)
    pathname = '/SAMPLE/ITS1/RANDOM/01JAN1990 - 01JAN1992/IR-YEAR/SAMPLE2/'
    values, units, periodtype = dssfile.read_its(pathname)
    self.assertEqual(units, 'YYY')
    self.assertEqual(periodtype, 'INST-VAL')
    self.assertEqual(len(values), 3)
    # get series
    vseries = values.iloc[:, 0]
    self.assertTrue(abs(vseries.at['01JAN1990 0317'] - 1.5) < 1e-03)
    self.assertTrue(abs(vseries.at['05SEP1992 2349'] - 2.7) < 1e-03)
def test_read_ts(self):
    fname = "test1.dss"
    dssfile = pyhecdss.DSSFile(fname)
    pathname = '/SAMPLE/SIN/WAVE/01JAN1990/15MIN/SAMPLE1/'
    sdate = '01JAN1990'
    edate = '31JAN1990'
    values, units, periodtype = dssfile.read_rts(pathname, sdate, edate)
    self.assertEqual(units, 'UNIT-X')
    self.assertEqual(periodtype, 'INST-VAL')
    # 96 15-minute values per day
    self.assertEqual(len(values['10JAN1990':'11JAN1990'].values), 96 * 2)
    # get series
    vseries = values.iloc[:, 0]
    self.assertTrue(abs(vseries.at['01JAN1990 0430'] - (-0.42578)) < 1e-03)
def store(self, df, units, bpart, cpart, epart, fpart):
    with pyhecdss.DSSFile(self.fname, create_new=True) as dh:
        if PostProCache.is_rts(df):
            dh.write_rts(
                '/%s/%s/%s//%s/%s/' % (PostProCache.A_PART, bpart.upper(),
                                       cpart.upper(), epart.upper(), fpart.upper()),
                df, units.upper(), 'INST-VAL')
        else:
            dh.write_its(
                '/%s/%s/%s//%s/%s/' % (PostProCache.A_PART, bpart.upper(),
                                       cpart.upper(), PostProCache.IRR_E_PART,
                                       fpart.upper()),
                df, units.upper(), 'INST-VAL')
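
# PostProCache.is_rts is not shown in this snippet. A plausible sketch (an
# assumption, not the actual implementation): treat the data as a regular
# time series when its index carries a fixed frequency, and store it as
# irregular otherwise.
def is_rts_sketch(df):
    idx = df.index
    # DatetimeIndex/PeriodIndex expose freq; DatetimeIndex can also infer one.
    freq = getattr(idx, 'freq', None) or getattr(idx, 'inferred_freq', None)
    return freq is not None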
def prep_df(scenarios, sta, var, intvl, df_wyt, period, src='ALL'):
    """
    Generate a DataFrame for the required station + variable + time interval +
    time period from sets of DSM2 input, output, or postprocessed DSS files,
    joined with water year type info.

    Returns:
    --------
    pandas.DataFrame :
        a long-format DataFrame containing all the requested data, with
        scenario name, pathname parts, and water year type columns added.
    """
    dfs = []
    for scenario in scenarios:
        fpath = scenario['fpath']
        fparts = None
        if src != 'ALL':
            with pyhecdss.DSSFile(fpath) as d:
                catdf = d.read_catalog()
                fparts = catdf.F[0].split('-')[0] + '-' + src
                fparts = [fparts.upper()]
        name = scenario['name']
        df_sn = read_dss_to_df(
            fpath,
            bparts_to_read=sta,
            cparts_to_read=var,
            eparts_to_read=intvl,
            fparts_to_read=fparts,
        )
        df_sn['scenario_name'] = name
        dfs.append(df_sn)
    df = pd.concat(dfs)
    ds_parts = df['pathname'].map(lambda x: x.split('/'))
    df = df.assign(station=ds_parts.map(lambda x: x[2]),
                   variable=ds_parts.map(lambda x: x[3]),
                   interval=ds_parts.map(lambda x: x[5]),
                   year=lambda x: x['time'].map(lambda y: y.year),
                   month=lambda x: x['time'].map(lambda y: y.month))
    # Add a water year column; water years begin in October
    df = df.assign(wateryear=lambda x: x['year'])
    mask = df['month'] > 9
    df.loc[mask, 'wateryear'] += 1
    # Join water year types
    df = df.join(df_wyt.set_index('wy')['sac_yrtype'], on='wateryear')
    df = df[df['time'] >= period[0]]
    df = df[df['time'] <= period[1]]
    return df
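
# Hedged usage sketch for prep_df: the scenario dicts, station/variable names,
# water-year-type table, and file names below are hypothetical, shaped only by
# how prep_df consumes them (df_wyt needs 'wy' and 'sac_yrtype' columns).
scenarios = [
    {'name': 'baseline', 'fpath': 'baseline_out.dss'},
    {'name': 'project', 'fpath': 'project_out.dss'},
]
df_wyt = pd.DataFrame({'wy': [1990, 1991, 1992],
                       'sac_yrtype': ['C', 'D', 'W']})
period = (pd.Timestamp('1990-10-01'), pd.Timestamp('1992-09-30'))
df = prep_df(scenarios, ['RSAC054'], ['EC'], ['1DAY'], df_wyt, period)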
def split_BBID(divfile, spgfile, drnfile, rofile, outputfile, option):
    Tisland = 168
    DCD_paths = ["DIV-WO-SPG-FLOW", "SPG-FLOW", "DRN-WO-RO-FLOW", "RO-FLOW"]
    inputfiles = [divfile, spgfile, drnfile, rofile]
    # Reduce BBID amounts from the island outputs and add BBID into the island outputs
    BBIDisl = [33, 34, 41, 103, 128, 130]
    for ifile in range(len(inputfiles)):
        extfile = "ext_" + inputfiles[ifile][2::]
        orgfile = inputfiles[ifile]
        dssout = pyhecdss.DSSFile(orgfile, create_new=True)
        for i in range(len(BBIDisl)):
            path1 = "/DICU-ISLAND/" + str(BBIDisl[i]) + "/" + DCD_paths[ifile] + "//1DAY/DWR-BDO/"
            path2 = "/BBID/" + str(BBIDisl[i]) + "/" + DCD_paths[ifile] + "//1DAY//"
            tdss1 = get_rts(orgfile, path1)
            tdss2 = get_rts(extfile, path2)
            pathout = "/BBID/" + str(BBIDisl[i]) + "/" + DCD_paths[ifile] + "//1DAY/DWR-BDO/"
            dssout.write_rts(pathout, tdss2[0][0], tdss2[0][1], tdss2[0][2])
            pathout = path1.replace(str(BBIDisl[i]), str(BBIDisl[i]) + "_w_BBID")
            dssout.write_rts(pathout, tdss1[0][0], tdss1[0][1], tdss1[0][2])
            tdss1[0][0].iloc[:, 0] = tdss1[0][0].iloc[:, 0] - tdss2[0][0].iloc[:, 0]
            dssout.write_rts(path1, tdss1[0][0], tdss1[0][1], tdss1[0][2])
        dssifh2 = pyhecdss.DSSFile(extfile, create_new=True)
        dfcat = dssifh2.read_catalog()
        dfpath = dfcat[(dfcat.A != "BBID")]
        pathnames = dssifh2.get_pathnames(dfpath)
        for i in range(len(pathnames)):
            path1 = "/DICU-ISLAND/" + pathnames[i].split("/")[2] + "/" + DCD_paths[ifile] + "//1DAY/DWR-BDO/"
            tdss1, cunits, ctype = dssout.read_rts(get_pathname(dssout, path1, 5))
            tdss2, cunits, ctype = dssifh2.read_rts(pathnames[i])
            if option == 2:
                tdss1.iloc[:, 0] = tdss1.iloc[:, 0] - tdss2.iloc[:, 0]
            dssout.write_rts(path1, tdss1, cunits, ctype)
        dssifh2.close()
        dssout.close()
def test_read_write_cycle_rts():
    '''
    Test reading and writing of period time stamped data so that reads and
    writes don't result in shifting the data.
    '''
    fname = "test2.dss"
    if os.path.exists(fname):
        os.remove(fname)
    path = '/SAMPLE/SIN/WAVE/01JAN1990 - 01JAN1990/15MIN/SAMPLE1/'
    sina = np.sin(np.linspace(-np.pi, np.pi, 201))
    dfr = pd.DataFrame(sina,
                       index=pd.period_range('01jan1990 0100', periods=len(sina), freq='15T'),
                       columns=[path])
    d = pyhecdss.DSSFile(fname, create_new=True)
    unit2, ptype2 = 'UNIT-X', 'PER-VAL'
    d.write_rts(path, dfr, unit2, ptype2)
    d.close()

    d2 = pyhecdss.DSSFile(fname)
    plist2 = d2.get_pathnames()
    path = plist2[0]
    dfr2, unit2, ptype2 = d2.read_rts(path)
    d2.close()
    pd.testing.assert_frame_equal(dfr, dfr2)
def islandtoDSM2node(divfile, spgfile, drnfile, rofile, outputfile):
    Tisland = 168
    DCD_paths = ["DIV-WO-SPG-FLOW", "SPG-FLOW", "DRN-WO-RO-FLOW", "RO-FLOW"]
    divratefile = '../../../NODCU/DIVFCTR_CS3_NorOMR.2020'
    drnratefile = '../../../NODCU/DRNFCTR_CS3_NorOMR.2020'
    inputfiles = [divfile, spgfile, drnfile, rofile]
    # Reduce BBID amounts from the island outputs and add BBID into the island outputs
    BBIDisl = [33, 34, 41, 103, 128, 130]
    # Allocate island values to DSM2 nodes
    divalloc = 687
    drnalloc = 427
    divisl = [0] * divalloc
    divnode = [0] * divalloc
    divrate = [0.0] * divalloc
    drnisl = [0] * drnalloc
    drnnode = [0] * drnalloc
    drnrate = [0.0] * drnalloc
    f1 = open(divratefile)
    ili = 0
    maxnode = 0
    for line in f1:
        ili += 1
        if line:
            if ili > 4:
                if line.strip() != "":
                    if int(line[0:5]) > 0 and int(line[5:11]) > 0:
                        divisl[ili - 5] = int(line[0:5])
                        divnode[ili - 5] = int(line[5:11])
                        divrate[ili - 5] = float(line[12:len(line)]) * 0.01
    f1.close()
    f2 = open(drnratefile)
    ili = 0
    for line in f2:
        ili += 1
        if line:
            if ili > 4:
                if line.strip() != "":
                    if int(line[0:5]) > 0 and int(line[5:11]) > 0:
                        drnisl[ili - 5] = int(line[0:5])
                        drnnode[ili - 5] = int(line[5:11])
                        drnrate[ili - 5] = float(line[12:len(line)]) * 0.01
    f2.close()
    nodes = []
    for i in range(len(divnode)):
        if divnode[i] not in nodes:
            nodes.append(divnode[i])
    for i in range(len(drnnode)):
        if drnnode[i] not in nodes:
            nodes.append(drnnode[i])
    Sortednodes = np.sort(nodes)
    dssout = pyhecdss.DSSFile(outputfile, create_new=True)
    for ifile in range(len(inputfiles) - 1):
        orgfile = inputfiles[ifile]
        dssinputf = pyhecdss.DSSFile(orgfile)
        if ifile == 0 or ifile == 1:
            for i in range(len(Sortednodes)):
                nonode = 0
                if Sortednodes[i] > 0:
                    for j in range(len(divnode)):
                        if Sortednodes[i] == divnode[j]:
                            nonode += 1
                            pathisl = "/DICU-ISLAND/" + str(divisl[j]) + "/////"
                            if nonode == 1:
                                tdss1, cunits, ctype = dssinputf.read_rts(get_pathname(dssinputf, pathisl, 2))
                                tdss1.iloc[:, 0] = tdss1.iloc[:, 0] * divrate[j]
                            else:
                                tdss2, cunits, ctype = dssinputf.read_rts(get_pathname(dssinputf, pathisl, 2))
                                tdss1.iloc[:, 0] = tdss1.iloc[:, 0] + tdss2.iloc[:, 0] * divrate[j]
                    if nonode == 0:
                        pathisl = "/DICU-ISLAND/1/////"
                        tdss1, cunits, ctype = dssinputf.read_rts(get_pathname(dssinputf, pathisl, 2))
                        tdss1.iloc[:, 0] = tdss1.iloc[:, 0] * 0.0
                    if ifile == 0:
                        pathout = "/DICU-HIST+NODE/" + str(Sortednodes[i]) + "/DIV-FLOW//1DAY/DWR-BDO/"
                    elif ifile == 1:
                        pathout = "/DICU-HIST+NODE/" + str(Sortednodes[i]) + "/SEEP-FLOW//1DAY/DWR-BDO/"
                    dssout.write_rts(pathout, tdss1, cunits, ctype)
        elif ifile == 2:
            orgfile2 = inputfiles[ifile + 1]
            dssinputf2 = pyhecdss.DSSFile(orgfile2)
            for i in range(len(Sortednodes)):
                nonode = 0
                if Sortednodes[i] > 0:
                    for j in range(len(drnnode)):
                        if Sortednodes[i] == drnnode[j]:
                            nonode += 1
                            pathisl = "/DICU-ISLAND/" + str(drnisl[j]) + "/////"
                            if nonode == 1:
                                tdss1, cunits, ctype = dssinputf.read_rts(get_pathname(dssinputf, pathisl, 2))
                                tdssro, cunits, ctype = dssinputf2.read_rts(get_pathname(dssinputf2, pathisl, 2))
                                tdss1.iloc[:, 0] = tdss1.iloc[:, 0] * drnrate[j] + tdssro.iloc[:, 0] * drnrate[j]
                            else:
                                tdss2, cunits, ctype = dssinputf.read_rts(get_pathname(dssinputf, pathisl, 2))
                                tdssro, cunits, ctype = dssinputf2.read_rts(get_pathname(dssinputf2, pathisl, 2))
                                tdss1.iloc[:, 0] = tdss1.iloc[:, 0] + tdss2.iloc[:, 0] * drnrate[j] + tdssro.iloc[:, 0] * drnrate[j]
                    if nonode == 0:
                        pathisl = "/DICU-ISLAND/1/////"
                        tdss1, cunits, ctype = dssinputf.read_rts(get_pathname(dssinputf, pathisl, 2))
                        tdss1.iloc[:, 0] = tdss1.iloc[:, 0] * 0.0
                    pathout = "/DICU-HIST+NODE/" + str(Sortednodes[i]) + "/DRAIN-FLOW//1DAY/DWR-BDO/"
                    dssout.write_rts(pathout, tdss1, cunits, ctype)
            dssinputf2.close()
        for i in range(len(BBIDisl)):
            pathname = "/BBID/" + str(BBIDisl[i]) + "/" + DCD_paths[ifile] + "//1DAY/DWR-BDO/"
            if i == 0:
                print(pathname, inputfiles[ifile])
                tdssb = get_rts(inputfiles[ifile], pathname)
                # print("1st Tdssb =", tdssb)
            else:
                tdsst = get_rts(inputfiles[ifile], pathname)
                tdssb[0][0].iloc[:, 0] += tdsst[0][0].iloc[:, 0]
            if ifile == 2:
                pathname = "/BBID/" + str(BBIDisl[i]) + "/" + DCD_paths[ifile + 1] + "//1DAY/DWR-BDO/"
                tdssro = get_rts(inputfiles[ifile + 1], pathname)
                tdssb[0][0].iloc[:, 0] += tdssro[0][0].iloc[:, 0]
        if ifile == 0:
            pathout = "/DICU-HIST+RSVR/BBID/DIV-FLOW//1DAY/DWR-BDO/"
        elif ifile == 1:
            pathout = "/DICU-HIST+RSVR/BBID/SEEP-FLOW//1DAY/DWR-BDO/"
        elif ifile == 2:
            pathout = "/DICU-HIST+RSVR/BBID/DRAIN-FLOW//1DAY/DWR-BDO/"
        # FIXME: why shift by 1 day? to match older results
        dssout.write_rts(pathout, tdssb[0][0].shift(-1), tdssb[0][1], tdssb[0][2])
        dssinputf.close()
    dssout.close()
def test_read_catalog(self):
    fname = "test1.dss"
    dssfile = pyhecdss.DSSFile(fname)
    df = dssfile.read_catalog()
    self.assertTrue(len(df) >= 1)
def DCD_to_CALSIM_ISLAND(divfile, spgfile, drnfile, rofile, inputfile):
    inputfile = inputfile.split(".")[0] + "_mon.dss"
    outputfile = inputfile.split(".")[0] + "_C3.dss"
    dssofh = pyhecdss.DSSFile(outputfile, create_new=True)
    DCD_C3_islands = "../DCD_CALSIM3_islands_N.csv"
    C3_nodes = ["OMR", "SJR_EAST", "SJR_WEST", "SAC_WEST", "MOK",
                "SAC_SOUTH", "SAC_NORTH", "50_PA2"]
    C3_paths = ["IRR", "SEEP", "DRN"]
    DSM2N_paths = ["DIV-FLOW", "SEEP-FLOW", "DRAIN-FLOW"]
    DCD_paths = ["DIV-WO-SPG-FLOW", "SPG-FLOW", "DRN-WO-RO-FLOW", "RO-FLOW"]
    f0 = open(DCD_C3_islands)
    DCD_islands = []
    ili = 0
    for line in f0:
        ili += 1
        if line:
            if ili > 1:  # skip the header line
                DCD_islands.append(line)
    f0.close()
    for ipath in range(0, len(C3_paths)):
        if ipath == 0:
            dssifh = pyhecdss.DSSFile(divfile.split(".")[0] + "_mon.dss")
        elif ipath == 1:
            dssifh = pyhecdss.DSSFile(spgfile.split(".")[0] + "_mon.dss")
        elif ipath == 2:
            dssifh = pyhecdss.DSSFile(drnfile.split(".")[0] + "_mon.dss")
            dssifh2 = pyhecdss.DSSFile(rofile.split(".")[0] + "_mon.dss")
        for c3j in range(0, len(C3_nodes) - 1):
            iisland = 0
            for i in range(0, len(DCD_islands)):
                if C3_nodes[c3j] == DCD_islands[i].split(",")[1].strip():
                    iisland += 1
                    tempIsl = int(DCD_islands[i].split(",")[0].strip())
                    if ipath == 0 or ipath == 1:
                        path = "/DICU-ISLAND/" + DCD_islands[i].split(",")[0].strip() + "/" + DCD_paths[ipath] + "//1MON/DWR-BDO/"
                        if iisland == 1:
                            tdss, cunits, ctype = dssifh.read_rts(get_pathname(dssifh, path, 5))
                        else:
                            ttss2, cunits, ctype = dssifh.read_rts(get_pathname(dssifh, path, 5))
                            tdss.iloc[:, 0] += ttss2.iloc[:, 0]
                    elif ipath == 2:
                        path = "/DICU-ISLAND/" + DCD_islands[i].split(",")[0].strip() + "/" + DCD_paths[ipath] + "//1MON/DWR-BDO/"
                        path2 = "/DICU-ISLAND/" + DCD_islands[i].split(",")[0].strip() + "/" + DCD_paths[ipath + 1] + "//1MON/DWR-BDO/"
                        if iisland == 1:
                            tdss, cunits, ctype = dssifh.read_rts(get_pathname(dssifh, path, 5))
                            tdss_ro, cunits, ctype = dssifh2.read_rts(get_pathname(dssifh2, path2, 5))
                            tdss.iloc[:, 0] += tdss_ro.iloc[:, 0]
                        else:
                            ttss2, cunits, ctype = dssifh.read_rts(get_pathname(dssifh, path, 5))
                            ttss_ro2, cunits, ctype = dssifh2.read_rts(get_pathname(dssifh2, path2, 5))
                            tdss.iloc[:, 0] += ttss2.iloc[:, 0] + ttss_ro2.iloc[:, 0]
            path = "/CALSIM/" + C3_paths[ipath] + "_" + C3_nodes[c3j] + "/" + C3_paths[ipath] + "//1MON/L2015A/"
            dssofh.write_rts(path, tdss, cunits, ctype)
        dssifh.close()
        dssifh = pyhecdss.DSSFile(inputfile)
        pathin = "/DICU-HIST+RSVR/BBID/" + DSM2N_paths[ipath] + "//1MON/DWR-BDO/"
        tdssb, cunits, ctype = dssifh.read_rts(get_pathname(dssifh, pathin, 5))
        pathout = "/CALSIM/" + C3_paths[ipath] + "_" + C3_nodes[len(C3_nodes) - 1] + "/" + C3_paths[ipath] + "//1MON/L2015A/"
        dssofh.write_rts(pathout, tdssb, cunits, ctype)
        dssifh.close()
        if ipath == 2:
            dssifh2.close()
    dssofh.close()
import pyhecdss
import datetime

if __name__ == '__main__':
    pyhecdss.set_message_level(0)
    d = pyhecdss.DSSFile('./ITP_PP_out_ec.dss')
    s = datetime.datetime.now()
    catdf = d.read_catalog()
    print('catalog read in :', datetime.datetime.now() - s)
    plist = d.get_pathnames()
    print('Reading ', len(plist), '...')
    s = datetime.datetime.now()
    for path in plist:
        si = datetime.datetime.now()
        df, u, p = d.read_rts(path)
        print('read ', path, ' in ', datetime.datetime.now() - si)
    print('read all in ', datetime.datetime.now() - s)
def test_catalog(self):
    fname = "test1.dss"
    dssfile = pyhecdss.DSSFile(fname)
    dssfile.catalog()
    self.assertTrue(os.path.exists('test1.dsc'))
    self.assertTrue(os.path.exists('test1.dsd'))
def test_open_close(self):
    fname = "test1.dss"
    dssfile = pyhecdss.DSSFile(fname)
    dssfile.open()
    dssfile.close()
def read_dss_to_df(fpath, bparts_to_read=None, cparts_to_read=None,
                   eparts_to_read=None, fparts_to_read=None,
                   start_date_str=None, end_date_str=None,
                   with_metadata=False):
    """
    Convert a DSS file into a DataFrame.

    Parameters:
    -----------
    fpath : str
        path to the DSS file
    bparts_to_read : list, optional
        list of B parts to read. If None, all available paths are read.
    cparts_to_read : list, optional
        list of C parts to read. If None, all available paths are read.
    eparts_to_read : list, optional
        list of E parts to read. If None, all available paths are read.
    fparts_to_read : list, optional
        list of F parts to read. If None, all available paths are read.
    start_date_str : str, optional
        a date in the format '%Y%m%d' giving the earliest date to fetch
        (see http://strftime.org/)
    end_date_str : str, optional
        a date in the format '%Y%m%d' giving the last date to fetch
        (see http://strftime.org/)
    with_metadata : boolean, optional
        If True, add two columns for units and period type.

    Returns:
    --------
    pandas.DataFrame :
        a long-format DataFrame containing all the matching data in the
        DSS file given as input.
    """
    pyhecdss.set_message_level(2)
    dssfile = pyhecdss.DSSFile(fpath)  # create DSSFile object
    paths = dssfile.get_pathnames()    # fetch all internal paths
    dfs = []
    for path in paths:
        parts = path.split('/')
        if bparts_to_read is not None and parts[2] not in bparts_to_read:
            continue
        if cparts_to_read is not None and parts[3] not in cparts_to_read:
            continue
        if eparts_to_read is not None and parts[5] not in eparts_to_read:
            continue
        if fparts_to_read is not None and parts[6] not in fparts_to_read:
            continue
        data, cunits, ctype = dssfile.read_rts(path, start_date_str, end_date_str)
        try:
            # PeriodIndex (period-type data) is converted to timestamps
            data.index = data.index.to_timestamp()
        except Exception:
            pass
        data = pd.melt(data.reset_index(), id_vars=['index'],
                       value_vars=[path], var_name='pathname')
        data.rename(columns={'index': 'time'}, inplace=True)
        if with_metadata:
            data['cunits'] = cunits
            data['ctype'] = ctype
        dfs.append(data)
    if not dfs:  # no paths matched the filters
        raise ValueError('No timeseries is read')
    df = pd.concat(dfs)
    return df
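
# Usage sketch against the sample file used by the tests above: select the
# 15-minute sine-wave series by its C and E parts and keep the units and
# period-type columns alongside each row.
df = read_dss_to_df('test1.dss',
                    cparts_to_read=['WAVE'],
                    eparts_to_read=['15MIN'],
                    with_metadata=True)
print(df.columns.tolist())  # ['time', 'pathname', 'value', 'cunits', 'ctype']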
def test_with(self):
    with pyhecdss.DSSFile('test1.dss') as d:
        assert len(d.read_catalog()) > 0
def test_get_pathnames(self):
    fname = "test1.dss"
    dssfile = pyhecdss.DSSFile(fname)
    pathnames = dssfile.get_pathnames()
    self.assertTrue(len(pathnames) > 0)