def do_mb():
    dir = 'Z:/calibration/'
    type = 'FLOW'
    stime = '01JAN2008 0000'
    etime = '31DEC2008 2400'
    fileobs = dir + 'observed/observed_flow_stage_for_compare_plots.dss'
    stations = ['RSAC128', 'SSS', 'SUT', 'RSAC155']
    direction = [-1, -1, -1, 1]
    dss = HecDss.open(fileobs, True)
    dss.setTimeWindow(stime, etime)
    plot = newPlot('Mass Balance Components (SAC)')
    mass_balance = None
    for i in range(0, len(stations)):
        sta = stations[i]
        dir = direction[i]
        data = get_matching(dss, 'A=%s C=%s E=15MIN' % (sta, type))
        if data == None:
            data = get_matching(dss, 'A=%s C=%s E=1HOUR' % (sta, type))
        # average to a common 1HOUR interval before the series are combined
        data = TimeSeriesMath(data).transformTimeSeries("1HOUR", "", "AVE", 0)
        data = TimeSeriesMath(data.data)
        if dir == -1:
            data = data.negative()
        plot.addData(data.data)
        if mass_balance == None:
            mass_balance = data
        else:
            mass_balance = mass_balance.add(data)
    plot.showPlot()
    return mass_balance
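# Usage sketch for do_mb(): it opens the observed DSS file itself, so the only
# follow-up needed is inspecting the returned mass balance (printMath is the
# helper used in the CDEC example further down).
mb = do_mb()
printMath(mb)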
def do_compare(paths, dssfiles, title, doAverage=False, diffToFirst=False):
    data = []
    for i in range(len(paths)):
        d = get_matching(dssfiles[i], paths[i])
        if doAverage:
            d = average(d, "1DAY")
        data.append(d)
    if diffToFirst:
        for i in range(1, len(paths)):
            diff = TimeSeriesMath(data[i]).subtract(TimeSeriesMath(data[0]))
            # label the difference curve after the series it was computed from
            diff.container.location = data[i].location + '-DIFF'
            data.append(diff.data)
    plot(data, title)
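# Usage sketch for do_compare() with hypothetical file names and pathname
# patterns: compare one flow location across two simulation files as daily
# averages, differenced against the first file.
run_a = HecDss.open('Z:/hypothetical/run_a.dss', True)
run_b = HecDss.open('Z:/hypothetical/run_b.dss', True)
do_compare(['B=RSAC155 C=FLOW', 'B=RSAC155 C=FLOW'], [run_a, run_b],
           'RSAC155 FLOW comparison', doAverage=True, diffToFirst=True)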
def create_regression_line(drun, dobs, legend):
    drunm = TimeSeriesMath(drun)
    dobsm = TimeSeriesMath(dobs)
    paired = dobsm.generateDataPairs(drunm, False)
    pairedData = paired.data
    pairedData.fullName = legend
    reg = dobsm.multipleRegression([drunm], HecMath.UNDEFINED, HecMath.UNDEFINED)
    regData = reg.data
    a = regData.yOrdinates[0][1]
    b = regData.yOrdinates[0][0]
    regData.fullName = "//REGRESSION LINE////GENERATED/"
    maxVal = drunm.max()
    minVal = drunm.min()
    regData.xOrdinates[0] = a * minVal + b
    regData.xOrdinates[1] = a * maxVal + b
    regData.yOrdinates[0][0] = minVal
    regData.yOrdinates[0][1] = maxVal
    regData.yunits = pairedData.yunits
    regData.xunits = pairedData.xunits
    regData.xtype = pairedData.xtype
    regData.ytype = pairedData.ytype
    regData.xparameter = pairedData.xparameter
    regData.yparameter = pairedData.yparameter
    regData.location = pairedData.location
    regData.version = 'LINEAR REGRESSION'
    return regData, pairedData
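# Usage sketch for create_regression_line() with hypothetical pathnames,
# assuming the newPlot helper used in these scripts accepts paired-data
# containers as well as time series containers.
dss = HecDss.open('Z:/hypothetical/compare.dss', True)
dobs = get_matching(dss, 'A=RSAC155 C=FLOW E=1DAY')
drun = get_matching(dss, 'B=RSAC155 C=FLOW E=1DAY')
regData, pairedData = create_regression_line(drun, dobs, 'OBS VS RUN1')
scatter = newPlot('Observed vs simulated flow')
scatter.addData(pairedData)
scatter.addData(regData)
scatter.showPlot()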
def timeWindowMod(runtimeWindow, alternative, computeOptions):
    originalRTW = computeOptions.getRunTimeWindow()
    dssFile = DSS.open(computeOptions.getDssFilename(), originalRTW.getTimeWindowString())
    # pathname for breaches
    twmTSM = TimeSeriesMath(alternative.getTimeSeries())  # assumes this is the mapped input to TWM
    twmPath = twmTSM.getPath().split("/")  # use this for e/f parts
    breachPath = "/".join(["", "", "BREACHTRACKER-TIMESTEPS REMAINING", "TIMESTEPS REMAINING",
                           "", twmPath[5], twmPath[6], ""])
    # find start and end of breach timeseries
    breaches = dssFile.read(breachPath)
    dssFile.done()
    breachTSC = breaches.getData()
    start, end = None, None
    rtwStart = runtimeWindow.getStartTime().value()
    newStart = HecTime()  # keep track of start time that is a valid ResSim timestep
    for t, v in zip(breachTSC.times, breachTSC.values):
        if v > 0:
            if start is None:  # first non-zero
                start = t
            end = t
        # update until original start time occurs, make sure this is prev. timestep in ResSim
        # avoids interpolated input on start timestep in RAS
        if t <= rtwStart:
            newStart.set(t)
    # no breach
    if start is None:
        runtimeWindow.setStartTime(newStart)
        return runtimeWindow
    # compare and adjust if needed
    startTime = HecTime()
    startTime.set(start)
    startTime.subtractDays(RAS_START_BUFFER)  # add days to give RAS a little spin up time
    if startTime <= runtimeWindow.getStartTime():
        runtimeWindow.setStartTime(startTime)
    endTime = HecTime()
    endTime.set(end)
    endTime.addDays(RAS_END_BUFFER)  # buffer at end
    if endTime >= runtimeWindow.getEndTime():
        runtimeWindow.setEndTime(endTime)
    alternative.addComputeMessage("New time window set: %s" % runtimeWindow.getTimeWindowString())
    return runtimeWindow
def ts_normalize(data):
    """ Normalize time series by dividing by data's mean """
    datan = TimeSeriesMath(data).divide(TimeSeriesMath(data).mean())
    datan.data.fullName = datan.data.fullName + "-NORMED"
    return datan.data
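# Usage sketch for ts_normalize() with a hypothetical file and pathname;
# the helper expects a TimeSeriesContainer and returns one.
dss = HecDss.open('Z:/hypothetical/observed.dss', True)
flow = dss.get('/OBS/RSAC155/FLOW//1HOUR/DWR/')
p = newPlot('Normalized flow')
p.addData(ts_normalize(flow))
p.showPlot()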
def calculate_rms(run, obs):
    runt = TimeSeriesMath(run)
    obst = TimeSeriesMath(obs)
    tavg = obst.abs().sum() / obst.numberValidValues()
    diff = runt.subtract(obst)
    return math.fabs(
        math.sqrt(diff.multiply(diff).sum() / diff.numberValidValues()) / tavg) * math.log(tavg)
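# Usage sketch for calculate_rms(): put both series on a common daily interval
# first, as doall() does below (file and pathname patterns are hypothetical).
dss = HecDss.open('Z:/hypothetical/compare.dss', True)
dobs = get_matching(dss, 'A=RSAC155 C=FLOW')
drun = get_matching(dss, 'B=RSAC155 C=FLOW')
dobs_daily = TimeSeriesMath(dobs).transformTimeSeries('1DAY', None, 'AVE', 0)
drun_daily = TimeSeriesMath(drun).transformTimeSeries('1DAY', None, 'AVE', 0)
print 'RMS: %f' % calculate_rms(drun_daily.data, dobs_daily.data)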
def ts_period_operation(data, interval="1DAY", operation_type="AVE"):
    """ transforms the time series using a period operation with given
    interval (1DAY (default), 1HOUR, etc) and given operation type
    (AVE (default), MAX, MIN) """
    tdata = TimeSeriesMath(data).transformTimeSeries(interval, None, operation_type, 0)
    tdata.data.fullName = tdata.data.fullName + operation_type
    return tdata.data
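# Usage sketch for ts_period_operation(): daily maxima and minima of a record
# read from a hypothetical file and pathname.
dss = HecDss.open('Z:/hypothetical/observed.dss', True)
stage = dss.get('/OBS/RSAC155/STAGE//15MIN/DWR/')
daily_max = ts_period_operation(stage, '1DAY', 'MAX')
daily_min = ts_period_operation(stage, '1DAY', 'MIN')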
def average(data, average_interval):
    """ filter_type is one of
    "INT" - Interpolate at end of interval
    "MAX" - Maximum over interval
    "MIN" - Minimum over interval
    "AVE" - Average over interval
    "ACC" - Accumulation over interval
    "ITG" - Integration over interval
    "NUM" - Number of valid data over interval
    """
    filter_type = "AVE"
    return TimeSeriesMath(data).transformTimeSeries(average_interval, None, filter_type, 0).data
fileName = R"C:\project\DSSVue-Example-Scripts\src\CDEC\Oroville.dss" cdecName = R"C:\project\DSSVue-Example-Scripts\src\CDEC\Oroville.cdec" daysBack = 10 #readFromCDEC(fileName,cdecName,daysBack) dss = HecDss.open(fileName) storage = dss.read("//OROVILLE/STORAGE//1Day/CDEC/") inflow = dss.read("//OROVILLE/RESERVOIR INFLOW//1Day/CDEC/") printMath(storage) printMath(inflow) changeInStorage = storage.successiveDifferences().divide(1.98347) tsc = changeInStorage.getData() tsc.units = "CFS" tsc.type = "" tsc.parameter = "Change in Storage" changeInStorage = TimeSeriesMath(tsc) printMath(changeInStorage) outflow = inflow.subtract(changeInStorage) tsc = outflow.getData() tsc.parameter = "OUTFLOW" outflow = TimeSeriesMath(tsc) printMath(outflow) # inflow - outflow = [change in storage] # outflow = inflow - [change in storage] dss.done() sys.stdin.readline()
def doall(locations, fileobs, filerun1, filerun2, stime, etime,
          imageDir='d:/temp', weights=None, filter_type="AVE", normalize=False):
    # 'type' (the DSS C part, e.g. 'FLOW') is expected to be defined at module level
    obs = HecDss.open(fileobs, True)
    obs.setTimeWindow(stime, etime)
    run1 = HecDss.open(filerun1, True)
    run1.setTimeWindow(stime, etime)
    if filerun2 != None:
        run2 = HecDss.open(filerun2, True)
        run2.setTimeWindow(stime, etime)
    else:
        run2 = None
    rms1 = 0
    rms1_min, rms1_max = 0, 0
    rms2 = 0
    rms2_min, rms2_max = 0, 0
    rmsmap = {}
    #run2=None
    sumwts = 0
    average_interval = None
    for l in locations:
        data1 = get_matching(obs, 'A=%s C=%s E=15MIN' % (l, type))
        if data1 == None:
            data1 = get_matching(obs, 'A=%s C=%s E=1DAY' % (l, type))
        if data1 == None:
            data1 = get_matching(obs, 'A=%s C=%s E=IR-DAY' % (l, type))
        if data1 == None:
            data1 = get_matching(obs, 'A=%s C=%s E=1HOUR' % (l, type))
        drun1 = get_matching(run1, 'B=%s C=%s' % (l, type))
        if run2 != None:
            drun2 = get_matching(run2, 'B=%s C=%s' % (l, type))
        else:
            drun2 = None
        avg_intvl = "1DAY"
        if data1 != None:
            if average_interval != None:
                dobsd = TimeSeriesMath(data1).transformTimeSeries(average_interval, None, filter_type, 0)
            else:
                dobsd = TimeSeriesMath(data1)
            if normalize:
                dobsd = dobsd.divide(TimeSeriesMath(data1).mean())
            dobsm = TimeSeriesMath(data1).transformTimeSeries(avg_intvl, None, filter_type, 0)
            dobsm_max = TimeSeriesMath(data1).transformTimeSeries(avg_intvl, None, "MAX", 0)
            dobsm_max.data.fullName = dobsm_max.data.fullName + "MAX"
            dobsm_min = TimeSeriesMath(data1).transformTimeSeries(avg_intvl, None, "MIN", 0)
            dobsm_min.data.fullName = dobsm_min.data.fullName + "MIN"
            if normalize:
                dobsm = dobsm.divide(TimeSeriesMath(data1).mean())
        if drun1 == None:
            continue
        else:
            if average_interval != None:
                drun1d = TimeSeriesMath(drun1).transformTimeSeries(average_interval, None, filter_type, 0)
            else:
                drun1d = TimeSeriesMath(drun1)
            if normalize:
                drun1d = drun1d.divide(TimeSeriesMath(drun1).mean())
            if drun2 != None:
                if average_interval != None:
                    drun2d = TimeSeriesMath(drun2).transformTimeSeries(average_interval, None, filter_type, 0)
                else:
                    drun2d = TimeSeriesMath(drun2)
                if normalize:
                    drun2d = drun2d.divide(TimeSeriesMath(drun2).mean())
            drun1m = TimeSeriesMath(drun1).transformTimeSeries(avg_intvl, None, filter_type, 0)
            drun1m_max = TimeSeriesMath(drun1).transformTimeSeries(avg_intvl, None, "MAX", 0)
            drun1m_min = TimeSeriesMath(drun1).transformTimeSeries(avg_intvl, None, "MIN", 0)
            if normalize:
                drun1m = drun1m.divide(TimeSeriesMath(drun1).mean())
            if drun2 != None:
                drun2m = TimeSeriesMath(drun2).transformTimeSeries(avg_intvl, None, filter_type, 0)
                drun2m_max = TimeSeriesMath(drun2).transformTimeSeries(avg_intvl, None, "MAX", 0)
                drun2m_min = TimeSeriesMath(drun2).transformTimeSeries(avg_intvl, None, "MIN", 0)
                if normalize:
                    drun2m = drun2m.divide(TimeSeriesMath(drun2).mean())
            else:
                drun2m = None
        if weights != None:
            sumwts = sumwts + weights[l]
            lrms1 = calculate_rms(drun1m.data, dobsm.data) * weights[l]
            lrms1_min = calculate_rms(drun1m_min.data, dobsm_min.data) * weights[l]
            lrms1_max = calculate_rms(drun1m_max.data, dobsm_max.data) * weights[l]
            rms1 = rms1 + lrms1
            rms1_min = rms1_min + lrms1_min
            rms1_max = rms1_max + lrms1_max
            lrms2 = calculate_rms(drun2m.data, dobsm.data) * weights[l]
            lrms2_min = calculate_rms(drun2m_min.data, dobsm_min.data) * weights[l]
            lrms2_max = calculate_rms(drun2m_max.data, dobsm_max.data) * weights[l]
            rmsmap[l] = lrms1, lrms2, lrms1_min, lrms2_min, lrms1_max, lrms2_max
            rms2 = rms2 + lrms2
            rms2_min = rms2_min + lrms2_min
            rms2_max = rms2_max + lrms2_max
        plotd = newPlot("Hist vs New Geom [%s]" % l)
        if data1 != None:
            plotd.addData(dobsd.data)
        plotd.addData(drun1d.data)
        if drun2 != None:
            plotd.addData(drun2d.data)
        plotd.showPlot()
        legend_label = plotd.getLegendLabel(drun1d.data)
        legend_label.setText(legend_label.getText() + " [" + str(int(lrms1 * 100) / 100.) + "," +
                             str(int(lrms1_min * 100) / 100.) + "," + str(int(lrms1_max * 100) / 100.) + "]")
        legend_label = plotd.getLegendLabel(drun2d.data)
        legend_label.setText(legend_label.getText() + " [" + str(int(lrms2 * 100) / 100.) + "," +
                             str(int(lrms2_min * 100) / 100.) + "," + str(int(lrms2_max * 100) / 100.) + "]")
        plotd.setVisible(False)
        xaxis = plotd.getViewport(0).getAxis("x1")
        vmin = xaxis.getViewMin() + 261500.  # hardwired to around july 1, 2008
        xaxis.setViewLimits(vmin, vmin + 10000.)
        if data1 != None:
            pline = plotd.getCurve(dobsd.data)
            pline.setLineVisible(1)
            pline.setLineColor("blue")
            pline.setSymbolType(Symbol.SYMBOL_CIRCLE)
            pline.setSymbolsVisible(0)
            pline.setSymbolSize(3)
            pline.setSymbolSkipCount(0)
            pline.setSymbolFillColor(pline.getLineColorString())
            pline.setSymbolLineColor(pline.getLineColorString())
        g2dPanel = plotd.getPlotpanel()
        g2dPanel.revalidate()
        g2dPanel.paintGfx()
        plotm = newPlot("Hist vs New Geom Monthly [%s]" % l)
        plotm.setSize(1800, 1200)
        if data1 != None:
            plotm.addData(dobsm.data)
            #plotm.addData(dobsm_max.data)
            #plotm.addData(dobsm_min.data)
        plotm.addData(drun1m.data)
        #plotm.addData(drun1m_max.data)
        #plotm.addData(drun1m_min.data)
        if drun2 != None:
            plotm.addData(drun2m.data)
            #plotm.addData(drun2m_max.data)
            #plotm.addData(drun2m_min.data)
        plotm.showPlot()
        if data1 != None:
            pline = plotm.getCurve(dobsm.data)
            pline.setLineVisible(1)
            pline.setLineColor("blue")
            pline.setSymbolType(Symbol.SYMBOL_CIRCLE)
            pline.setSymbolsVisible(0)
            pline.setSymbolSize(3)
            pline.setSymbolSkipCount(0)
            pline.setSymbolFillColor(pline.getLineColorString())
            pline.setSymbolLineColor(pline.getLineColorString())
        plotm.setVisible(False)
        if data1 != None:
            plots = do_regression_plots(dobsm, drun1m, drun2m)
            if plots != None:
                spanel = plots.getPlotpanel()
                removeToolbar(spanel)
        mpanel = plotm.getPlotpanel()
        removeToolbar(mpanel)
        dpanel = plotd.getPlotpanel()
        removeToolbar(dpanel)
        from javax.swing import JPanel, JFrame
        from java.awt import GridBagLayout, GridBagConstraints
        mainPanel = JPanel()
        mainPanel.setLayout(GridBagLayout())
        c = GridBagConstraints()
        c.fill = c.BOTH
        c.weightx, c.weighty = 0.5, 1
        c.gridx, c.gridy, c.gridwidth, c.gridheight = 0, 0, 10, 4
        if data1 != None:
            if plots != None:
                pass  #mainPanel.add(spanel,c)
        c.gridx, c.gridy, c.gridwidth, c.gridheight = 0, 0, 10, 4
        c.weightx, c.weighty = 1, 1
        mainPanel.add(mpanel, c)
        c.gridx, c.gridy, c.gridwidth, c.gridheight = 0, 4, 10, 6
        mainPanel.add(dpanel, c)
        fr = JFrame()
        fr.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE)
        fr.getContentPane().add(mainPanel)
        fr.setSize(1100, 850)
        fr.show()
        mainPanel.setSize(1100, 850)
        mainPanel.setBackground(Color.WHITE)
        #import time; time.sleep(5)
        saveToPNG(mainPanel, imageDir + l + ".png")
    if weights != None:
        rms1 = (rms1 + rms1_min + rms1_max) / sumwts
        rms2 = (rms2 + rms2_min + rms2_max) / sumwts
    print 'RMS Run 1: %f' % rms1
    print 'RMS Run 2: %f' % rms2
    for loc in rmsmap.keys():
        print loc, rmsmap[loc]
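# Usage sketch for doall() with hypothetical files, locations and weights;
# the module-level 'type' variable (the DSS C part, e.g. 'FLOW') must be set first.
type = 'FLOW'
locations = ['RSAC155', 'RSAC128']
weights = {'RSAC155': 1.0, 'RSAC128': 0.5}
doall(locations, 'Z:/hypothetical/observed.dss', 'Z:/hypothetical/run1.dss',
      'Z:/hypothetical/run2.dss', '01JAN2008 0000', '31DEC2008 2400',
      imageDir='d:/temp/', weights=weights)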
def put_to_dss(site, dss):
    """Save timeseries to DSS File

    Parameters
    ----------
    site: json
        JSON object containing meta data about the site/parameter combination,
        time array and value array
    dss: HecDss DSS file object
        The open DSS file records are written to

    Returns
    -------
    None

    Raises
    ------
    Put to DSS exception handled with a message output saying site not saved,
    but continues on trying additional site/parameter combinations
    """
    Site = namedtuple('Site', site.keys())(**site)
    parameter, unit, data_type, version = usgs_code[Site.code]
    times = [HecTime(t, HecTime.MINUTE_GRANULARITY).value() for t in Site.times]
    # find the smallest timestep (minutes) in the record
    timestep_min = None
    for t in range(len(times) - 1):
        ts = abs(times[t + 1] - times[t])
        if timestep_min is None or ts < timestep_min:
            timestep_min = ts
    epart = TimeStep().getEPartFromIntervalMinutes(timestep_min)
    # Set the pathname
    pathname = '/{0}/{1}/{2}//{3}/{4}/'.format(ws_name, Site.site_number,
                                               parameter, epart, version).upper()
    apart, bpart, cpart, _, _, fpart = pathname.split('/')[1:-1]
    container = TimeSeriesContainer()
    container.fullName = pathname
    container.location = apart
    container.parameter = parameter
    container.type = data_type
    container.version = version
    container.interval = timestep_min
    container.units = unit
    container.times = times
    container.values = Site.values
    container.numberValues = len(Site.times)
    container.startTime = times[0]
    container.endTime = times[-1]
    container.timeZoneID = tz
    # container.makeAscending()
    if not TimeSeriesMath.checkTimeSeries(container):
        return 'Site: "{}" not saved to DSS'.format(Site.site_number)
    tsc = TimeSeriesFunctions.snapToRegularInterval(container, epart, "0MIN", "0MIN", "0MIN")
    # Put the data to DSS
    try:
        dss.put(tsc)
    except Exception as ex:
        print(ex)
        return 'Site: "{}" not saved to DSS'.format(Site.site_number)
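# Usage sketch for put_to_dss() with hypothetical values: the function relies on
# module-level usgs_code, ws_name and tz globals and on a site dict with the keys
# shown below; the time strings must be in a format HecTime can parse.
site = {
    'code': '00060',              # key into the usgs_code lookup (assumed entry)
    'site_number': '11425500',    # hypothetical gage number
    'times': ['01JAN2020 0000', '01JAN2020 0015'],
    'values': [1200.0, 1210.0],
}
dss = HecDss.open(R"C:\hypothetical\usgs.dss")
msg = put_to_dss(site, dss)
if msg:
    print(msg)
dss.done()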
    for t, v in zip(newTSC.times, newTSC.values):
        if v > 0:
            if int(v + epsilon) != int(v):
                v = int(v + epsilon)
            elif int(v - epsilon) != int(v):
                v = int(v)
        else:
            if int(v + epsilon) != int(v):
                v = int(v)
            elif int(v - epsilon) != int(v):
                v = int(v - epsilon)
        newTimes.append(t)
        newValues.append(float(v))
    newTSC.times = newTimes
    newTSC.values = newValues
    newTSM = TimeSeriesMath(newTSC)
    return newTSM
else:
    # Return an integer type if the answer is an integer
    if int(value) == value:
        return int(value)
    # If Python made some silly precision error
    # like x.99999999999996, just return x + 1 as an integer
    epsilon = 0.0000000001
    if value > 0:
        if int(value + epsilon) != int(value):
            return int(value + epsilon)
        elif int(value - epsilon) != int(value):
            return int(value)
def timeWindowMod(runtimeWindow, alternative, computeOptions):
    t = time.time()
    # Load the active paths:
    # Replace file names and imports with:
    dssFileName = computeOptions.getDssFilename()
    dssFileObj = HecDss.open(dssFileName)
    paths = dssFileObj.getCatalogedPathnames()
    print "\nStarting Script..."
    # Read the datum shifts. NOTE THAT THESE NEED UPDATED HERE AND IN THE STATE VARIABLE EDITOR.
    datumShiftDict = {"ALBENI FALLS": 3.9,
                      "AMERICAN FALLS": 3.3, "ANDERSON RANCH": 3.4, "ARROW LAKES": 4.3,
                      "ARROWROCK": 3.4, "BONNEVILLE": 3.3, "BOUNDARY": 4,
                      "BOX CANYON": 4., "BRILLIANT": 4.2, "BROWNLEE": 3.3,
                      "BUMPING LAKE": 3.9, "CABINET GORGE": 3.9, "CASCADE": 3.6,
                      "CHELAN": 3.9, "CHIEF JOSEPH": 4., "CLE ELUM": 3.9,
                      "CORRA LINN": 4.3, "DEADWOOD": 4., "DUNCAN": 4.3,
                      "DWORSHAK": 3.3, "GRAND COULEE": 3.9, "HELLS CANYON": 3.6,
                      "HUNGRY HORSE": 3.9, "ICE HARBOR": 3.4, "JACKSON LAKE": 4.3,
                      "JOHN DAY": 3.2, "KACHESS": 3.9, "KEECHELUS": 4.,
                      "SKQ": 3.6, "LIBBY": 3.9, "LITTLE FALLS": 3.8, "LITTLE GOOSE": 3.2,
                      "LONG LAKE": 3.8, "LOWER BONNINGTON": 4.2, "LOWER GRANITE": 3.4,
                      "LOWER MONUMENTAL": 3.3, "LUCKY PEAK": 3.3, "MCNARY": 3.3,
                      "MICA": 4.7, "MONROE STREET": 3.8, "NINE MILE": 3.8,
                      "NOXON RAPIDS": 3.9, "OWYHEE": 3.3, "OXBOW": 3.4,
                      "PALISADES": 4., "PELTON": 3.6, "PELTON REREG": 3.5,
                      "POST FALLS": 3.8, "PRIEST LAKE": 4., "PRIEST RAPIDS": 3.5,
                      "REVELSTOKE": 4.5, "ROCK ISLAND": 3.7, "ROCKY REACH": 3.8,
                      "ROUND BUTTE": 3.6, "SEVEN MILE": 4.1, "SLOCAN": 4.2,
                      "THE DALLES": 3.3, "THOMPSON FALLS": 3.8, "TIETON": 3.8,
                      "UPPER BONNINGTON": 4.2, "UPPER FALLS": 3.8, "WANAPUM": 3.5,
                      "WANETA": 4., "WELLS": 4.}
    # Add NGVD29 elevations
    for path in paths:
        # Split path name to separate out A through F parts. This split has the following format:
        # [u'', u'A part', u'B part', u'C part', u'Start time', u'E part', u'F part', u'']
        pathParts = path.split("/")
        aPart = pathParts[1]
        bPart = pathParts[2]
        cPart = pathParts[3]
        dPart = pathParts[4]
        ePart = pathParts[5]
        fPart = pathParts[6]
        # Skip lots of paths if this is an FRA run:
        if computeOptions.isFrmCompute():
            CurrentEvent = computeOptions.getCurrentEventNumber()
            fPartCurrentEventText = "C:" + "%06d" % CurrentEvent + "|"
            if fPartCurrentEventText not in fPart:
                continue  # Don't run unless it's the current event.
        if "ELEV" in cPart and "NGVD29" not in cPart:  # Skip any records that have the "NGVD29" string
            tsCont = dssFileObj.get(path)
            # Create the time series container. Make sure its properties are defined correctly.
            tsContNewFile = tsCont
            tsContNewFile.watershed = aPart
            tsContNewFile.location = bPart
            tsContNewFile.version = fPart
            for key in datumShiftDict.keys():  # Loop for all locations for which datum shifts defined
                if key in bPart:  # Check to see if the BPart matches a datum shift location
                    cPartNew = cPart + "-NGVD29"  # Append a -NGVD29 to make it obvious
                    # Test if FRA run
                    if hasattr(tsContNewFile, 'yOrdinates'):
                        # yOrdinates are present in paired data container objects,
                        # which are created in CRSO FRA post processing.
                        for j in range(len(tsCont.yOrdinates)):
                            try:
                                for k in range(len(tsContNewFile.yOrdinates[j])):
                                    tsContNewFile.yOrdinates[j][k] = tsContNewFile.yOrdinates[j][k] - datumShiftDict[key]  # Convert to NGVD29
                                tsContNewFile.yparameter = cPartNew
                                tsContNewFile.fullName = "/%s/%s/%s/%s/%s/%s/" % (aPart, bPart, cPartNew, dPart, ePart, fPart)
                            except:
                                # Note paths that couldn't process. Don't stop script.
                                alternative.addComputeMessage("Problem applying datum shift to " + path)
                                continue
                    # Otherwise:
                    else:
                        try:
                            for j in range(len(tsCont.values)):
                                tsContNewFile.values[j] = tsContNewFile.values[j] - datumShiftDict[key]
                            tsContNewFile.parameter = cPartNew
                            tsContNewFile.fullName = "/%s/%s/%s//%s/%s/" % (aPart, bPart, cPartNew, ePart, fPart)
                        except:
                            # Note paths that couldn't process. Don't stop script.
                            alternative.addComputeMessage("Problem applying datum shift to " + path)
                            continue
                    tsMathNewFile = TimeSeriesMath.createInstance(tsContNewFile)  # This will work for time series containers or paired data containers.
                    dssFileObj.write(tsMathNewFile)  # This will work for time series containers or paired data containers.
    dssFileObj.done()
    elapsed = time.time() - t
    alternative.addComputeMessage("TIME WINDOW MODIFIER FINISHED RUNNING. COMPUTE TIME IS " + str(elapsed))
    print "\nComplete..."
    return runtimeWindow  # Return and do not change runtimeWindow