def write_control(self, path, linkList=None):
    '''Writes the control.sav file used by the model to determine at which
    links to store the obtained results.

    Parameters:
        - path: destination of the control file (one link id per line).
        - linkList: optional list of link ids to consider; when None the
          list of USGS stations for the current self.linkid is obtained
          from the database.
    Returns:
        - the list of link ids considered (queried or given).
    Raises:
        - ValueError: if linkList is None and self.linkid is negative
          (the original code left the query undefined in that case).'''
    # If no link list was given, obtain it from the database.
    if linkList is None:
        con = db.DataBaseConnect(user='******', password='******')
        # NOTE(review): queries are built by string concatenation; this is
        # only safe while self.linkid is an internal integer attribute —
        # confirm it is never taken from external input.
        if self.linkid > 0:
            # Stations inside the sub-basin draining to self.linkid.
            q = db.sql.SQL(
                "SELECT distinct us.link_id "
                "FROM pers_nico.iowa_usgs_stations us, "
                "pers_nico.subbasin(" + str(self.linkid) + ") sub "
                "where us.link_id = sub")
        elif self.linkid == 0:
            # linkid == 0 means the whole domain.
            q = db.sql.SQL(
                "SELECT distinct us.link_id "
                "FROM pers_nico.iowa_usgs_stations us, "
                "pers_nico.master_lambda_vo mas "
                "where us.link_id = mas.link_id")
        else:
            raise ValueError('self.linkid must be >= 0 to query the link list')
        linkList = pd.read_sql(q, con).values.T.astype(int).tolist()[0]
        con.close()  # the original leaked this connection
    # Keep only the links that belong to this basin's Table; a set gives
    # O(1) membership instead of the original try/list.index/except scan.
    known = set(self.Table.index.tolist())
    with open(path, 'w', newline='\n') as f:
        for link in linkList:
            if link in known:
                f.write('%d\n' % link)
    return linkList
def write_rvr(self, path=None, database='rt_precipitation'):
    '''Queries the river-network topology for the basin and optionally
    writes it as a .rvr file understood by the model.

    Parameters:
        - path: optional path of the .rvr file to write; when None only
          self.topo is updated.
        - database: kept for backward compatibility; currently unused by
          this method (the connection uses its own defaults).
    Side effects:
        - self.topo: DataFrame of (id, link_id) parent/child pairs.
        - self.Table: rows whose link_id is absent from the topology
          are dropped.'''
    # Connect to the database (credentials redacted in source).
    con = db.DataBaseConnect(user='******', password='******')
    # NOTE(review): the query is built by string concatenation; self.linkid
    # is assumed to be an internal integer — confirm before exposing it to
    # external input. linkid < 0 leaves q undefined, as in the original.
    if self.linkid > 0:
        # Topology restricted to the sub-basin draining to self.linkid
        # (nested-set containment on left/right bounds).
        q = db.sql.SQL(
            "WITH all_links(id) AS (SELECT link_id FROM pers_nico.master_lambda_vo) "
            "SELECT all_links.id,pers_nico.master_lambda_vo.link_id "
            "FROM pers_nico.master_lambda_vo,all_links "
            "WHERE (all_links.id IN (SELECT nodeX.link_id FROM pers_nico.master_lambda_vo AS nodeX, "
            "pers_nico.master_lambda_vo AS parentX "
            "WHERE (nodeX.left BETWEEN parentX.left AND parentX.right) "
            "AND parentX.link_id = " + str(self.linkid) + ")) "
            "AND pers_nico.master_lambda_vo.parent_link = all_links.id ORDER BY all_links.id")
    elif self.linkid == 0:
        # Whole-domain topology.
        q = db.sql.SQL(
            "WITH all_links(id) AS (SELECT link_id FROM pers_nico.master_lambda_vo) "
            "SELECT DISTINCT all_links.id,pers_nico.master_lambda_vo.link_id "
            "FROM pers_nico.master_lambda_vo,all_links "
            "WHERE all_links.id > 1 AND pers_nico.master_lambda_vo.model AND "
            "pers_nico.master_lambda_vo.parent_link = all_links.id ORDER BY all_links.id;")
    self.topo = pd.read_sql(q, con)
    con.close()
    topo = self.topo.values.T
    # Write the .rvr file: total link count, then for each link its id
    # followed by the number of upstream links and their ids.
    if path is not None:
        with open(path, 'w', newline='\n') as f:
            f.write('%d\n\n' % topo.shape[1])
            for link in topo[1]:
                f.write('%d\n' % link)
                upstream = np.where(topo[0] == link)[0]
                if upstream.size > 0:
                    f.write('%d ' % upstream.size)
                    for i in upstream:
                        f.write('%d ' % topo[1][i])
                    f.write('\n\n')
                else:
                    f.write('0\n\n')
    # Keep in self.Table only the links present in the topology; the
    # original built a throwaway pd.Series just to test membership on its
    # index and dropped rows one at a time — a set plus a batch drop is
    # equivalent and O(n).
    in_topo = set(self.topo['link_id'].values.tolist())
    to_drop = [i for i in self.Table.index if i not in in_topo]
    self.Table = self.Table.drop(to_drop, axis=0)
def write_rainfall(self, date1, date2, path):
    '''Writes binary files for the rainfall of the defined watershed.

    Parameters:
        - date1: initial date of the data to write.
        - date2: end date.
        - path: prefix where to store binary files
          eg: /home/user/basin/rain/BasinStage4_
    Returns:
        - the mean rainfall of the watershed for the selected period,
          as an hourly pd.Series (NaN hours filled with 0.0).'''
    # Database connection and query over the requested unix-time window.
    unix1 = str(aux.__datetime2unix__(date1))
    unix2 = str(aux.__datetime2unix__(date2))
    con = db.DataBaseConnect(database='rt_precipitation')
    # NOTE(review): both branches build SQL by string concatenation and use
    # different validity thresholds (A.val < 99.0 vs < 999.0) — confirm
    # whether this asymmetry is intentional.
    if self.linkid > 0:
        # Rainfall restricted to the sub-basin draining to self.linkid.
        q = db.sql.SQL(
            "WITH subbasin AS (SELECT nodeX.link_id AS link_id FROM students.env_master_km "
            "AS nodeX, students.env_master_km AS parentX "
            "WHERE (nodeX.left BETWEEN parentX.left AND parentX.right) "
            "AND parentX.link_id = " + str(self.linkid) + ") "
            "SELECT A.unix_time,sum(weight*A.val) as rain,B.link_id "
            "FROM stage_4.data AS A,env_lookup_hrap_lid_v4 AS B,subbasin "
            "WHERE A.grid_x = B.x AND A.grid_y=B.y AND B.link_id = subbasin.link_id "
            "AND A.unix_time >= " + unix1 + " AND A.unix_time < " + unix2 + " "
            "AND A.val < 99.0 GROUP BY B.link_id,A.unix_time ORDER BY A.unix_time")
    else:
        # Whole-domain rainfall.
        q = db.sql.SQL(
            "SELECT A.unix_time, sum(weight*A.val) as rain, B.link_id "
            "FROM stage_4.data AS A, env_lookup_hrap_lid_v4 AS B "
            "WHERE A.grid_x = B.x AND A.grid_y=B.y "
            "AND A.unix_time >= " + unix1 + " AND A.unix_time < " + unix2 + " "
            "AND A.val < 999.0 "
            "GROUP BY B.link_id,A.unix_time ORDER BY A.unix_time;")
    E = pd.read_sql(q, con, index_col='unix_time')
    con.close()
    # Series that will hold the watershed mean rainfall for each hour.
    d = pd.date_range(date1, date2, freq='1H')
    MeanRain = pd.Series(np.zeros(d.size), index=d)
    # One binary file per hourly time step.
    # NOTE(review): np.arange stops before E.index[-1], so the last
    # timestamp present in the query is never written — confirm whether
    # that final hour should be included.
    for i in np.arange(E.index[0], E.index[-1], 3600):
        Rain = E[['link_id', 'rain']][E.index == i]
        __saveBin__(Rain['link_id'].values, Rain['rain'].values,
                    Rain['rain'].size, path + str(i))
        MeanRain[pd.to_datetime(i, unit='s')] = Rain['rain'].mean()
    # Hours with no data produce NaN means; report them as zero rainfall
    # (replaces the original `np.isnan(MeanRain) == True` anti-idiom).
    MeanRain = MeanRain.fillna(0.0)
    return MeanRain
from ifis_tools import asynch_manager as am
from ifis_tools import series_tools as ser
import pandas as pd
import glob
import numpy as np

# NOTE(review): `db` is used below but never imported in this chunk —
# presumably it comes from another ifis_tools module imported elsewhere
# in the full file; confirm the import exists.


def get_links2(l):
    '''Extracts the link id from each path in *l*.

    Assumes file names of the form .../<link_id>_<rest>; returns the
    list of link-id strings in the same order as the input paths.'''
    return [path.split('/')[-1].split('_')[0] for path in l]


usgs = db.SQL_USGS_at_IFIS()

# Get the link properties (travel time and upstream area) for every link.
q = db.sql.SQL("select link_id, travel_time07, up_area from pers_nico.master_lambda_vo")
con = db.DataBaseConnect(user='******', password='******')
Lprop = pd.read_sql(q, con, index_col='link_id')
con.close()

# Performance evaluator fed with the link properties; run '4' is the base.
perf = ser.performance(links_prop=Lprop,
                       prop_col_names={'travel_time07': 'ttime', 'up_area': 'area'})
perf.update_dic('4', base=True,
                path='/Users/nicolas/Parameter_identifiability/data/hlm_outputs/south/*vr4*',
                abr='4', isDataFrame=True, DataFrameColumn='Q',
                path2linkFunc=get_links2)
perf.update_dic('5',
                path='/Users/nicolas/Parameter_identifiability/data/hlm_outputs/south/*vr5*',
                path2linkFunc=get_links2, isDataFrame=True, DataFrameColumn='Q')