Example #1
def SQL_Read_MeanRainfall(link_id,
                          date1,
                          date2,
                          schema='pers_nico',
                          table='s4mrain',
                          time_name='unix_time',
                          data_name='rain',
                          linkid_name='link_id'):
    '''DEPRECATED. Reads the mean rainfall from the IIHR database
    "research_environment" and returns it as a pandas.Series element.
    Parameters:
        - link_id: ID of the link at the outlet of the basin.
        - date1: initial date of the query.
        - date2: final date of the query.
    Optional:
        - schema: schema from which to obtain the data in the database.
        - table: master table with the rainfall data.
        - time_name: name of the column that holds the time.
        - data_name: name of the column that holds the data.
        - linkid_name: name of the column that holds the link IDs.
    Returns:
        - pandas.Series containing the hourly mean rainfall.'''
    #make the connection
    con = DataBaseConnect(user='******', password='******')
    #Work with dates and the link ID
    date1 = str(aux.__datetime2unix__(date1))
    date2 = str(aux.__datetime2unix__(date2))
    if not isinstance(link_id, str):
        link_id = str(link_id)
    #Build the query
    query = sql.SQL("SELECT " + time_name + ", " + data_name + " FROM " +
                    schema + "." + table + " WHERE " + time_name +
                    " BETWEEN " + date1 + " and " + date2 + " AND " +
                    linkid_name + "='" + link_id + "'")
    #Run the query.
    Data = pd.read_sql(query,
                       con,
                       index_col=time_name,
                       parse_dates={time_name: {'unit': 's'}})
    con.close()
    #Organize rainfall
    Data = Data.sort_index()
    Dates = pd.date_range(Data.index[0], Data.index[-1], freq='1h')
    Rain = pd.Series(np.zeros(Dates.size), Dates)
    Rain[Data.index] = Data[data_name].values
    #Zero-out no-data sentinel values
    Rain[Rain > 1000] = 0.0
    return Rain
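A minimal usage sketch; the link ID and dates below are hypothetical, and DataBaseConnect must hold valid credentials:

#Hypothetical outlet link and one-week window
rain = SQL_Read_MeanRainfall(367813, '2018-05-01 00:00', '2018-05-08 00:00')
#Daily totals from the hourly series
print(rain.resample('1d').sum())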
Example #2
 def UpdateGlobals(self, dt, initial_old, initial_new):
     #Current date and the two steps that follow it
     date_old = self.dateEvent
     date_new = self.dateEvent + pd.Timedelta(dt)
     date_fut = date_new + pd.Timedelta(dt)
     #Old, new and future dates in string format
     date_old_t = date_old.strftime('%Y-%m-%d %H:%M')
     date_new_t = date_new.strftime('%Y-%m-%d %H:%M')
     date_fut_t = date_fut.strftime('%Y-%m-%d %H:%M')
     #Unix times to replace
     unix1 = str(aux.__datetime2unix__(date_old_t) + 12 * 3600)
     unix2 = str(aux.__datetime2unix__(date_new_t) + 12 * 3600)
     unix3 = str(aux.__datetime2unix__(date_fut_t) + 12 * 3600)
     #Creates the dictionary with the keys to replace
     D = {
         'date_f': {
             'old': date_new_t,
             'new': date_fut_t
         },
         'date_i': {
             'old': date_old_t,
             'new': date_new_t
         },
         'initial': {
             'old': initial_old,
             'new': initial_new
         },
         'unix_f': {
             'old': unix2,
             'new': unix3
         },
         'unix_i': {
             'old': unix1,
             'new': unix2
         }
     }
     #Creates the list of global files to update
     Lista = []
     for k in self.setups.keys():
         Lista.append([self.path + '/ForRun/' + k + '.gbl', D])
     #Update globals with a multiprocessing approach
     self.execute(WarpUpdateGlobals, Lista)
     self.dateEvent = date_new
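WarpUpdateGlobals itself is not shown in this listing; self.execute maps it over Lista with multiprocessing. A minimal sketch of the kind of worker the dictionary D is built for, assuming it rewrites each .gbl file in place (illustrative only, not the package's actual implementation):

def WarpUpdateGlobals(args):
    '''Illustrative worker: swaps every 'old' value for its 'new' value
    inside one global file. args = [path_to_gbl, D].'''
    path, D = args
    with open(path, 'r') as f:
        text = f.read()
    #D lists date_f before date_i, so the newer timestamp is rewritten
    #first and is not re-replaced by the date_i substitution.
    for key in D:
        text = text.replace(D[key]['old'], D[key]['new'])
    with open(path, 'w') as f:
        f.write(text)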
Example #3
def SQL_read_USGS_Streamflow(usgs_id,
                             date1,
                             date2,
                             schema='pers_nico',
                             table='data_usgs',
                             time_name='unix_time',
                             data_name='val',
                             usgs_name='usgs_id'):
    '''Reads streamflow data from the IIHR database "research_environment"
    and returns it as a pandas.DataFrame element.
    Parameters:
        - usgs_id: USGS station code.
        - date1: initial date of the query.
        - date2: final date of the query.
    Optional:
        - schema: schema from which to obtain the data in the database.
        - table: master table with the USGS data.
        - time_name: name of the column that holds the time.
        - data_name: name of the column that holds the data.
        - usgs_name: name of the column that holds the IDs of the USGS stations.
    Returns:
        - pandas.DataFrame containing the streamflow data.'''
    #make the connection
    con = DataBaseConnect(user='******', password='******')
    #Work with dates and the USGS ID
    date1 = str(aux.__datetime2unix__(date1))
    date2 = str(aux.__datetime2unix__(date2))
    if not isinstance(usgs_id, str):
        usgs_id = str(usgs_id)
    #Build the query
    query = sql.SQL("SELECT " + time_name + ", " + data_name + " FROM " +
                    schema + "." + table + " WHERE " + time_name +
                    " BETWEEN " + date1 + " and " + date2 + " AND " +
                    usgs_name + "='" + usgs_id + "'")
    #Run the query.
    Data = pd.read_sql(query,
                       con,
                       index_col=time_name,
                       parse_dates={time_name: {'unit': 's'}})
    con.close()
    return Data
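Both query builders above assemble SQL by string concatenation, which is fragile and open to injection. A sketch of the same query built with psycopg2's sql module instead, written as a drop-in for the body of SQL_read_USGS_Streamflow (this assumes the connection returned by DataBaseConnect is a plain psycopg2 connection):

from psycopg2 import sql

#Compose the identifiers safely and bind the values as query parameters
query = sql.SQL(
    "SELECT {time_col}, {data_col} FROM {schema}.{table} "
    "WHERE {time_col} BETWEEN %s AND %s AND {id_col} = %s").format(
        time_col=sql.Identifier(time_name),
        data_col=sql.Identifier(data_name),
        schema=sql.Identifier(schema),
        table=sql.Identifier(table),
        id_col=sql.Identifier(usgs_name))
Data = pd.read_sql(query, con, params=(date1, date2, usgs_id),
                   index_col=time_name,
                   parse_dates={time_name: {'unit': 's'}})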
Example #4
 def write_rainfall(self, date1, date2, path):
     '''Writes binary files for the rainfall of the defined watershed.
          - date1: initial date of the data to write.
          - date2: end date of the data to write.
          - path: where to store the binary files, e.g. /home/user/basin/rain/BasinStage4_
      Returns:
          - the mean rainfall of the watershed for the selected period.'''
      #Database connection and query.
     unix1 = str(aux.__datetime2unix__(date1))
     unix2 = str(aux.__datetime2unix__(date2))
     con = db.DataBaseConnect(database='rt_precipitation')
     if self.linkid > 0:
         q = db.sql.SQL("WITH subbasin AS (SELECT nodeX.link_id AS link_id FROM students.env_master_km \
      AS nodeX, students.env_master_km AS parentX WHERE (nodeX.left BETWEEN parentX.left AND parentX.right) \
      AND parentX.link_id = "+str(self.linkid)+") SELECT A.unix_time,sum(weight*A.val) as rain,B.link_id FROM stage_4.data AS \
      A,env_lookup_hrap_lid_v4 AS B,subbasin WHERE A.grid_x = B.x AND A.grid_y=B.y AND B.link_id = subbasin.link_id \
      AND A.unix_time >= "+str(unix1)+" AND A.unix_time < "+str(unix2)+" AND A.val < 99.0 GROUP BY B.link_id,A.unix_time ORDER BY A.unix_time")
     else:
         q = db.sql.SQL("SELECT \
                 A.unix_time, \
                 sum(weight*A.val) as rain, \
                 B.link_id \
             FROM \
                 stage_4.data AS A, \
                 env_lookup_hrap_lid_v4 AS B \
             WHERE A.grid_x = B.x AND A.grid_y=B.y AND A.unix_time >= "+str(unix1)+" AND A.unix_time < "+str(unix2)+" AND A.val < 999.0 \
             GROUP BY B.link_id,A.unix_time \
             ORDER BY A.unix_time;")
     E = pd.read_sql(q, con, index_col='unix_time')
     con.close()
      #Set up the series with the mean rainfall for the period
      d = pd.date_range(date1, date2, freq='1h')
      MeanRain = pd.Series(np.zeros(d.size), index=d)
      #Rainfall binary files creation
      for i in np.arange(E.index[0], E.index[-1], 3600):
          Rain = E[['link_id', 'rain']][E.index == i]
          __saveBin__(Rain['link_id'].values, Rain['rain'].values,
                      Rain['rain'].size, path + str(i))
          MeanRain[pd.to_datetime(i, unit='s')] = Rain['rain'].mean()
      MeanRain[np.isnan(MeanRain)] = 0.0
      return MeanRain
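A hypothetical call; the watershed object, dates and output prefix are placeholders, and one binary file is written per hourly time step next to the given prefix:

#Placeholder watershed object and paths
mean = watershed.write_rainfall('2018-05-01 00:00', '2018-05-05 00:00',
                                '/home/user/basin/rain/BasinStage4_')
print('Accumulated mean rainfall over the period:', mean.sum())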
Example #5
def SQL_Get_MeanRainfall(linkID, date1, date2):
    '''Obtains the mean rainfall for the watershed associated with
    a given linkID.
    Parameters:
        - linkID: linkID of the outlet of the basin.
        - date1: initial date (YYYY-MM-DD HH:MM).
        - date2: end date (YYYY-MM-DD HH:MM).
    Returns:
        - Rainfall: Pandas series with the mean rainfall in the basin.'''
    #Set the connection
    con = DataBaseConnect(user='******',
                          password='******',
                          database='rt_precipitation')
    #Transform dates to unix
    unix1 = str(aux.__datetime2unix__(date1))
    unix2 = str(aux.__datetime2unix__(date2))
    linkID = str(linkID)
    #Set the query and obtain the data
    q = sql.SQL(
        "WITH subbasin AS (SELECT nodeX.link_id AS link_id "
        "FROM students.env_master_km AS nodeX, students.env_master_km AS parentX "
        "WHERE (nodeX.left BETWEEN parentX.left AND parentX.right) "
        "AND parentX.link_id = " + linkID + "), "
        "uparea as (SELECT up_area FROM students.env_master_km "
        "WHERE link_id = " + linkID + "), "
        "lut as (SELECT x, y FROM env_lookup_hrap_lid_v4 "
        "WHERE link_id IN (SELECT * FROM subbasin) group by x, y) "
        "SELECT unix_time, sum(val)/(SELECT count(*) FROM lut) as rain "
        "FROM stage_4.data WHERE grid_x IN (SELECT x FROM lut) "
        "AND grid_y IN (SELECT y from lut) "
        "AND unix_time between " + unix1 + " AND " + unix2 + " "
        "group by unix_time order by unix_time;")
    Data = pd.read_sql(q,
                       con,
                       index_col='unix_time',
                       parse_dates={'unix_time': {
                           'unit': 's'
                       }})
    #close connection
    con.close()
    #Post-process the data
    dates = pd.date_range(date1, date2, freq='1h')
    Rain = pd.Series(np.zeros(dates.size), dates)
    Rain[Data.index] = Data['rain']
    return Rain
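A hypothetical call; the link ID and dates are placeholders:

rain = SQL_Get_MeanRainfall(367813, '2018-05-01 00:00', '2018-05-08 00:00')
print('Peak hourly rainfall:', rain.max(), 'at', rain.idxmax())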
Example #6
        fl.Evento.CreateBashFile(status='start',
                                 path='./Run_' + str(args.link) + '_' +
                                 dateText + '_' + lambdaName + '.sh')

        #First initial condition
        Initial = fl.Evento.path + '/ForRun/initial.dbc'
        initFlag = '3'

        #Creates the gbl files for each epoch
        first = 'si'
        for d1, d2 in zip(Dates[:-1], Dates[1:]):
            #Dates in string format
            d_initial = d1.strftime('%Y-%m-%d %H:%M')
            d_end = d2.strftime('%Y-%m-%d %H:%M')
            #Unix times for the execution
            unix1 = str(aux.__datetime2unix__(d_initial))  #+12*3600)
            unix2 = str(aux.__datetime2unix__(d_end))  #+12*3600)
            #Iterate over the runoff coefficients
            Lista = []
            for c, rc in enumerate(args.rc):
                #Name of the project
                name = d1.strftime('%Y%m%d%H%M') + '_' + str(
                    c) + '_' + lambdaName
                #Parameters
                params = '6 0.75 %s -0.2 %s 0.1 2.2917e-05' % (lamb, str(rc))
                #List for that run
                L = [
                    d_initial, d_end, params, args.link, initFlag, Initial,
                    unix1, unix2, fl.Evento.path + '/Results/' + name + '.dat',
                    fl.Evento.path + '/ForRun/control.sav',
                    fl.Evento.path + '/Initial/' + name + '.h5',
Example #7
 def ASYNCH_setGlobal(self, gblBase = 'BaseGlobal.gbl', Links2SaveName = 'ControlPoints.sav',
     OutStatesName = 'OutputStates.dat', initial_name = None, initial_exist = False,
     snapName = None, snapTime = None, createSav = False):
      '''Edits the global file for an asynch run.
      Parameters:
          - gblBase: name of the base global file used for the executions.
          - Links2SaveName: name of the file with the links where results are saved.
          - OutStatesName: name of the file with the output states.
          - initial_name: path to the initial-conditions file (None derives a .dbc path).
          - initial_exist: if True, the initial file already exists and is not rebuilt.
          - snapName: name of the snapshot file (None disables snapshots).
          - snapTime: time interval between snapshots.
          - createSav: if True, writes a control.sav containing only self.linkID.
      Outputs:
          This function writes the .gbl file at self.path_in_global.'''
      # Copy the base global file to the run directory
      self.path_in_global = self.path_in + self.name + '.gbl'
      command = 'cp ' + Path + self.model + gblBase + ' ' + self.path_in_global
      os.system(command)
     #Copy the links2save file 
     if createSav is False:
          self.path_in_links2save = self.path_in + Links2SaveName
          command = 'cp ' + Path + self.links2save + ' ' + self.path_in_links2save
          os.system(command)
     else:
         self.path_in_links2save = self.path_in+'control.sav'
         f = open(self.path_in_links2save,'w')
         f.write('%s' % self.linkID)
         f.close()
      #Set the initial file for that link
     if initial_name is None:            
         self.path_in_initial = self.path_in + self.name + '.dbc'
     else:
         self.path_in_initial = initial_name
     if initial_exist is False:
         self.__ASYNCH_setInitialFile__(self.path_in_initial,self.date1[:4],
             self.linkID)
      #Set the initial-condition flag depending on the file extension
     InitNumber = str(self.__ASYNCH_get_number(self.path_in_initial, whatfor='initial'))
     
     #Set the snapshot info
     if snapName is not None:
         Snap_flag = self.__ASYNCH_get_number(snapName, whatfor='snapshot')
         Snap_name = snapName
         if snapTime is not None:
             Snap_time = str(snapTime)
             if Snap_flag == 3:
                 Snap_flag = str(4)
         else:
             Snap_time = ''
         Snap_flag = str(Snap_flag)
     else:
          Snap_flag = '0'
          Snap_name = ''
          Snap_time = ''
     
     #Set the name of the file with the output of the streamflow
     self.path_out_states = self.path_out + OutStatesName
      # Unix times default to the dates shifted by 12 hours
      if self.unix1 is None:
          self.unix1 = aux.__datetime2unix__(self.date1) + 12*3600.
      textUnix1 = '%d' % self.unix1
      if self.unix2 is None:
          self.unix2 = aux.__datetime2unix__(self.date2) + 12*3600
     textUnix2 = '%d' % self.unix2
     # Parameters 
     Param = ' '.join(self.parameters)+'\n'
     # Replace parameters in the global file        
     DicToreplace = {'date1':{'to_search': '¿date1?', 'to_put': self.date1},
         'date2':{'to_search': '¿date2?', 'to_put': self.date2},
         'unix1':{'to_search': '¿unix1?', 'to_put': textUnix1},
         'unix2':{'to_search': '¿unix2?', 'to_put': textUnix2},
         'linkID':{'to_search': '¿linkID?', 'to_put': self.linkID},
         'parameters':{'to_search': '¿Parameters?', 'to_put': Param},
         'output':{'to_search': '¿output?', 'to_put': self.path_out_states},
         'peakflow':{'to_search': '¿peakflow?', 'to_put': self.path_in_links2save},
         'initial':{'to_search': '¿initial?', 'to_put': self.path_in_initial},
         'initial_flag': {'to_search': '¿initialflag?', 'to_put': InitNumber},
         'snapshot_flag': {'to_search': '¿snapflag?', 'to_put': Snap_flag},
         'snapshot_time': {'to_search': '¿snaptime?', 'to_put': Snap_time},
          'snapshot_name': {'to_search': '¿snapshot?', 'to_put': Snap_name},}
     # Replacement in the document.
     filename = self.path_in_global
     for k in DicToreplace:            
         with fileinput.FileInput(filename, inplace=True) as file:
             for line in file:                    
                 text_to_search = DicToreplace[k]['to_search']
                 replacement_text = str(DicToreplace[k]['to_put'])
                 print(line.replace(text_to_search, replacement_text), end='')
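The loop above reopens the global file once per placeholder via fileinput. A behaviour-equivalent single-pass sketch, reusing the same DicToreplace structure, would read the file once, apply every substitution, and write it back:

with open(filename, 'r') as f:
    text = f.read()
for k in DicToreplace:
    text = text.replace(DicToreplace[k]['to_search'],
                        str(DicToreplace[k]['to_put']))
with open(filename, 'w') as f:
    f.write(text)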
Example #8
 def write_Global(self, path2global, model_uid = 604,
     date1 = None, date2 = None, rvrFile = None, rvrType = 0, rvrLink = 0, prmFile = None, prmType = 0, initialFile = None,
     initialType = 1,rainType = 5, rainPath = None, evpFile = 'evap.mon', datResults = None,
     nComponents = 1, Components = [0], controlFile = None, baseGlobal = None, noWarning = False, snapType = 0,
     snapPath = '', snapTime = '', evpFromSysPath = False):
     '''Creates a global file for the current project.
          - model_uid: number of the model; goes from 601 to 604.
          - date1 and date2: initial and end dates.
         - rvrFile: path to rvr file.
          - rvrType: 0: .rvr file, 1: database .dbc file.
         - rvrLink: 0: all the domain, N: number of the linkid.
         - prmFile: path to prm file.
          - prmType: 0: .prm file, 1: database .dbc file.
         - initialFile: path to file with initial conditions.
         - initialType: type of initial file:
             - 0: ini, 1: uini, 2: rec, 3: .dbc
          - rainType: number indicating the type of the rain to be used.
             - 1: plain text with rainfall data for each link.
             - 3: Database.
             - 4: Uniform storm file: .ustr
             - 5: Binary data with the unix time
          - rainPath: path to the folder containing the binary files of the rain,
              or path to the database .dbc file.
         - evpFile: path to the file with the values of the evp.
         - datResults: File where .dat files will be written.
         - nComponents: Number of results to put in the .dat file.
         - Components: Number of each component to write: [0,1,2,...,N]
         - controlFile: File with the number of the links to write.
         - baseGlobal: give the option to use a base global that is not the default
         - snapType: type of snapshot to make with the model:
             - 0: no snapshot, 1: .rec file, 2: to database, 3: to hdf5, 4:
                 recurrent hdf5
         - snapPath: path to the snapshot.
         - snapTime: time interval between snapshots (min)
         - evpFromSysPath: add the path of the system to the evp file.'''
      #Open the base global file and create the template
     if baseGlobal is not None:
         f = open(baseGlobal, 'r')
         L = f.readlines()
         f.close()
     else:
         L = Globals['60X']
      Base = Template(''.join(L))
      # Database rainfall
     if rainType == 3 and rainPath is None:
         rainPath = '/Dedicated/IFC/model_eval/forcing_rain51_5435_s4.dbc'
     if rvrType == 1 and rvrFile is None:
         rvrFile = '/Dedicated/IFC/model_eval/topo51.dbc'
      #Change the evp path
     if evpFromSysPath:
         evpFile = Path + evpFile
     # Creates the default Dictionary.
     Default = {
         'model_uid' : model_uid,
         'date1': date1,
         'date2': date2,
         'rvrFile': rvrFile,
         'rvrType': str(rvrType),
         'rvrLink': str(rvrLink),
         'prmFile': prmFile,
         'prmType': str(prmType),
         'initialFile': initialFile,
         'initialType': initialType,
         'rainType': str(rainType),
         'rainPath': rainPath,
         'evpFile': evpFile,
         'datResults': datResults,
         'controlFile': controlFile,
         'snapType': str(snapType),
         'snapPath': snapPath,
         'snapTime': str(snapTime),
         'nComp': str(nComponents)
     }
     if date1 is not None:
         Default.update({'unix1': aux.__datetime2unix__(Default['date1'])})
     else:
         Default.update({'unix1': '$'+'unix1'})
     if date2 is not None:
         Default.update({'unix2': aux.__datetime2unix__(Default['date2'])})
     else:
         Default.update({'unix2': '$'+'unix2'})
     #Update the list of components to write
     for n, c in enumerate(Components):
         Default.update({'Comp'+str(n): 'State'+str(c)})
     if nComponents <= 9:
         for c in range(9-nComponents):
             Default.update({'Comp'+str(8-c): 'XXXXX'})
     #Check for parameters left undefined
     D = {}
     for k in Default.keys():
         if Default[k] is not None:
             D.update({k: Default[k]})
         else:
              if not noWarning:
                  print("Warning: parameter " + k + " left undefined, model won't run")
             D.update({k: '$'+k})
     #Update parameter on the base and write global 
     f = open(path2global,'w', newline='\n')
     f.writelines(Base.substitute(D))
     f.close()
      #Erase unused print components
      f = open(path2global, 'r')
      L = f.readlines()
      f.close()
      L = [line for line in L if line != 'XXXXX\n']
      f = open(path2global, 'w', newline='\n')
      f.writelines(L)
      f.close()
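A hypothetical call; every path below is a placeholder and the project object is an assumption:

#Placeholder paths for a 604 run driven by binary rainfall files
project.write_Global('/home/user/run/project.gbl', model_uid=604,
                     date1='2018-05-01 00:00', date2='2018-05-05 00:00',
                     rvrFile='/home/user/run/basin.rvr',
                     prmFile='/home/user/run/basin.prm',
                     initialFile='/home/user/run/initial.uini', initialType=1,
                     rainPath='/home/user/rain/BasinStage4_',
                     datResults='/home/user/run/out.dat',
                     controlFile='/home/user/run/control.sav')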