Example 1
def PrepareInputs(Raster,InputFolder,FolderName):
    """
    ================================================================
        PrepareInputs(Raster,InputFolder,FolderName)
    ================================================================
    this function prepares downloaded raster data to match the alignment and
    nodatavalue of a GIS raster (DEM, flow accumulation, or flow direction raster)
    and writes the output rasters to a new folder named FolderName
    
    Inputs:
        1-Raster:
            [String] path to the source raster used to get the spatial information
            (coordinate system, no of rows & columns); it should include the name
            of the raster and the extension, like "data/dem.tif"
        2-InputFolder:
            [String] path to the folder containing the rasters whose number of rows,
            columns and resolution (alignment) you want to match to Raster;
            the folder should not contain any files except the rasters
        3-FolderName:
            [String] name of the folder to be created to store the resulting rasters
    Example:
        Ex1:
            dem_path="01GIS/inputs/4000/acc4000.tif"
            prec_in_path="02Precipitation/CHIRPS/Daily/"
            Inputs.PrepareInputs(dem_path,prec_in_path,"prec")
        Ex2:
            dem_path="01GIS/inputs/4000/acc4000.tif"
            outputpath="00inputs/meteodata/4000/"
            evap_in_path="03Weather_Data/evap/"
            Inputs.PrepareInputs(dem_path,evap_in_path,outputpath+"evap")
    """
    # input data validation
    # data type
    assert type(FolderName)== str, "FolderName input should be string type"
    # create a new folder in temp for the newly aligned rasters
    # check if the folder can be created
    try:
        os.makedirs(os.path.join(os.environ['TEMP'],"AllignedRasters"))
    except OSError:
        # if the folder cannot be created because one with the same name
        # already exists, delete it and create an empty one
        shutil.rmtree(os.path.join(os.environ['TEMP'],"AllignedRasters"))
        os.makedirs(os.path.join(os.environ['TEMP'],"AllignedRasters"))
        
    temp=os.environ['TEMP']+"/AllignedRasters/"
    
    # match alignment 
    GIS.MatchDataAlignment(Raster,InputFolder,temp)
    # create a new folder in the current directory for the aligned and nodatavalue-matched rasters
    try:
        os.makedirs(os.path.join(os.getcwd(),FolderName))
    except WindowsError:
        print("please function is trying to create a folder with a name New_Rasters to complete the process if there is a folder with the same name please rename it to other name")    
    # match nodata value 
    GIS.MatchDataNoValuecells(Raster,temp,FolderName+"/")
    # delete the processing folder from temp
    shutil.rmtree(temp)
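
PrepareInputs hard-codes a Windows-only scratch directory through os.environ['TEMP']. A portable alternative is the tempfile module; the sketch below makes that swap, assuming GIS.MatchDataAlignment and GIS.MatchDataNoValuecells keep the signatures used above:

import shutil
import tempfile

# hedged sketch: a portable scratch directory instead of %TEMP%/AllignedRasters
temp = tempfile.mkdtemp(prefix="AlignedRasters_") + "/"
try:
    GIS.MatchDataAlignment(Raster, InputFolder, temp)
    GIS.MatchDataNoValuecells(Raster, temp, FolderName + "/")
finally:
    shutil.rmtree(temp)  # remove the scratch folder even if a step fails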
Example 2
def getDeltas(fileOld, fileNew, cfg, directory):
    loadedNew = loadTweets(fileNew,cfg)
    timeList = [entry['created_at'] for entry in loadedNew.values()]
    minTime = min(timeList)
    
    if not cfg['OneTimeDump']:
        loadedOld = {key:item for key, item in loadTweets(fileOld,cfg).iteritems() if item['created_at'] >= minTime}
    else:
        loadedOld = dict()
    
    merged = deepcopy(loadedOld); merged.update(loadedNew)
    
    newKeys = set(loadedNew.keys())
    oldKeys = set(loadedOld.keys())
    
    addedKeys = newKeys.difference(oldKeys)
    removedKeys = oldKeys.difference(newKeys)
    sameKeys = newKeys.intersection(oldKeys)
    updatedKeys = set([entry for entry in sameKeys if makeKey(loadedNew[entry],updateKeys) != makeKey(loadedOld[entry],updateKeys)])
    expDir = 'studies/'+ cfg['OutDir'] + cfg['Method'] + '/'
    
    timeStamp =  GISpy.outTime(datetime.datetime.now())['db']
    wordWeight = getWordWeights(loadedNew,5,expDir,timeStamp)
    meta = getMeta(cfg,expDir,timeStamp)
    fileLocs = [fileNew,wordWeight,meta]
    
    addedLoc = removedLoc = updatedLoc = 'null'
    
    
    
    if len(addedKeys) >= 1:
        if cfg['OneTimeDump']:
            descriptor = 'Dumped'
            operation = 'dump'
        else:
            descriptor = 'Added'
            operation = 'add'
        addedData = {key:value for key,value in merged.iteritems() if key in addedKeys}
        addExtra(addedData,{'operation':operation,'operationTime':timeStamp})
        addedLoc = writeCSV(addedData,expDir,descriptor,'')
        fileLocs.append(addedLoc)
    if len(removedKeys) >= 1:
        removedData = {key:value for key,value in merged.iteritems() if key in removedKeys}
        addExtra(removedData,{'operation':'remove','operationTime':timeStamp})
        removedLoc = writeCSV(removedData,expDir,"Removed",'')
        fileLocs.append(removedLoc)
    if len(updatedKeys) >= 1:
        updatedData = {key:value for key,value in merged.iteritems() if key in updatedKeys}
        addExtra(updatedData,{'operation':'updated','operationTime':timeStamp})
        updatedLoc = writeCSV(updatedData,expDir,"Updated",'')
        fileLocs.append(updatedLoc)
    
    
    GISpy.zipData(fileLocs,'dbFiles/'+directory,'DBFeed ',timeStamp,cfg)
    return {'wordWeight':wordWeight,'meta':meta,'added':addedLoc,'removed':removedLoc,'updated':updatedLoc}
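
The heart of getDeltas is three set operations on the tweet keys: keys only in the new load are additions, keys only in the old load are removals, and shared keys whose watched fields changed are updates. A minimal self-contained sketch of the same classification, with toy dicts standing in for the loaded tweet tables:

# hedged sketch of the added/removed/updated logic used in getDeltas
old = {"a": 1, "b": 2, "c": 3}
new = {"b": 2, "c": 4, "d": 5}

added = set(new) - set(old)                                        # {'d'}
removed = set(old) - set(new)                                      # {'a'}
updated = set(k for k in set(new) & set(old) if new[k] != old[k])  # {'c'}

Example 3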
def DeleteBasins(basins, pathout):
    """
    ===========================================================
         DeleteBasins(basins,pathout)
    ===========================================================
    this function deletes all the basins in a basin raster created when delineating
    a catchment, leaving only the first basin, which is the biggest basin in the raster
    
    Inputs:
    ----------
        1- basins:
            [gdal.dataset] raster you create during delineation of a catchment 
            values of its cells are the number of the basin it belongs to
        2- pathout:
            [String] path you want to save the resulted raster to it should include
            the extension ".tif"
    Outputs:
    ----------
        1- raster with only one basin (the basin whose ID is the smallest value)
    
    Example:
    ----------
        basins=gdal.Open("Data/basins.tif")    
        pathout="mask.tif"
        DeleteBasins(basins,pathout)
    """
    # input data validation
    # data type
    assert type(pathout) == str, "pathout input should be string type"
    assert type(
        basins
    ) == gdal.Dataset, "basins raster should be read using gdal (gdal dataset please read it using gdal library) "

    # input values
    # check whether the user wrote the extension of the raster or not
    ext = pathout[-4:]
    assert ext == ".tif", "please add the extension at the end of the path input"

    # get number of rows
    rows = basins.RasterYSize
    # get number of columns
    cols = basins.RasterXSize
    # array
    basins_A = basins.ReadAsArray()
    # no data value
    no_val = np.float32(basins.GetRasterBand(1).GetNoDataValue())
    # get the number of basins and their IDs (sorted, so index 0 is the smallest)
    basins_val = sorted(
        set(
            int(basins_A[i, j]) for i in range(rows) for j in range(cols)
            if basins_A[i, j] != no_val
        )
    )

    # keep the first basin and delete the others by filling their cells by nodata value
    for i in range(rows):
        for j in range(cols):
            if basins_A[i, j] != no_val and basins_A[i, j] != basins_val[0]:
                basins_A[i, j] = no_val

    GIS.RasterLike(basins, basins_A, pathout)
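
The cell-by-cell double loop above touches every pixel in Python; the same masking can be done in one vectorized step with numpy. A sketch with the same behavior, assuming basins_A and no_val as defined above:

import numpy as np

def keep_first_basin(basins_A, no_val):
    # vectorized equivalent of the double loop: blank every cell that is
    # neither nodata nor the first (smallest) basin ID
    vals = np.unique(basins_A[basins_A != no_val])  # sorted unique basin IDs
    out = basins_A.copy()
    out[(out != no_val) & (out != vals[0])] = no_val
    return out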
Example 4
def loadTweets(fileRef, cfg):
    loaded = pd.DataFrame.from_csv(fileRef, index_col='created_at')
    loaded = loaded.reset_index()
    indexed = dict()
    for pos in loaded.index:
        entry = dict(loaded.irow(pos))
        if cfg['Sanitize']:
            entry = GISpy.sanitizeTweet(entry, cfg)
        indexed[makeKey(entry, indexKeys)] = entry
    return indexed
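
loadTweets relies on pandas APIs that have since been removed: DataFrame.from_csv was replaced by read_csv, and irow by iloc. A sketch of the same indexing with current pandas, assuming makeKey, indexKeys and GISpy.sanitizeTweet from the surrounding module:

import pandas as pd

def load_tweets(file_ref, cfg):
    loaded = pd.read_csv(file_ref)           # from_csv was removed from pandas
    indexed = dict()
    for entry in loaded.to_dict('records'):  # one dict per row, like dict(irow(pos))
        if cfg['Sanitize']:
            entry = GISpy.sanitizeTweet(entry, cfg)
        indexed[makeKey(entry, indexKeys)] = entry
    return indexed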
Example 5
def getDeltas(fileOld, fileNew, cfg, directory):
    loadedNew = loadTweets(fileNew, cfg)
    timeList = [entry['created_at'] for entry in loadedNew.values()]
    minTime = min(timeList)

    if not cfg['OneTimeDump']:
        loadedOld = {
            key: item
            for key, item in loadTweets(fileOld, cfg).iteritems()
            if item['created_at'] >= minTime
        }
    else:
        loadedOld = dict()

    merged = deepcopy(loadedOld)
    merged.update(loadedNew)

    newKeys = set(loadedNew.keys())
    oldKeys = set(loadedOld.keys())

    addedKeys = newKeys.difference(oldKeys)
    removedKeys = oldKeys.difference(newKeys)
    sameKeys = newKeys.intersection(oldKeys)
    updatedKeys = set([
        entry for entry in sameKeys
        if makeKey(loadedNew[entry], updateKeys) != makeKey(
            loadedOld[entry], updateKeys)
    ])
    expDir = 'studies/' + cfg['OutDir'] + cfg['Method'] + '/'

    timeStamp = GISpy.outTime(datetime.datetime.now())['db']
    wordWeight = getWordWeights(loadedNew, 5, expDir, timeStamp)
    meta = getMeta(cfg, expDir, timeStamp)
    fileLocs = [fileNew, wordWeight, meta]

    addedLoc = removedLoc = updatedLoc = 'null'

    if len(addedKeys) >= 1:
        if cfg['OneTimeDump']:
            descriptor = 'Dumped'
            operation = 'dump'
        else:
            descriptor = 'Added'
            operation = 'add'
        addedData = {
            key: value
            for key, value in merged.iteritems() if key in addedKeys
        }
        addExtra(addedData, {
            'operation': operation,
            'operationTime': timeStamp
        })
        addedLoc = writeCSV(addedData, expDir, descriptor, '')
        fileLocs.append(addedLoc)
    if len(removedKeys) >= 1:
        removedData = {
            key: value
            for key, value in merged.iteritems() if key in removedKeys
        }
        addExtra(removedData, {
            'operation': 'remove',
            'operationTime': timeStamp
        })
        removedLoc = writeCSV(removedData, expDir, "Removed", '')
        fileLocs.append(removedLoc)
    if len(updatedKeys) >= 1:
        updatedData = {
            key: value
            for key, value in merged.iteritems() if key in updatedKeys
        }
        addExtra(updatedData, {
            'operation': 'updated',
            'operationTime': timeStamp
        })
        updatedLoc = writeCSV(updatedData, expDir, "Updated", '')
        fileLocs.append(updatedLoc)

    GISpy.zipData(fileLocs, 'dbFiles/' + directory, 'DBFeed ', timeStamp, cfg)
    return {
        'wordWeight': wordWeight,
        'meta': meta,
        'added': addedLoc,
        'removed': removedLoc,
        'updated': updatedLoc
    }
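
Both loadTweets and getDeltas depend on makeKey(entry, keys), which is not shown in this listing. A plausible minimal stand-in (an assumption, not the project's actual implementation) builds a hashable tuple from the selected fields, so that key sets can be intersected and compared:

def makeKey(entry, keys):
    # hypothetical stand-in: a tuple of the selected fields is hashable and
    # two entries compare equal exactly when those fields match
    return tuple(entry[k] for k in keys)

indexKeys = ['id_str']                  # assumed: fields identifying a tweet
updateKeys = ['retweet_count', 'text']  # assumed: fields watched for changes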
Example 6
def SaveParameters(DistParFn, Raster, Par, No_parameters, snow, kub, klb, 
                   Path=None):
    """
    ============================================================
        SaveParameters(DistParFn, Raster, Par, No_parameters, snow, kub, klb, Path=None)
    ============================================================
    this function takes the parameters generated by the calibration algorithm,
    distributes them with a given function and saves them as rasters
    
    Inputs:
    ----------
        1-DistParFn:
            [function] function to distribute the parameters (all functions are
            in Hapi.DistParameters )
        2-Raster:
            [gdal.dataset] raster to get the spatial information
        3-Par:
            [list or numpy ndarray] parameters as 1D array or list
        4-No_parameters:
            [int] number of the parameters in the conceptual model
        5-snow:
            [integer] flag to define whether to take the parameters of
            the conceptual model with the snow subroutine (1) or without it (0)
        6-kub:
            [numeric] upper bound for the k parameter in the muskingum function
        7-klb:
            [numeric] lower bound for the k parameter in the muskingum function
        8-Path:
            [string] path to the folder you want to save the parameters in,
            default value is None (parameters are saved in the
            current directory)
     
    Outputs:
    ----------
         Rasters for parameters of the distributed model
     
   Examples:     
   ----------
        DemPath = path+"GIS/4000/dem4000.tif"
        Raster=gdal.Open(DemPath)
        ParPath = "par15_7_2018.txt"
        par=np.loadtxt(ParPath)
        klb=0.5
        kub=1
        no_parameters=12
        DistParFn=DP.par3dLumped
        Path="parameters/"
        snow=0
        
        SaveParameters(DistParFn, Raster, par, no_parameters,snow ,kub, klb,Path)
    """
    assert callable(DistParFn), " please check the function to distribute your parameters"
    assert type(Raster)==gdal.Dataset, "raster should be read using gdal (gdal dataset please read it using gdal library) "
    assert type(Par)==np.ndarray or type(Par)==list, "Par should be a 1d array or a list"
    assert type(No_parameters) == int, "No of parameters should be integer"
    assert isinstance(kub,numbers.Number) , " kub should be a number"
    assert isinstance(klb,numbers.Number) , " klb should be a number"
    # Path is optional, so validate it only when it is given
    if Path is not None:
        assert type(Path) == str, "Path should be of type string"
        assert os.path.exists(Path), Path + " you have provided does not exist"
    
    par2d=DistParFn(Par,Raster,No_parameters,kub,klb)
    # save 
    if snow == 0: # no snow subroutine
        pnme=["01rfcf.tif","02FC.tif", "03BETA.tif", "04ETF.tif", "05LP.tif", "06CFLUX.tif", "07K.tif",
              "08K1.tif","09ALPHA.tif", "10PERC.tif", "11Kmuskingum.tif", "12Xmuskingum.tif"]
    else: # there is a snow subroutine
        pnme=["01ltt.tif", "02utt.tif", "03rfcf.tif", "04sfcf.tif", "05ttm.tif", "06cfmax.tif", "07cwh.tif",
              "08cfr.tif", "09fc.tif", "10fc.tif", "11beta.tif","12etf.tif","13lp.tif","14cflux.tif",
              "15k.tif","16k1.tif","17alpha.tif","18perc.tif"]
        
    if Path is not None:
        pnme=[Path+i for i in pnme]

    for i in range(np.shape(par2d)[2]):
        GIS.RasterLike(Raster,par2d[:,:,i],pnme[i])
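
GIS.RasterLike is not shown in this listing; presumably it writes a 2D array as a GeoTIFF that copies the georeference and nodata of a template raster. A hedged sketch of that behavior with the plain GDAL API (the function name raster_like is illustrative):

from osgeo import gdal

def raster_like(src, array, path):
    # write `array` as a GeoTIFF with the georeference and nodata of `src`
    driver = gdal.GetDriverByName("GTiff")
    dst = driver.Create(path, src.RasterXSize, src.RasterYSize, 1,
                        gdal.GDT_Float32)
    dst.SetGeoTransform(src.GetGeoTransform())
    dst.SetProjection(src.GetProjection())
    band = dst.GetRasterBand(1)
    nodata = src.GetRasterBand(1).GetNoDataValue()
    if nodata is not None:
        band.SetNoDataValue(nodata)
    band.WriteArray(array)
    dst.FlushCache()  # flush the buffer to disk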
Example 7
def RunModel(ConceptualModel, Paths, ParPath, p2, init_st, snow):
    """
    =======================================================================
        RunModel(ConceptualModel, Paths, ParPath, p2, init_st, snow)
    =======================================================================
    this function runs the conceptual distributed hydrological model
    
    Inputs:
        1-ConceptualModel:
            [module/function] the conceptual model to be run (e.g. HBV)
        2-Paths:
            [List] list of the following five paths:
            1-PrecPath:
                [String] path to the Folder containing precipitation rasters
            2-Evap_Path:
                [String] path to the Folder containing Evapotranspiration rasters
            3-TempPath:
                [String] path to the Folder containing Temperature rasters
            4-FlowAccPath:
                [String] path to the Flow Accumulation raster of the catchment (it should
                include the raster name and extension)
            5-FlowDPath:
                [String] path to the Flow Direction raster of the catchment (it should
                include the raster name and extension)
        3-ParPath:
            [String] path to the Folder containing the parameter rasters of the catchment
        4-p2:
            [List] list of unoptimized parameters
            p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step
            p2[1] = catchment area in km2
        5-init_st:
            [list] initial values for the state variables [sp,sm,uz,lz,wc] in mm
        6-snow:
            [integer] flag, 0 to run the model without the snow subroutine
    Outputs:
        1- st:
            [4D array] state variables
        2- q_out:
            [1D array] calculated Discharge at the outlet of the catchment
        3- q_uz:
            [3D array] Distributed discharge for each cell
        4- q_lz:
            [1D array] lower zone discharge averaged over the catchment
    Example:
        PrecPath = "meteodata/4000/calib/prec"
        Evap_Path = "meteodata/4000/calib/evap"
        TempPath = "meteodata/4000/calib/temp"
        FlowAccPath = "GIS/4000/acc4000.tif"
        FlowDPath = "GIS/4000/fd4000.tif"
        Paths = [PrecPath, Evap_Path, TempPath, FlowAccPath, FlowDPath]
        ParPath = "meteodata/4000/parameters"
        p2 = [1, 227.31]
        st, q_out, q_uz, q_lz = RunModel(ConceptualModel, Paths, ParPath, p2,
                                         init_st, snow)
    """
    # input data validation
    assert len(Paths) == 5, "Paths should include 5 folder paths, only "+str(len(Paths))+" paths are provided"
    
    PrecPath=Paths[0]
    Evap_Path=Paths[1]
    TempPath=Paths[2]
#    DemPath=Paths[3]
    FlowAccPath=Paths[3]
    FlowDPath=Paths[4]
    
    # data type
    assert type(PrecPath)== str, "PrecPath input should be string type"
    assert type(Evap_Path)== str, "Evap_Path input should be string type"
    assert type(TempPath)== str, "TempPath input should be string type"
#    assert type(DemPath)== str, "DemPath input should be string type"
    assert type(FlowAccPath)== str, "FlowAccPath input should be string type"
    assert type(FlowDPath)== str, "FlowDPath input should be string type"
    assert type(ParPath)== str, "ParPath input should be string type"
    
    
    # input values
#    dem_ext=DemPath[-4:]
#    assert dem_ext == ".tif", "please add the extension at the end of the DEM raster path input"
    acc_ext=FlowAccPath[-4:]
    assert acc_ext == ".tif", "please add the extension at the end of the Flow accumulation raster path input"
    fd_ext=FlowDPath[-4:]
    assert fd_ext == ".tif", "please add the extension at the end of the Flow Direction path input"
    # check whether the path exists or not
    assert os.path.exists(PrecPath), PrecPath + " you have provided does not exist"
    assert os.path.exists(Evap_Path), Evap_Path+" path you have provided does not exist"
    assert os.path.exists(TempPath), TempPath+" path you have provided does not exist"
#    assert os.path.exists(DemPath), DemPath+ " you have provided does not exist"
    assert os.path.exists(FlowAccPath), FlowAccPath + " you have provided does not exist"
    assert os.path.exists(FlowDPath), FlowDPath+ " you have provided does not exist"
    # check whether the folder has the rasters or not
    assert len(os.listdir(PrecPath)) > 0, PrecPath+" folder you have provided is empty"
    assert len(os.listdir(Evap_Path)) > 0, Evap_Path+" folder you have provided is empty"
    assert len(os.listdir(TempPath)) > 0, TempPath+" folder you have provided is empty"
    
    # read data
    ### meteorological data
    prec=GIS.ReadRastersFolder(PrecPath)
    evap=GIS.ReadRastersFolder(Evap_Path)
    temp=GIS.ReadRastersFolder(TempPath)
    print("meteorological data are read successfully")
    
    #### GIS data
#    dem= gdal.Open(DemPath) 
    acc=gdal.Open(FlowAccPath)
    fd=gdal.Open(FlowDPath)
    print("GIS data are read successfully")
    
    # parameters
    parameters=GIS.ReadRastersFolder(ParPath)
    print("Parameters are read successfully")
    
    #run the model
    st, q_out, q_uz, q_lz = Wrapper.Dist_model(ConceptualModel, acc, fd, prec, evap,
                                               temp, parameters, p2, snow, init_st)
    
    return st, q_out, q_uz, q_lz
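
The same three checks (string type, existence, non-empty folder) recur for every meteorological folder here and again in RunCalibration below; a small helper (a sketch, not part of the library) keeps them in one place:

import os

def check_raster_folder(path, name):
    # hedged helper mirroring the assertions used above
    assert isinstance(path, str), name + " input should be string type"
    assert os.path.exists(path), path + " you have provided does not exist"
    assert len(os.listdir(path)) > 0, path + " folder you have provided is empty"

# usage: check_raster_folder(PrecPath, "PrecPath")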
Example 8
def Dist_HBV2(lakecell,q_lake,DEM,flow_acc,flow_acc_plan, sp_prec, sp_et, sp_temp, sp_pars, p2, init_st=None, 
                ll_temp=None, q_0=None):
    '''
    Runs the HBV model spatially distributed over the domain: the soil moisture
    (SM) and upper zone (UZ) are simulated per cell, and the resulting cell
    discharges are then routed downstream
    '''
    
    n_steps = sp_prec.shape[2] + 1 # no of time steps = length of the time series + 1
    # initialise vector of nans to fill states
    dummy_states = np.empty([n_steps, 5]) # [sp,sm,uz,lz,wc]
    dummy_states[:] = np.nan
    
    # Get the mask
    mask, no_val = GISpy.get_mask(DEM)
    x_ext, y_ext = mask.shape # shape of the fpl raster (rows, columns); here rows are indexed by x and columns by y
    
    # Get deltas of pixel
    geo_trans = DEM.GetGeoTransform() # get the coordinates of the top left corner and cell size [x,dx,y,dy]
    dx = np.abs(geo_trans[1])/1000.0  # dx in Km
    dy = np.abs(geo_trans[-1])/1000.0  # dy in Km
    px_area = dx*dy  # area of the cell
    
    # Enumerate the total number of pixels in the catchment
    tot_elem = np.sum(np.sum([[1 for elem in mask_i if elem != no_val] for mask_i in mask])) # get row by row and search [mask_i for mask_i in mask]
    
    # total pixel area
    px_tot_area = tot_elem*px_area # total area of pixels 
    
    
    st = []  # Spatially distributed states
    q_lz = []
    q_uz = []
    #------------------------------------------------------------------------------
    for x in range(x_ext): # no of rows
        st_i = []
        q_lzi = []
        q_uzi = []
    #        q_out_i = []
    # run all cells in one row ----------------------------------------------------
        for y in range(y_ext): # no of columns
            if mask [x, y] != no_val:  # only for cells in the domain
                # Calculate the states per cell
                # TODO optimise for multiprocessing these loops   
                _, _st, _uzg, _lzg = HBV.simulate_new_model(avg_prec = sp_prec[x, y,:], 
                                              temp = sp_temp[x, y,:], 
                                              et = sp_et[x, y,:], 
                                              par = sp_pars[x, y, :], 
                                              p2 = p2, 
                                              init_st = init_st, 
                                              ll_temp = None, 
                                              q_0 = q_0,
                                              extra_out = True)
                # append column after column in the same row
                st_i.append(np.array(_st))
                # lower zone discharge Q_lz = K1 * LZ
                q_lz_temp=np.array(sp_pars[x, y, 6])*_lzg
                q_lzi.append(q_lz_temp)
                # upper zone discharge Q_uz = K * UZ**(1 + alpha)
                q_uz_temp = np.array(sp_pars[x, y, 5])*(np.power(_uzg, (1.0 + sp_pars[x, y, 7])))
                q_uzi.append(q_uz_temp)
                
    #                print("total = "+str(fff)+"/"+str(tot_elem)+" cell, row= "+str(x+1)+" column= "+str(y+1) )
            else: # if the cell is novalue-------------------------------------
                # Fill the empty cells with a nan vector
                st_i.append(dummy_states) # fill all states(5 states) for all time steps = nan
                q_lzi.append(dummy_states[:,0]) # q lower zone =nan  for all time steps = nan
                q_uzi.append(dummy_states[:,0]) # q upper zone =nan  for all time steps = nan
        # store row by row
        st.append(st_i) # state variables
        q_lz.append(np.array(q_lzi)) # lower zone discharge mm/timestep
        q_uz.append(np.array(q_uzi)) # upper zone routed discharge mm/timestep
    #------------------------------------------------------------------------------            
    # convert to arrays 
    st = np.array(st)
    q_lz = np.array(q_lz)
    q_uz = np.array(q_uz)
    #%% convert quz from mm/time step to m3/sec
    area_coef=p2[1]/px_tot_area
    q_uz=q_uz*px_area*area_coef/(p2[0]*3.6)
    
    no_cells=list(set([flow_acc_plan[i,j] for i in range(x_ext) for j in range(y_ext) if not np.isnan(flow_acc_plan[i,j])]))
#    no_cells=list(set([int(flow_acc_plan[i,j]) for i in range(x_ext) for j in range(y_ext) if flow_acc_plan[i,j] != no_val]))
    no_cells.sort()

    #%% routing lake discharge with DS cell k & x and adding to cell Q
    q_lake=Routing.muskingum(q_lake,q_lake[0],sp_pars[lakecell[0],lakecell[1],10],sp_pars[lakecell[0],lakecell[1],11],p2[0])
    q_lake=np.append(q_lake,q_lake[-1])
    # both lake & Quz are in m3/s
    #new
    q_uz[lakecell[0],lakecell[1],:]=q_uz[lakecell[0],lakecell[1],:]+q_lake
    #%% cells at the divider
    q_uz_routed=np.zeros_like(q_uz)*np.nan
    # for all cell with 0 flow acc put the q_uz
    for x in range(x_ext): # no of rows
        for y in range(y_ext): # no of columns
            if mask [x, y] != no_val and flow_acc_plan[x, y]==0: 
                q_uz_routed[x,y,:]=q_uz[x,y,:]        
    #%% new
    for j in range(1,len(no_cells)):
        for x in range(x_ext): # no of rows
            for y in range(y_ext): # no of columns
                    # check from total flow accumulation 
                    if mask [x, y] != no_val and flow_acc_plan[x, y]==no_cells[j]:
#                        print(no_cells[j])
                        q_r=np.zeros(n_steps)
                        for i in range(len(flow_acc[str(x)+","+str(y)])): #  no_cells[j]
                            # bring the indexes of the us cell
                            x_ind=flow_acc[str(x)+","+str(y)][i][0]
                            y_ind=flow_acc[str(x)+","+str(y)][i][1]
                            # sum the Q of the US cells (already routed for its cell)
                             # route each upstream flow with its own k & x first, then sum
                            q_r=q_r+Routing.muskingum(q_uz_routed[x_ind,y_ind,:],q_uz_routed[x_ind,y_ind,0],sp_pars[x_ind,y_ind,10],sp_pars[x_ind,y_ind,11],p2[0]) 
#                        q=q_r
                         # add the routed upstream flows to the current Quz in the cell
                        q_uz_routed[x,y,:]=q_uz[x,y,:]+q_r
    #%% check if the max flow _acc is at the outlet
#    if tot_elem != np.nanmax(flow_acc_plan):
#        raise ("flow accumulation plan is not correct")
    # outlet is the cell that has the max flow_acc
    outlet=np.where(flow_acc_plan==np.nanmax(flow_acc_plan))
    outletx=outlet[0][0]
    outlety=outlet[1][0]              
    #%%
    q_lz = np.array([np.nanmean(q_lz[:,:,i]) for i in range(n_steps)]) # average of all cells (not routed mm/timestep)
    # convert Qlz to m3/sec 
    q_lz = q_lz* p2[1]/ (p2[0]*3.6) # generation
    
    q_out = q_lz + q_uz_routed[outletx,outlety,:]    

    return q_out, st, q_uz_routed, q_lz, q_uz
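
Routing.muskingum is called above with (inflow, initial outflow, k, x, dt) but its body is not in this listing. A sketch of a standard Muskingum scheme consistent with that signature (an assumption, not the library's code), together with the mm/timestep to m3/s conversion used for q_uz and q_lz:

import numpy as np

def muskingum(inflow, q_initial, k, x, dt):
    # standard Muskingum routing; k [hr], x [-], dt [hr]
    denom = 2.0*k*(1.0 - x) + dt
    c1 = (dt - 2.0*k*x)/denom
    c2 = (dt + 2.0*k*x)/denom
    c3 = (2.0*k*(1.0 - x) - dt)/denom   # c1 + c2 + c3 == 1
    out = np.zeros(len(inflow))
    out[0] = q_initial
    for t in range(1, len(inflow)):
        out[t] = c1*inflow[t] + c2*inflow[t-1] + c3*out[t-1]
    return out

def mm_per_step_to_m3s(q_mm, area_km2, tfac):
    # 1 mm of depth over 1 km2 is 1000 m3, and one step lasts tfac hours
    # (3600*tfac seconds), so q*area*1000/(3600*tfac) = q*area/(3.6*tfac),
    # the same factor p2[1]/(p2[0]*3.6) that appears in the code above
    return q_mm*area_km2/(3.6*tfac)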
Example 9
def RunCalibration(ConceptualModel,
                   Paths,
                   Basic_inputs,
                   SpatialVarFun,
                   SpatialVarArgs,
                   OF,
                   OF_args,
                   Q_obs,
                   OptimizationArgs,
                   printError=None):
    """
    =======================================================================
        RunCalibration(ConceptualModel, Paths, Basic_inputs, SpatialVarFun, SpatialVarArgs, OF, OF_args, Q_obs, OptimizationArgs, printError=None)
    =======================================================================
    this function calibrates the conceptual distributed hydrological model
    
    Inputs:
    ----------
        1-ConceptualModel:
            [module/function] the conceptual model to be calibrated (e.g. HBV)
        2-Paths:
            1-PrecPath:
                [String] path to the Folder contains precipitation rasters
            2-Evap_Path:
                [String] path to the Folder contains Evapotranspiration rasters
            3-TempPath:
                [String] path to the Folder contains Temperature rasters
            4-FlowAccPath:
                [String] path to the Flow Accumulation raster of the catchment (it should
                include the raster name and extension)
            5-FlowDPath:
                [String] path to the Flow Direction raster of the catchment (it should
                include the raster name and extension)
        
        3-Basic_inputs:
            1-p2:
                [List] list of unoptimized parameters
                p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step
                p2[1] = catchment area in km2
            2-init_st:
                [list] initial values for the state variables [sp,sm,uz,lz,wc] in mm
            3-UB:
                [Numeric] upper bound of the values of the parameters
            4-LB:
                [Numeric] Lower bound of the values of the parameters
            5-snow:
                [integer] flag, 0 to run the model without the snow subroutine
        4-SpatialVarFun:
            [function] function to distribute the parameters spatially
            (the distribution functions are in Hapi.DistParameters); for the
            lumped HBV model used, the following order of parameters is assumed
            [ltt, utt, rfcf, sfcf, ttm, cfmax, cwh, cfr, fc, beta, e_corr, etf, lp,
            c_flux, k, k1, alpha, perc, pcorr, Kmuskingum, Xmuskingum]
        5-SpatialVarArgs:
            [list] extra arguments of the spatial distribution function
        6-OF:
            [function] objective function used to calculate the performance of
            the model during the calibration
        7-OF_args:
            [list] extra arguments needed by the objective function
        8-Q_obs:
            [Numeric] Observed values of discharge
        9-OptimizationArgs:
            [list] optimization arguments: store_history flag (0 or 1) and
            history_fname (a ".txt" file name to store the history in)
        10-printError:
            [int] if not 0, the objective function value and the parameters
            are printed at each iteration; default is None
            
    Outputs:
    ----------
        1- res:
            the result returned by the optimizer: the best value of the
            objective function and the corresponding parameter set
    
    Example:
    ----------
        PrecPath = "meteodata/4000/calib/prec"
        Evap_Path = "meteodata/4000/calib/evap"
        TempPath = "meteodata/4000/calib/temp"
        FlowAccPath = "GIS/4000/acc4000.tif"
        FlowDPath = "GIS/4000/fd4000.tif"
        Paths = [PrecPath, Evap_Path, TempPath, FlowAccPath, FlowDPath]
        res = RunCalibration(ConceptualModel, Paths, Basic_inputs, SpatialVarFun,
                             SpatialVarArgs, OF, OF_args, Q_obs, OptimizationArgs)
    """
    ### inputs validation
    # data type

    assert len(Paths) == 5, "Paths should include 5 folder paths, only " + str(
        len(Paths)) + " paths are provided"

    PrecPath = Paths[0]
    Evap_Path = Paths[1]
    TempPath = Paths[2]
    #    DemPath=Paths[3]
    FlowAccPath = Paths[3]
    FlowDPath = Paths[4]

    assert type(PrecPath) == str, "PrecPath input should be string type"
    assert type(Evap_Path) == str, "Evap_Path input should be string type"
    assert type(TempPath) == str, "TempPath input should be string type"
    #    assert type(DemPath)== str, "DemPath input should be string type"
    assert type(FlowAccPath) == str, "FlowAccPath input should be string type"
    assert type(FlowDPath) == str, "FlowDPath input should be string type"

    # input values
    #    dem_ext=DemPath[-4:]
    #    assert dem_ext == ".tif", "please add the extension at the end of the DEM raster path input"
    acc_ext = FlowAccPath[-4:]
    assert acc_ext == ".tif", "please add the extension at the end of the Flow accumulation raster path input"
    fd_ext = FlowDPath[-4:]
    assert fd_ext == ".tif", "please add the extension at the end of the Flow Direction path input"

    # check whether the path exists or not
    assert os.path.exists(
        PrecPath), PrecPath + " you have provided does not exist"
    assert os.path.exists(
        Evap_Path), Evap_Path + " path you have provided does not exist"
    assert os.path.exists(
        TempPath), TempPath + " path you have provided does not exist"
    #    assert os.path.exists(DemPath), DemPath+ " you have provided does not exist"
    assert os.path.exists(
        FlowAccPath), FlowAccPath + " you have provided does not exist"
    assert os.path.exists(
        FlowDPath), FlowDPath + " you have provided does not exist"

    # check whether the folder has the rasters or not
    assert len(os.listdir(
        PrecPath)) > 0, PrecPath + " folder you have provided is empty"
    assert len(os.listdir(
        Evap_Path)) > 0, Evap_Path + " folder you have provided is empty"
    assert len(os.listdir(
        TempPath)) > 0, TempPath + " folder you have provided is empty"

    # basic inputs
    # check if all inputs are included
    assert all(
        key in Basic_inputs.keys()
        for key in ["p2", "init_st", "UB", "LB", "snow"]
    ), "Basic_inputs should contain ['p2','init_st','UB','LB','snow']"

    p2 = Basic_inputs['p2']
    init_st = Basic_inputs["init_st"]
    UB = Basic_inputs['UB']
    LB = Basic_inputs['LB']
    snow = Basic_inputs['snow']

    assert len(UB) == len(LB), "length of UB should be the same as LB"

    # check objective_function
    assert callable(OF), "OF should be a callable objective function"

    if OF_args is None:
        OF_args = []

    # read data
    ### meteorological data
    prec = GIS.ReadRastersFolder(PrecPath)
    evap = GIS.ReadRastersFolder(Evap_Path)
    temp = GIS.ReadRastersFolder(TempPath)
    print("meteorological data are read successfully")
    #### GIS data
    #    dem= gdal.Open(DemPath)
    acc = gdal.Open(FlowAccPath)
    fd = gdal.Open(FlowDPath)
    print("GIS data are read successfully")

    ### optimization

    # get arguments
    store_history = OptimizationArgs[0]
    history_fname = OptimizationArgs[1]
    # check the optimization arguments
    assert store_history == 0 or store_history == 1, "store_history should be 0 or 1"
    assert type(
        history_fname) == str, "history_fname should be of type string "
    assert history_fname[
        -4:] == ".txt", "history_fname should be a txt file, please change the extension or add .txt at the end of the history_fname"

    print('Calibration starts')

    ### calculate the objective function
    def opt_fun(par):
        try:
            # parameters
            klb = float(par[-2])
            kub = float(par[-1])
            par = par[:-2]

            par_dist = SpatialVarFun(par, *SpatialVarArgs, kub=kub, klb=klb)

            #run the model
            _, q_out, q_uz_routed, q_lz_trans = Wrapper.Dist_model(
                ConceptualModel, acc, fd, prec, evap, temp, par_dist, p2, snow,
                init_st)

            # calculate performance of the model
            try:
                error = OF(Q_obs, q_out, q_uz_routed, q_lz_trans, *OF_args)
            except TypeError:  # the objective function got fewer inputs than it needs
                raise TypeError("the objective function you have entered needs more inputs, please enter them in a list as OF_args")

            # print error
            if printError != 0:
                print(error)
                print(par)

            fail = 0
        except Exception:
            # any failure during a model run is reported to the optimizer as
            # an infeasible point
            error = np.nan
            fail = 1

        return error, [], fail

    ### define the optimization components
    opt_prob = Optimization('HBV Calibration', opt_fun)
    for i in range(len(LB)):
        opt_prob.addVar('x{0}'.format(i), type='c', lower=LB[i], upper=UB[i])

    print(opt_prob)

    opt_engine = ALHSO(etol=0.0001, atol=0.0001, rtol=0.0001, stopiters=10,
                       hmcr=0.5, par=0.5)

    Optimizer.__init__(
        opt_engine,
        def_options={
            'hms': [int, 9],                   # Memory Size [1,50]
            'hmcr': [float, 0.95],             # Probability rate of choosing from memory [0.7,0.99]
            'par': [float, 0.99],              # Pitch adjustment rate [0.1,0.99]
            'dbw': [int, 2000],                # Variable Bandwidth Quantization
            'maxoutiter': [int, 2e3],          # Maximum Number of Outer Loop Iterations (Major Iterations)
            'maxinniter': [int, 2e2],          # Maximum Number of Inner Loop Iterations (Minor Iterations)
            'stopcriteria': [int, 1],          # Stopping Criteria Flag
            'stopiters': [int, 20],            # Consecutive Number of Outer Iterations for which the Stopping Criteria must be Satisfied
            'etol': [float, 0.0001],           # Absolute Tolerance for Equality constraints
            'itol': [float, 0.0001],           # Absolute Tolerance for Inequality constraints
            'atol': [float, 0.0001],           # Absolute Tolerance for Objective Function
            'rtol': [float, 0.0001],           # Relative Tolerance for Objective Function
            'prtoutiter': [int, 0],            # Number of Iterations Before Print Outer Loop Information
            'prtinniter': [int, 0],            # Number of Iterations Before Print Inner Loop Information
            'xinit': [int, 0],                 # Initial Position Flag (0 - no position, 1 - position given)
            'rinit': [float, 1.0],             # Initial Penalty Factor
            'fileout': [int, store_history],   # Flag to Turn On Output to filename
            'filename': [str, 'parameters.txt'],  # We could probably remove fileout flag if filename or fileinstance is given
            'seed': [float, 0.5],              # Random Number Seed (0 - Auto-Seed based on time clock)
            'scaling': [int, 1],               # Design Variables Scaling Flag (0 - no scaling, 1 - scaling between [-1,1])
        })

    res = opt_engine(opt_prob)

    return res
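
opt_fun above follows the pyOpt objective convention: it returns a tuple (objective value, constraint list, fail flag), and the optimizer treats fail=1 as an infeasible point. A minimal stand-alone example of the same pattern; the bounds and variable count are illustrative, and it assumes the Optimization/ALHSO classes imported by this module behave like stock pyOpt (which also requires declaring the objective with addObj):

import numpy as np

def opt_fun_demo(par):
    # pyOpt objective contract: return (f, g, fail); g is the list of
    # constraint values (empty here), fail=1 marks a failed evaluation
    try:
        error = float(np.sum((np.asarray(par) - 0.5)**2))
        fail = 0
    except Exception:
        error = np.nan
        fail = 1
    return error, [], fail

opt_prob = Optimization('demo', opt_fun_demo)
for i in range(3):
    opt_prob.addVar('x{0}'.format(i), type='c', lower=0.0, upper=1.0)
opt_prob.addObj('f')  # declare the objective (assumed required, as in stock pyOpt)
res = ALHSO(etol=0.0001, atol=0.0001, rtol=0.0001, stopiters=10)(opt_prob)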