Example #1
    def overview(self, msr, pollution_threshold=2):
        """Give the overview of costs and st.dev for dem and minor embankments.
        """
        msr_type = msr.settings.loc['msr_type', 1]
        name = msr_type + '_' + msr.settings.loc['ID']

        # Separate between clean and polluted areas
        area_clean = pcr.ifthen(self.pollution_zones >= pollution_threshold,\
                                pcr.nominal(1))
        area_polluted = pcr.ifthen(self.pollution_zones < pollution_threshold,\
                                   pcr.nominal(1))

        area_clean = pcr.defined(msr.area) & pcr.defined(area_clean)
        area_clean = pcr.ifthen(area_clean, pcr.boolean(1))
        area_polluted = pcr.defined(msr.area) &\
                                    pcr.defined(area_polluted)
        area_polluted = pcr.ifthen(area_polluted, pcr.boolean(1))

        # Calculate costs and stddev for all earthwork types.
        flpl_low_values = self.dem_lowering(msr, area_polluted)
        minemb_low_values = self.minemb_lowering(msr, area_polluted)
        groyne_lowering_values = self.groyne_lowering(msr)
        cost_ew = pd.concat(
            [flpl_low_values, groyne_lowering_values, minemb_low_values])

        cost_df = cost_ew.iloc[:, 0:1].T
        cost_df.index = [name]
        std_df = cost_ew.iloc[:, 1:2].T
        std_df.index = [name]
        return cost_df, std_df
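
A hypothetical call; earthwork stands for the (not shown) class instance this method belongs to, and msr for a measure object carrying settings and area attributes:

cost_df, std_df = earthwork.overview(msr, pollution_threshold=2)
print(cost_df)  # one row per measure, one column per earthwork cost item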
Example #2
def getValAtPoint(in_map, xcor, ycor):
    """
    Returns the value in a map at the given point.
    Works, but is rather slow.

    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate
        - ycor - y coordinate

    Output:
        - value
    """
    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.defined(in_map)), np.nan)
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.defined(in_map)), np.nan)
    XX = pcr.pcr2numpy(pcr.celllength(), 0.0)
    themap = pcr.pcr2numpy(in_map, np.nan)
    tolerance = 0.5  # takes a single point

    diffx = x - xcor
    diffy = y - ycor
    col_ = np.absolute(diffx) <= (XX[0, 0] * tolerance)  # cellsize
    row_ = np.absolute(diffy) <= (XX[0, 0] * tolerance)  # cellsize
    point = col_ * row_
    pt = point.argmax()

    return themap.ravel()[pt]
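
A minimal check on an assumed 5x5 clone (default unittrue coordinates; all names below are illustrative):

import numpy as np
import pcraster as pcr

pcr.setclone(5, 5, 1.0, 0.0, 5.0)  # nrRows, nrCols, cellSize, west, north
in_map = pcr.numpy2pcr(pcr.Scalar, np.arange(25.0).reshape(5, 5), -9999.0)
print(getValAtPoint(in_map, 2.5, 2.5))  # centre cell -> 12.0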
Example #3
def representativePoint(nominalMap):
    """Select a representative point for a nominal map
    """
    pcr.setglobaloption('unitcell')
    filled = pcr.cover(nominalMap, 0)
    edges = pcr.windowdiversity(filled, 3) > 1
    edges = pcr.ifthen(pcr.defined(nominalMap), edges)
    edges = map_edges(nominalMap) | edges
    dist = pcr.spread(edges, 0, 1)
    dist = dist + pcr.uniform(pcr.defined(nominalMap))
    points = dist == pcr.areamaximum(dist, nominalMap)
    return pcr.ifthen(points, nominalMap)
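
Hypothetical usage; map_edges (shown in a later example) is assumed to be importable from the same module, and the file name is a placeholder:

import pcraster as pcr

pcr.setclone("sections.map")             # placeholder clone/input map
sections = pcr.readmap("sections.map")   # nominal map of regions
pcr.report(representativePoint(sections), "section_points.map")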
Example #4
def checkerboard(mapin, fcc):
    """
    checkerboard creates a checkerboard map with unique ids in
    fcc*fcc cell blocks. The resulting map can be used
    to derive statistics for (later) upscaling of maps (using the fcc factor).

    .. warning:: use with the unitcell global option to get the most reliable results!

    Input:
        - map (used to determine coordinates)
        - fcc (size of the areas in cells)

    Output:
        - checkerboard type map
    """
    msker = pcr.defined(mapin)
    ymin = pcr.mapminimum(pcr.ycoordinate(msker))
    yc = (pcr.ycoordinate(msker) - ymin) / pcr.celllength()
    yc = pcr.rounddown(yc / fcc)
    xmin = pcr.mapminimum(pcr.xcoordinate(msker))
    xc = (pcr.xcoordinate(msker) - xmin) / pcr.celllength()
    xc = pcr.rounddown(xc / fcc)

    # offset the row ids so every fcc*fcc block gets a unique id
    yc = yc * (pcr.mapmaximum(xc) + 1.0)

    xy = pcr.ordinal(xc + yc)

    return xy
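
A small sketch on an assumed 8x8 clone; with fcc=4 this yields four 4x4 blocks with ids 0..3:

import pcraster as pcr

pcr.setclone(8, 8, 1.0, 0.0, 8.0)
pcr.setglobaloption('unitcell')  # as the docstring warns
blocks = checkerboard(pcr.spatial(pcr.scalar(1.0)), 4)
pcr.report(blocks, "blocks.map")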
Example #5
def snaptomap(points, mmap):
    """
    Snap the points in _points_ to nearest non missing
    values in _mmap_. Can be used to move gauge locations
    to the nearest rivers.

    Input:
        - points - map with points to move
        - mmap - map with points to move to

    Return:
        - map with shifted points
    """
    points = pcr.cover(points, 0)
    # Create unique id map of mmap cells
    unq = pcr.nominal(pcr.cover(pcr.uniqueid(pcr.defined(mmap)), pcr.scalar(0.0)))
    # Now fill holes in mmap with values indicating the closest mmap cell.
    dist_cellid = pcr.scalar(pcr.spreadzone(unq, 0, 1))
    # Get map with values at locations in points with the closest mmap cell
    dist_cellid = pcr.ifthenelse(points > 0, dist_cellid, 0)
    # Spread this out
    dist_fill = pcr.spreadzone(pcr.nominal(dist_cellid), 0, 1)
    # Find the new (moved) locations
    npt = pcr.uniqueid(pcr.boolean(pcr.ifthen(dist_fill == unq, unq)))
    # Now recreate the original values in the points map
    ptcover = pcr.spreadzone(pcr.cover(points, 0), 0, 1)
    # Now get the original point value in the pt map
    nptorg = pcr.ifthen(npt > 0, ptcover)

    return nptorg
Example #6
    def __init__(self, configuration, currTimeStep, initialState = None):
        self._configuration = configuration
        self._modelTime = currTimeStep
        
        pcr.setclone(configuration.cloneMap)

        # Read the ldd map.
        self.lddMap = vos.readPCRmapClone(\
                  configuration.routingOptions['lddMap'],
                  configuration.cloneMap,configuration.tmpDir,configuration.globalOptions['inputDir'],True)
        # ensure the ldd map is correct, and actually of type "ldd"
        self.lddMap = pcr.lddrepair(pcr.ldd(self.lddMap))
 
        if configuration.globalOptions['landmask'] != "None":
            self.landmask = vos.readPCRmapClone(\
            configuration.globalOptions['landmask'],
            configuration.cloneMap,configuration.tmpDir,configuration.globalOptions['inputDir'])
        else:
            self.landmask = pcr.defined(self.lddMap)
        
        # defining catchment areas
        self.catchment_class = 1.0
        
        # number of upperSoilLayers:
        self.numberOfSoilLayers = int(configuration.landSurfaceOptions['numberOfUpperSoilLayers'])

        self.createSubmodels(initialState)
Example #7
    def initial(self):
        """ initial part of the polder
         module
        """
        # ************************************************************
        # ***** POLDERS
        # ************************************************************
        settings = LisSettings.instance()
        option = settings.options
        binding = settings.binding

        if option['simulatePolders']:

            PolderSites = loadmap('PolderSites')
            PolderSites = pcraster.ifthen(
                (pcraster.defined(PolderSites) & self.var.IsChannel),
                PolderSites)
            # Get rid of any polders that are not part of the channel network
            # IMPORTANT: current implementation can become unstable with kin.
            # wave!!

            # Flag that is boolean(1) for polder sites and boolean(0) otherwise
            # total storage capacity of Polder area [m3]
            PolderArea = pcraster.lookupscalar(str(binding['TabPolderArea']),
                                               PolderSites)
            PolderLevel = binding['PolderInitialLevelValue']
            # Initial polder level [m]
            self.var.PolderStorageIniM3 = pcraster.cover(
                PolderLevel * PolderArea, pcraster.scalar(0.0))
            # Compute polder storage [m3]
            self.var.PolderStorageM3 = self.var.PolderStorageIniM3
Example #8
    def __init__(self, configuration, currTimeStep, initialState = None):
        self._configuration = configuration
        self._modelTime = currTimeStep
        
        pcr.setclone(configuration.cloneMap)

        # Read the ldd map.
        self.lddMap = vos.readPCRmapClone(\
                  configuration.routingOptions['lddMap'],
                  configuration.cloneMap,configuration.tmpDir,configuration.globalOptions['inputDir'],True)
        # ensure the ldd map is correct, and actually of type "ldd"
        self.lddMap = pcr.lddrepair(pcr.ldd(self.lddMap))
 
        if configuration.globalOptions['landmask'] != "None":
            self.landmask = vos.readPCRmapClone(\
            configuration.globalOptions['landmask'],
            configuration.cloneMap,configuration.tmpDir,configuration.globalOptions['inputDir'])
        else:
            self.landmask = pcr.defined(self.lddMap)
        
        # ADDED: variables necessary for 2-way coupling functions
        # ----------------------------------------------------------------------------------------------------------------
        # variable to control activation of 2-way coupling functions (can be changed through BMI)
        self.ActivateCoupling = self._configuration.globalOptions['ActivateCoupling']
        # ----------------------------------------------------------------------------------------------------------------
        
        # defining catchment areas
        self.catchment_class = 1.0
        
        # number of upperSoilLayers:
        self.numberOfSoilLayers = int(configuration.landSurfaceOptions['numberOfUpperSoilLayers'])

        self.createSubmodels(initialState)
Example #9
 def spatial(self):
     """Computes requruired biosafe output for a spatial domain"""
     
     #-determine a representative point for each floodplain section
     points = pcrr.representativePoint(self.sections)
     clone = pcr.defined(self.sections)
     pcr.setglobaloption('unittrue')
     xcoor = pcr.xcoordinate(clone)
     ycoor = pcr.ycoordinate(clone)
     geoDf = pcrr.getCellValues(points, \
                             mapList = [points, xcoor, ycoor],\
                             columns = ['ID', 'xcoor', 'ycoor'])        
     geoDf.set_index('ID', inplace=True, drop=False)
     geoDf.drop(['rowIdx', 'colIdx', 'ID'], axis=1, inplace=True)
     
     #-compute the required biosafe parameters for all sections
     sectionIDs = np.unique(pcr.pcr2numpy(self.sections,-9999))[1:]
     ll = []
     for sectionID in sectionIDs:
         ll.append(self.sectionScores(sectionID))
     paramLL = zip(*ll)
             
     dfParamLL = []
     for ii in range(len(self.params)):
         bsScores = pd.concat(paramLL[ii], axis=1).T
         bsScores = bsScores.join(geoDf)
         bsScores.index.name = 'ID'
         bsScores.columns.name = self.params[ii]
         dfParamLL.append(bsScores)
     
     return dfParamLL
Example #10
def getMinMaxMean(mapFile, ignoreEmptyMap=False):
    mn = pcr.cellvalue(pcr.mapminimum(mapFile), 1)[0]
    mx = pcr.cellvalue(pcr.mapmaximum(mapFile), 1)[0]
    # number of non-missing cells
    nrValues = pcr.cellvalue(pcr.maptotal(pcr.scalar(pcr.defined(mapFile))), 1)[0]
    if nrValues == 0.0 and ignoreEmptyMap:
        return 0.0, 0.0, 0.0
    else:
        return mn,mx,(getMapTotal(mapFile) / nrValues)
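
A quick check, assuming getMapTotal (a companion helper that sums all cell values) is in scope; the 3x3 clone is illustrative:

import numpy as np
import pcraster as pcr

pcr.setclone(3, 3, 1.0, 0.0, 3.0)
m = pcr.numpy2pcr(pcr.Scalar, np.arange(1.0, 10.0).reshape(3, 3), -9999.0)
print(getMinMaxMean(m))  # expected: (1.0, 9.0, 5.0)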
Example #11
 def forest_removal(self, area):
     """Calculate costs and st.dev of forest removal."""
     forested_area = self.smoothing_classes == 3
     removal_area = pcr.ifthen((pcr.defined(area) & forested_area),
                               pcr.boolean(1))
     cost = area_total_value(self.smoothing_distr.mean, removal_area)
     std = area_total_value(self.smoothing_distr.stddev, removal_area)
     return cost, std
Example #12
 def mapFilling(self, map_with_MV, map_without_MV, method = "window_average"):
 
     # ----- method 1: inverse distance method (but too slow)
     if method == "inverse_distance":
         logger.info('Extrapolation using "inverse distance" in progress!')
         #
         # - interpolation mask for cells without values
         interpolatedMask = pcr.ifthenelse(\
                            pcr.defined(map_with_MV),\
                            pcr.boolean(0),\
                            pcr.boolean(1),)
         map_with_MV_intrpl = pcr.inversedistance(interpolatedMask, \
                                                map_with_MV, 2, 1.50, 25)
     #
     else: # method 2: using window average
         logger.info('Extrapolation using "modified window average" in progress!')
         #
         map_with_MV_intrpl = 0.70 * pcr.windowaverage(map_with_MV, 1.50) + \
                              0.25 * pcr.windowaverage(map_with_MV, 2.00) + \
                              0.05 * pcr.windowaverage(map_with_MV, 2.50) + \
                              pcr.scalar(0.0)
     #
     # - interpolated values are only introduced in cells with MV 
     map_with_MV_intrpl = pcr.cover(map_with_MV, map_with_MV_intrpl)
     #
     # - calculating weight factor:
     weight_factor = pcr.scalar(pcr.defined(map_with_MV))
     weight_factor = pcr.windowaverage(0.70*weight_factor, 1.50) +\
                     pcr.windowaverage(0.25*weight_factor, 2.00) +\
                     pcr.windowaverage(0.05*weight_factor, 2.50)
     weight_factor = pcr.min(1.0, weight_factor)
     weight_factor = pcr.max(0.0, weight_factor)
     weight_factor = pcr.cover(weight_factor, 0.0)
     #
     # merge with weight factor
     merged_map = weight_factor  * map_with_MV_intrpl + \
           (1.0 - weight_factor) * map_without_MV
     #
     # retain the original values and make sure that all values are covered
     filled_map = pcr.cover(map_with_MV, merged_map)
     filled_map = pcr.cover(filled_map, map_without_MV)
 
     logger.info('Extrapolation is done!')
     return filled_map
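
The window-average branch can be exercised on its own; a sketch with an assumed 10x10 clone (logger calls dropped, values illustrative):

import pcraster as pcr

pcr.setclone(10, 10, 1.0, 0.0, 10.0)
obs = pcr.ifthen(pcr.uniform(pcr.spatial(pcr.boolean(1))) < 0.2,
                 pcr.spatial(pcr.scalar(5.0)))  # sparse "observed" field
background = pcr.spatial(pcr.scalar(2.0))       # field without missing values
blend = 0.70 * pcr.windowaverage(obs, 1.50) + \
        0.25 * pcr.windowaverage(obs, 2.00) + \
        0.05 * pcr.windowaverage(obs, 2.50)
filled = pcr.cover(obs, blend, background)      # originals win, then blend, then background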
Example #13
def points_to_map(in_map, xcor, ycor, tolerance):
    """
    Returns a map with non zero values at the points defined
    in X, Y pairs. Its goal is to replace the PCRaster col2map program.

    tolerance should be 0.5 to select single points.
    Performance is not very good and scales linearly with the number of points.


    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate (array or single value)
        - ycor - y coordinate (array or single value)
        - tolerance - tolerance in cell units. 0.5 selects a single cell;
          10 would select a 10x10 block of cells

    Output:
        - Map with values burned in. 1 for first point, 2 for second and so on
    """
    point = in_map * 0.0

    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.defined(in_map)), np.nan)
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.defined(in_map)), np.nan)
    cell_length = float(pcr.celllength())

    # simple check to allow both floats and numpy arrays
    try:
        xcor.ndim
    except AttributeError:
        xcor = np.array([xcor])
        ycor = np.array([ycor])

    # Loop over points and "burn in" map
    for n in range(0, xcor.size):
        if Verbose:
            print(n)
        diffx = x - xcor[n]
        diffy = y - ycor[n]
        col_ = np.absolute(diffx) <= (cell_length * tolerance)  # cellsize
        row_ = np.absolute(diffy) <= (cell_length * tolerance)  # cellsize
        point = point + pcr.numpy2pcr(pcr.Scalar,
                                      ((col_ * row_) * (n + 1)), np.nan)

    return pcr.ordinal(point)
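
A short sketch (Verbose is referenced inside the function, so it is defined here; the 5x5 clone is assumed):

import numpy as np
import pcraster as pcr

Verbose = False  # module-level flag used by points_to_map
pcr.setclone(5, 5, 1.0, 0.0, 5.0)
base = pcr.spatial(pcr.scalar(0.0))
pts = points_to_map(base, np.array([0.5, 3.5]), np.array([4.5, 1.5]), 0.5)
pcr.report(pts, "points.map")  # ordinal 1 at (0.5, 4.5), 2 at (3.5, 1.5)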
Example #14
def remove_fxw(fixed_weirs, removal_area):
    """
    Erase fixed weirs within the removal area. Line elements that get split 
    by the removal are relabeled.
    Parameters
    ----------
    fixed_weirs: fixed weir DataFrame with Point geometry
    removal_area: scalar map; cells inside the removal area are flagged with -9999
    
    Returns a fixed weir DataFrame.
    """
    # add label details as columns
    labels = fixed_weirs.label.str.split(':', expand=True)
    labels.columns = ['label_nr', 'label_type']
    fixed_weirs = pd.concat([fixed_weirs, labels], axis=1)

    # add reference value: 'selected' = 0:inside, 1:outside
    label_map = pcr.ifthenelse(pcr.defined(removal_area), pcr.scalar(0),
                               pcr.scalar(1))
    labeled_points = update_z(fixed_weirs.loc[:, list('abc')], label_map)
    fixed_weirs.loc[:, 'selected'] = labeled_points.z
    # 'parts' increases with 1 at every change from 1 to 0 and 0 to 1 in 'selected'
    fixed_weirs['parts'] = (1 + fixed_weirs.selected.diff().abs().cumsum())
    fixed_weirs.fillna(0, inplace=True)

    # take the minimum value of 'parts' for each fixed weir and subtract
    # from the cumulative value to reset the counter per fixed weir
    nr_parts_cum = fixed_weirs.groupby(by='label').min().parts
    nr_parts_cum = nr_parts_cum.fillna(0).reset_index()

    fxw2 = pd.merge(fixed_weirs,
                    nr_parts_cum,
                    on='label',
                    suffixes=['_l', '_r'])
    fxw2['parts'] = (1 + fxw2.parts_l - fxw2.parts_r)
    fxw2['new_label'] = fxw2.apply(lambda x: '{0}_{1}:{2}'.format(x.label_nr,
                                                           str(int(x.parts)),\
                                                           str(x.label_type)),\
                                                           axis=1)

    # clean up
    points_per_part = fxw2.loc[:,['new_label', 'selected']]\
                           .groupby('new_label')\
                           .count().reset_index()
    points_per_part.columns = ['new_label', 'nr_points']
    fxw3 = pd.merge(fxw2,
                    points_per_part,
                    on='new_label',
                    suffixes=['_l', '_r'])
    fxw_out = fxw3[(fxw3.nr_points > 1) & (fxw3.selected == 1)]
    fxw_out.drop(['label', 'label_nr', 'label_type', 'selected',\
                  'parts_l', 'parts_r', 'parts', 'nr_points'],\
                   axis = 1, inplace=True)
    fxw_out.rename(columns={'new_label': 'label'}, inplace=True)
    fxw_out.index = range(len(fxw_out))  # to overwrite duplicate indices
    return fxw_out
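
Hypothetical usage; fixed_weirs is a DataFrame of labelled points (with the update_z helper available) and removal_map a scalar PCRaster map flagging the area to erase:

cleaned = remove_fxw(fixed_weirs, removal_map)
cleaned.to_csv("fixed_weirs_cleaned.csv", index=False)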
Example #15
def spatialInterpolation2PCR(fieldArray, pcrType, MV):
    #-interpolates the field array to the full extent
    field = pcr.numpy2pcr(pcrType, fieldArray, MV)
    cellID = pcr.nominal(pcr.uniqueid(pcr.defined(field)))
    zoneID = pcr.spreadzone(cellID, 0, 1)
    if pcrType == pcr.Scalar:
        field = pcr.areaaverage(field, zoneID)
    else:
        field = pcr.areamajority(field, zoneID)
    return field
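
A sketch of the nearest-zone filling on an assumed 4x4 clone; every missing cell receives the value of the closest defined cell:

import numpy as np
import pcraster as pcr

pcr.setclone(4, 4, 1.0, 0.0, 4.0)
MV = -9999.0
field = np.full((4, 4), MV)
field[0, 0] = 10.0
field[3, 3] = 20.0
filled = spatialInterpolation2PCR(field, pcr.Scalar, MV)
pcr.report(filled, "filled.map")  # 10.0 in the north-west zone, 20.0 in the south-east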
Example #16
    def sample(self, expression):
        """
    Sampling the current values of 'expression' at the given locations for the current timestep
    """

        arrayRowPos = self._userModel.currentTimeStep(
        ) - self._userModel.firstTimeStep()

        #if isinstance(expression, float):
        #  expression = pcraster.scalar(expression)

        try:
            # store the data type for tss file header
            if self._spatialDatatype is None:
                self._spatialDatatype = str(expression.dataType())
        except AttributeError as e:
            datatype, sep, tail = str(e).partition(" ")
            msg = "Argument must be a PCRaster map, type %s given. If necessary use data conversion functions like scalar()" % (
                datatype)
            raise AttributeError(msg)

        if self._spatialIdGiven:
            if expression.dataType() == pcraster.Scalar or expression.dataType(
            ) == pcraster.Directional:
                tmp = pcraster.areaaverage(pcraster.spatial(expression),
                                           pcraster.spatial(self._spatialId))
            else:
                tmp = pcraster.areamajority(pcraster.spatial(expression),
                                            pcraster.spatial(self._spatialId))

            col = 0
            for cellIndex in self._sampleAddresses:
                value, valid = pcraster.cellvalue(tmp, cellIndex)
                if not valid:
                    value = Decimal("NaN")

                self._sampleValues[arrayRowPos][col] = value
                col += 1
        else:
            if expression.dataType() == pcraster.Scalar or expression.dataType(
            ) == pcraster.Directional:
                tmp = pcraster.maptotal(pcraster.spatial(expression))\
                      / pcraster.maptotal(pcraster.scalar(pcraster.defined(pcraster.spatial(expression))))
            else:
                tmp = pcraster.mapmaximum(pcraster.maptotal(pcraster.areamajority(pcraster.spatial(expression),\
                      pcraster.spatial(pcraster.nominal(1)))))

            value, valid = pcraster.cellvalue(tmp, 1)
            if not valid:
                value = Decimal("NaN")

            self._sampleValues[arrayRowPos] = value

        if self._userModel.currentTimeStep() == self._userModel.nrTimeSteps():
            self._writeTssFile()
Example #17
    def dynamic(self):
        
        # update model time using the current pcraster timestep value
        self.modelTime.update(self.currentTimeStep())

        # reading
        data_available = True
        if data_available:
            input_value = vos.netcdf2PCRobjClone(ncFile  = self.input_netcdf['file_name'],
                                                 varName = self.input_netcdf['variable_name'],
                                                 dateInput = str(self.modelTime.fulldate),
                                                 useDoy = None,
                                                 cloneMapFileName = self.clone_map_file)
            data_available = True  
        
        else:
            print "No values are available for this date: "+str(self.modelTime)
            data_available = False 
        
        if data_available: output_value = input_value

        # upscaling
        if data_available and self.resample_factor > 1.0:
        
            # upscaling using cell area
            cell_area = pcr.ifthen(pcr.defined(output_value), self.cell_area)
            output_value_in_pcraster = \
                            vos.getValDivZero(\
                            pcr.areatotal(output_value*self.cell_area, self.unique_ids),\
                            pcr.areatotal(self.cell_area, self.unique_ids), vos.smallNumber)
            
            # resample to the output clone resolution 
            output_value = vos.regridToCoarse(pcr.pcr2numpy(output_value_in_pcraster, vos.MV),
                                              self.resample_factor, "max", vos.MV)

        # reporting
        if data_available:

            # time stamp 
            timestepPCR = self.modelTime.timeStepPCR
            timeStamp = datetime.datetime(self.modelTime.year,\
                                          self.modelTime.month,\
                                          self.modelTime.day,0)
            # write to netcdf 
            self.output.data2NetCDF(self.output_netcdf['file_name'],\
                                    self.output_netcdf['variable_name'],\
                                    output_value,\
                                    timeStamp)

        # closing the file at the end of the simulation
        if self.modelTime.isLastTimeStep(): self.output.close(self.output_netcdf['file_name'])
Example #18
def map_edges(clone):
    """Boolean map true map edges, false elsewhere"""

    pcr.setglobaloption('unittrue')
    xmin, xmax, ymin, ymax, nr_rows, nr_cols, cell_size = clone_attributes()
    # build an all-true map so coordinates are computed for every cell
    clone = pcr.ifthenelse(pcr.defined(clone), pcr.boolean(1), pcr.boolean(1))
    x_coor = pcr.xcoordinate(clone)
    y_coor = pcr.ycoordinate(clone)
    north = y_coor > (ymax - cell_size)
    south = y_coor < (ymin + cell_size)
    west = x_coor < (xmin + cell_size)
    east = x_coor > (xmax - cell_size)
    edges = north | south | west | east
    return edges
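
Hypothetical usage; clone_attributes() (not shown here) is assumed to return xmin, xmax, ymin, ymax, the number of rows and columns, and the cell size of the current clone:

import pcraster as pcr

pcr.setclone("clone.map")  # placeholder file name
edges = map_edges(pcr.readmap("clone.map"))
pcr.report(edges, "edges.map")  # boolean: true on the outer ring of cells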
Example #19
def define_landmask(input_file, clone_map_file, output_map_file):

    # define the landmask based on the input     
    cmd = "gdalwarp -tr 0.5 0.5 -te -180 -90 180 90 -r max " + str(input_file) + " " + output_map_file + ".tif"
    print(cmd); os.system(cmd)
    cmd = "gdal_translate -of PCRaster " + output_map_file + ".tif " + output_map_file
    print(cmd); os.system(cmd)
    cmd = "mapattr -c " + clone_map_file + " " + output_map_file
    print(cmd); os.system(cmd)
    cmd = "rm " + output_map_file + ".*"
    print(cmd); os.system(cmd)
    
    landmask = pcr.defined(pcr.readmap(output_map_file))
    landmask = pcr.ifthen(landmask, landmask)
    # ~ pcr.aguila(landmask)
    
    return landmask
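
A hypothetical invocation with placeholder paths; gdalwarp, gdal_translate, and mapattr must be available on the PATH:

import pcraster as pcr

pcr.setclone("clone_05min.map")
landmask = define_landmask("dem_original.tif", "clone_05min.map",
                           "landmask_05min.map")
pcr.report(landmask, "landmask_05min.map")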
Example #20
    def __init__(self, configuration, currTimeStep, initialState=None):
        self._configuration = configuration
        self._modelTime = currTimeStep

        pcr.setclone(configuration.cloneMap)

        # Read the ldd map.
        self.lddMap = vos.readPCRmapClone(\
                  configuration.routingOptions['lddMap'],
                  configuration.cloneMap,configuration.tmpDir,configuration.globalOptions['inputDir'],True)
        # ensure the ldd map is correct, and actually of type "ldd"
        self.lddMap = pcr.lddrepair(pcr.ldd(self.lddMap))

        if configuration.globalOptions['landmask'] != "None":
            self.landmask = vos.readPCRmapClone(\
            configuration.globalOptions['landmask'],
            configuration.cloneMap,configuration.tmpDir,configuration.globalOptions['inputDir'])
        else:
            self.landmask = pcr.defined(self.lddMap)

        # defining catchment areas
        self.catchment_class = 1.0

        # number of upperSoilLayers:
        self.numberOfSoilLayers = int(
            configuration.landSurfaceOptions['numberOfUpperSoilLayers'])

        # preparing sub-modules
        self.createSubmodels(initialState)

        # option for debugging to PCR-GLOBWB version 1.0
        self.debug_to_version_one = False
        if configuration.debug_to_version_one: self.debug_to_version_one = True
        if self.debug_to_version_one:

            # preparing initial folder directory
            self.directory_for_initial_maps = vos.getFullPath(
                "initials/", self._configuration.mapsDir)
            if os.path.exists(self.directory_for_initial_maps):
                shutil.rmtree(self.directory_for_initial_maps)
            os.makedirs(self.directory_for_initial_maps)

            # dump the initial state
            self.dumpState(self.directory_for_initial_maps, "initial")
Example #21
    def __init__(self, model_setup):

        # In this part ("__init__"), we initiate pcraster frameworks and set the clone map.

        # initiate pcraster dynamic and monte carlo frameworks
        DynamicModel.__init__(self)
        MonteCarloModel.__init__(self)

        # make model_setup available for the entire method
        self.model_setup = model_setup

        # set the clone map based on DEM
        self.clone_map = self.model_setup['dem_file_name']
        pcr.setclone(self.clone_map)
        # - landmask - needed if we want to mask out some areas/cells
        self.landmask = pcr.defined(pcr.readmap(self.clone_map))

        # output folder
        self.output_folder = self.model_setup['output_folder']

        # initiate a netcdf writer
        self.netcdf_writer = output_netcdf_writer.OutputNetCDF(pcr.clone())
Example #22
    def __init__(self, configuration, currTimeStep):
        self._configuration = configuration
        self._modelTime = currTimeStep

        pcr.setclone(configuration.cloneMap)

        # read the ldd map
        self.lddMap = vos.netcdf2PCRobjCloneWithoutTime(configuration.modflowParameterOptions['channelNC'],'lddMap',\
                                                        configuration.cloneMap)
        # ensure ldd map is correct, and actually of type "ldd"
        self.lddMap = pcr.lddrepair(pcr.ldd(self.lddMap))

        # defining the landmask map
        if configuration.globalOptions['landmask'] != "None":
            self.landmask = vos.readPCRmapClone(\
            configuration.globalOptions['landmask'],
            configuration.cloneMap,configuration.tmpDir,configuration.globalOptions['inputDir'])
        else:
            self.landmask = pcr.defined(self.lddMap)

        # preparing the sub-model(s); currently there is only one sub-model
        self.createSubmodels()
Example #23
    pcraster.readmap(os.path.join(dataPath, "NELddF00.out")),
    pcraster.readmap(os.path.join(dataPath, "NLddF000.out")),
    pcraster.readmap(os.path.join(dataPath, "NWLddF00.out")),
    pcraster.readmap(os.path.join(dataPath, "SELddF00.out")),
    pcraster.readmap(os.path.join(dataPath, "SLddF000.out")),
    pcraster.readmap(os.path.join(dataPath, "SWLddF00.out")),
    pcraster.readmap(os.path.join(dataPath, "WLddF000.out")),
)
mldd.addStream(pcraster.readmap(os.path.join(dataPath, "ELddF000.out")))
mldd.setDem(pcraster.spatial(pcraster.scalar(1)))


upstream = mldd.upstream(pcraster.spatial(pcraster.scalar(1)))
pcraster.report(upstream, "upstream.map")

accuflux = mldd.accuflux(pcraster.ifthen(pcraster.defined(upstream), pcraster.spatial(pcraster.scalar(1))))
pcraster.report(accuflux, "accuflux.map")

dem = mldd.getDem()
pcraster.report(dem, "dem.map")

streamN, streamNE, streamE, streamSE, streamS, streamSW, streamW, streamNW = mldd.getStream()

pcraster.report(streamN, "streamN.map")
pcraster.report(streamNE, "streamNE.map")
pcraster.report(streamE, "streamE.map")
pcraster.report(streamSE, "streamSE.map")
pcraster.report(streamS, "streamS.map")
pcraster.report(streamSW, "streamSW.map")
pcraster.report(streamW, "streamW.map")
pcraster.report(streamNW, "streamNW.map")
Example #24
def main():

    # output folder
    clean_out_folder = True
    if os.path.exists(out_folder):
        if clean_out_folder:
            shutil.rmtree(out_folder)
            os.makedirs(out_folder)
    else:
        os.makedirs(out_folder)
    os.chdir(out_folder)
    os.system("pwd")

    # tmp folder
    tmp_folder = out_folder + "/tmp/"
    if os.path.exists(tmp_folder): shutil.rmtree(tmp_folder)
    os.makedirs(tmp_folder)

    # set the clone map
    print("set the clone map")
    pcr.setclone(global_clone_map_file)

    # read ldd map
    print("define the ldd")
    # ~ ldd_map = pcr.readmap(global_ldd_inp_file)
    ldd_map     = pcr.lddrepair(pcr.lddrepair(pcr.ldd(vos.readPCRmapClone(v                = global_ldd_inp_file, \
                                                                          cloneMapFileName = global_clone_map_file, \
                                                                          tmpDir           = tmp_folder, \
                                                                          absolutePath     = None, \
                                                                          isLddMap         = True, \
                                                                          cover            = None, \
                                                                          isNomMap         = False))))

    # define the landmask
    if landmask_map_file is None:
        print("define the landmask based on the ldd input")
        # ~ landmask = pcr.defined(pcr.readmap(global_ldd_inp_file))
        landmask = pcr.defined(ldd_map)
        landmask = pcr.ifthen(landmask, landmask)
    else:
        print("define the landmask based on the input landmask_map_file")
        landmask = pcr.readmap(landmask_map_file)
        ldd_map = pcr.ifthen(landmask, pcr.cover(ldd_map, pcr.ldd(5)))
        ldd_map = pcr.lddrepair(pcr.lddrepair(pcr.ldd(ldd_map)))
        landmask = pcr.defined(ldd_map)
    landmask = pcr.ifthen(landmask, landmask)

    # save ldd files used
    # - global ldd
    cmd = "cp " + str(global_ldd_inp_file) + " ."
    print(cmd)
    os.system(cmd)
    # - ldd map that is used
    pcr.report(ldd_map, "lddmap_used.map")

    # make catchment map
    print("make catchment map")
    catchment_map = pcr.catchment(ldd_map, pcr.pit(ldd_map))

    # read global subdomain file
    print("read global subdomain file")
    global_subdomain_map = vos.readPCRmapClone(
        v=global_subdomain_file,
        cloneMapFileName=global_clone_map_file,
        tmpDir=tmp_folder,
        absolutePath=None,
        isLddMap=False,
        cover=None,
        isNomMap=True)

    # set initial subdomain
    print("assign subdomains to all catchments")
    subdomains_initial = pcr.areamajority(global_subdomain_map, catchment_map)
    subdomains_initial = pcr.ifthen(landmask, subdomains_initial)

    pcr.aguila(subdomains_initial)

    pcr.report(subdomains_initial, "global_subdomains_initial.map")

    print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[0])))
    print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[1])))

    print("Checking all subdomains, avoid too large subdomains")

    num_of_masks = int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[1])

    # clone code that will be assigned
    assigned_number = 0

    subdomains_final = pcr.ifthen(
        pcr.scalar(subdomains_initial) < -7777, pcr.nominal(0))

    for nr in range(1, num_of_masks + 1, 1):

        msg = "Processing the landmask %s" % (str(nr))
        print(msg)

        mask_selected_boolean = pcr.ifthen(subdomains_initial == nr,
                                           pcr.boolean(1.0))

        process_this_clone = False
        if pcr.cellvalue(pcr.mapmaximum(pcr.scalar(mask_selected_boolean)), 1,
                         1)[0] > 0:
            process_this_clone = True

        # ~ if nr == 1: pcr.aguila(mask_selected_boolean)

        # - initial check value
        check_ok = True

        if process_this_clone:
            xmin, ymin, xmax, ymax = boundingBox(mask_selected_boolean)
            area_in_degree2 = (xmax - xmin) * (ymax - ymin)

            # ~ print(str(area_in_degree2))

            # check whether the size of bounding box is ok
            reference_area_in_degree2 = 2500.
            if area_in_degree2 > 1.50 * reference_area_in_degree2:
                check_ok = False
            if (xmax - xmin) > 10 * (ymax - ymin): check_ok = False

        # ~ # ignore checking
        # ~ check_ok = True

        if check_ok and process_this_clone:

            msg = "Clump is not needed."
            msg = "\n\n" + str(msg) + "\n\n"
            print(msg)

            # assign the clone code
            assigned_number = assigned_number + 1

            # update global landmask for river and land
            mask_selected_nominal = pcr.ifthen(mask_selected_boolean,
                                               pcr.nominal(assigned_number))
            subdomains_final = pcr.cover(subdomains_final,
                                         mask_selected_nominal)

        if not check_ok and process_this_clone:

            msg = "Clump is needed."
            msg = "\n\n" + str(msg) + "\n\n"
            print(msg)

            # make clump
            clump_ids = pcr.nominal(pcr.clump(mask_selected_boolean))

            # merge clumps that are close together
            clump_ids_window_majority = pcr.windowmajority(clump_ids, 10.0)
            clump_ids = pcr.areamajority(clump_ids_window_majority, clump_ids)
            # ~ pcr.aguila(clump_ids)

            # minimum and maximum values
            min_clump_id = int(
                pcr.cellvalue(pcr.mapminimum(pcr.scalar(clump_ids)), 1)[0])
            max_clump_id = int(
                pcr.cellvalue(pcr.mapmaximum(pcr.scalar(clump_ids)), 1)[0])

            for clump_id in range(min_clump_id, max_clump_id + 1, 1):

                msg = "Processing the clump %s of %s from the landmask %s" % (
                    str(clump_id), str(max_clump_id), str(nr))
                msg = "\n\n" + str(msg) + "\n\n"
                print(msg)

                # identify mask based on the clump
                mask_selected_boolean_from_clump = pcr.ifthen(
                    clump_ids == pcr.nominal(clump_id), mask_selected_boolean)
                mask_selected_boolean_from_clump = pcr.ifthen(
                    mask_selected_boolean_from_clump,
                    mask_selected_boolean_from_clump)

                # check whether the clump is empty
                check_mask_selected_boolean_from_clump = pcr.ifthen(
                    mask_selected_boolean, mask_selected_boolean_from_clump)
                check_if_empty = float(
                    pcr.cellvalue(
                        pcr.mapmaximum(
                            pcr.scalar(
                                pcr.defined(
                                    check_mask_selected_boolean_from_clump))),
                        1)[0])

                if check_if_empty == 0.0:

                    msg = "Map is empty !"
                    msg = "\n\n" + str(msg) + "\n\n"
                    print(msg)

                else:

                    msg = "Map is NOT empty !"
                    msg = "\n\n" + str(msg) + "\n\n"
                    print(msg)

                    # assign the clone code
                    assigned_number = assigned_number + 1

                    # update global landmask for river and land
                    mask_selected_nominal = pcr.ifthen(
                        mask_selected_boolean_from_clump,
                        pcr.nominal(assigned_number))
                    subdomains_final = pcr.cover(subdomains_final,
                                                 mask_selected_nominal)

    # ~ # kill all aguila processes if exist
    # ~ os.system('killall aguila')

    pcr.aguila(subdomains_final)

    print("")
    print("")
    print("")

    print("The subdomain map is READY.")

    pcr.report(subdomains_final, "global_subdomains_final.map")

    num_of_masks = int(vos.getMinMaxMean(pcr.scalar(subdomains_final))[1])
    print(num_of_masks)

    print("")
    print("")
    print("")

    print("Making the clone and landmask maps for all subdomains")

    num_of_masks = int(vos.getMinMaxMean(pcr.scalar(subdomains_final))[1])

    # clone and mask folders
    clone_folder = out_folder + "/clone/"
    if os.path.exists(clone_folder): shutil.rmtree(clone_folder)
    os.makedirs(clone_folder)
    mask_folder = out_folder + "/mask/"
    if os.path.exists(mask_folder): shutil.rmtree(mask_folder)
    os.makedirs(mask_folder)

    print("")
    print("")

    for nr in range(1, num_of_masks + 1, 1):

        msg = "Processing the subdomain %s" % (str(nr))
        print(msg)

        # set the global clone
        pcr.setclone(global_clone_map_file)

        mask_selected_boolean = pcr.ifthen(subdomains_final == nr,
                                           pcr.boolean(1.0))

        mask_selected_nominal = pcr.ifthen(subdomains_final == nr,
                                           pcr.nominal(nr))
        mask_file = "mask/mask_%s.map" % (str(nr))
        pcr.report(mask_selected_nominal, mask_file)

        xmin, ymin, xmax, ymax = boundingBox(mask_selected_boolean)
        area_in_degree2 = (xmax - xmin) * (ymax - ymin)

        print(
            str(nr) + " ; " + str(area_in_degree2) + " ; " +
            str((xmax - xmin)) + " ; " + str((ymax - ymin)))

        # cellsize in arcdegree
        cellsize = cellsize_in_arcmin / 60.

        # number of rows and cols
        num_rows = int(round((ymax - ymin) / cellsize))
        num_cols = int(round((xmax - xmin) / cellsize))

        # make the clone map using mapattr
        clonemap_mask_file = "clone/clonemap_mask_%s.map" % (str(nr))
        cmd = "mapattr -s -R %s -C %s -B -P yb2t -x %s -y %s -l %s %s" % (
            str(num_rows), str(num_cols), str(xmin), str(ymax), str(cellsize),
            clonemap_mask_file)
        print(cmd)
        os.system(cmd)

        # set the local landmask for the clump
        pcr.setclone(clonemap_mask_file)
        local_mask = vos.readPCRmapClone(v = mask_file, \
                                         cloneMapFileName = clonemap_mask_file,
                                         tmpDir = tmp_folder, \
                                         absolutePath = None, isLddMap = False, cover = None, isNomMap = True)
        local_mask_boolean = pcr.defined(local_mask)
        local_mask_boolean = pcr.ifthen(local_mask_boolean, local_mask_boolean)
        pcr.report(local_mask_boolean, mask_file)

    print("")
    print("")
    print("")

    print(num_of_masks)
Example #25
def main():

    # make output directory
    try:
        os.makedirs(output_directory)
    except:
        if cleanOutputDir:
            os.system('rm -r ' + output_directory + "/*")

    # change the current directory/path to output directory
    os.chdir(output_directory)

    # make temporary directory
    tmp_directory = output_directory + "/tmp"
    os.makedirs(tmp_directory)
    vos.clean_tmp_dir(tmp_directory)

    # format and initialize logger
    logger_initialize = Logger(output_directory)

    logger.info('Start processing for 5 arc-min resolution!')

    # clone and landmask for 5 arc-min resolution
    pcr.setclone(clone_map_05min_file)
    landmask = pcr.defined(clone_map_05min_file)

    # read thickness value (at 5 arc min resolution)
    logger.info('Reading the thickness at 5 arc-min resolution!')
    thickness = pcr.ifthen(landmask,\
                vos.netcdf2PCRobjCloneWithoutTime(thickness_05min_netcdf['filename'], "average_corrected", clone_map_05min_file))
    #
    # update landmask
    landmask = pcr.defined(thickness)

    # read aquifer properties at 5 arc min resolution
    logger.info(
        'Reading saturated conductivity and specific yield at 5 arc-min resolution!'
    )
    saturated_conductivity = pcr.ifthen(landmask,\
                             vos.netcdf2PCRobjCloneWithoutTime(\
                             aquifer_properties_05min_netcdf['filename'],\
                             "kSatAquifer"  , clone_map_05min_file))
    specific_yield         = pcr.ifthen(landmask,\
                             vos.netcdf2PCRobjCloneWithoutTime(\
                             aquifer_properties_05min_netcdf['filename'],\
                             "specificYield", clone_map_05min_file))

    # saving 5 min parameters to a netcdf file
    logger.info('Saving groundwater parameters to a netcdf file: ' +
                output_05min_filename)
    #
    output_05min_netcdf = outputNetCDF.OutputNetCDF(clone_map_05min_file)
    #
    variable_names = ["saturated_conductivity", "specific_yield", "thickness"]
    units = ["m/day", "1", "m"]
    variable_fields = [
        pcr.pcr2numpy(saturated_conductivity, vos.MV),
        pcr.pcr2numpy(specific_yield, vos.MV),
        pcr.pcr2numpy(thickness, vos.MV),
    ]
    pcr.report(saturated_conductivity, "saturated_conductivity_05min.map")
    pcr.report(specific_yield, "specific_yield_05min.map")
    pcr.report(thickness, "thickness_05min.map")
    output_05min_netcdf.createNetCDF(output_05min_filename, variable_names,
                                     units)
    output_05min_netcdf.changeAtrribute(output_05min_filename,
                                        netcdf_attributes)
    output_05min_netcdf.data2NetCDF(output_05min_filename, variable_names,
                                    variable_fields)

    logger.info('Start processing for 30 arc-min resolution!')

    # upscaling thickness to 30 arc min resolution
    logger.info('Upscaling thickness from 5 arc-min resolution to 30 arc-min!')
    thickness_05min_array = pcr.pcr2numpy(thickness, vos.MV)
    thickness_30min_array = vos.regridToCoarse(thickness_05min_array, 30. / 5.,
                                               "average")
    #
    # set clone for 30 arc min resolution
    pcr.setclone(clone_map_30min_file)
    #
    landmask = pcr.defined(clone_map_30min_file)
    thickness = pcr.ifthen(
        landmask, pcr.numpy2pcr(pcr.Scalar, thickness_30min_array, vos.MV))
    #
    # update landmask
    landmask = pcr.defined(thickness)

    # read aquifer properties at 30 arc min resolution
    logger.info(
        'Reading saturated conductivity and specific yield at 30 arc-min resolution!'
    )
    saturated_conductivity = pcr.ifthen(landmask,\
                             vos.netcdf2PCRobjCloneWithoutTime(\
                             aquifer_properties_30min_netcdf['filename'],\
                             "kSatAquifer"  , clone_map_30min_file))
    specific_yield         = pcr.ifthen(landmask,\
                             vos.netcdf2PCRobjCloneWithoutTime(\
                             aquifer_properties_30min_netcdf['filename'],\
                             "specificYield", clone_map_30min_file))

    # saving 30 min parameters to a netcdf file
    logger.info('Saving groundwater parameters to a netcdf file: ' +
                output_30min_filename)
    #
    output_30min_netcdf = outputNetCDF.OutputNetCDF(clone_map_30min_file)
    #
    variable_names = ["saturated_conductivity", "specific_yield", "thickness"]
    units = ["m/day", "1", "m"]
    variable_fields = [
        pcr.pcr2numpy(saturated_conductivity, vos.MV),
        pcr.pcr2numpy(specific_yield, vos.MV),
        pcr.pcr2numpy(thickness, vos.MV),
    ]
    pcr.report(saturated_conductivity, "saturated_conductivity_30min.map")
    pcr.report(specific_yield, "specific_yield_30min.map")
    pcr.report(thickness, "thickness_30min.map")
    output_30min_netcdf.createNetCDF(output_30min_filename, variable_names,
                                     units)
    output_30min_netcdf.changeAtrribute(output_30min_filename,
                                        netcdf_attributes)
    output_30min_netcdf.data2NetCDF(output_30min_filename, variable_names,
                                    variable_fields)
Example #26
output_filename = output_folder + "/average_local.map"
pcr.report(average_local, output_filename)

# calculate the average value/map of the global model
msg = "Calculate the average value/map of the global model!"
print(msg)
# - Using the top layer of the global model
global_model_folder = "/scratch-shared/edwinhs/modflow_results_in_pcraster/upper_layer/regional/"
i_month = 0
cum_map = pcr.scalar(0.0)
for year in range(str_year, end_year + 1, 1):
    for month in range(1, 12 + 1, 1):
        # file name, e.g. /scratch-shared/edwinhs/modflow_results_in_pcraster/upper_layer/regional/htop_2000_01.map
        file_name = "htop_%04d_%02d" % (year, month)
        file_name = global_model_folder + "/" + file_name + ".map"
        print(file_name)
        # cumulative values
        cum_map = cum_map + pcr.readmap(file_name)
        i_month = i_month + 1
# calculating average and saving it to a pcraster map
average_global = cum_map / i_month
# use only the values where local model exists:
average_global = pcr.ifthen(pcr.defined(average_local), average_global)
pcr.aguila(average_global)
pcr.report(average_global, output_folder + "/average_global.map")

# evaluating/comparing two maps
difference = average_local - average_global
pcr.aguila(difference)
pcr.report(difference, output_folder + "/bias.map")
Example #27
# clone/mask maps
number_of_clone_maps = 53
areas = ['M%02d'%i for i in range(1,number_of_clone_maps+1,1)]



########################################################################
# MAIN SCRIPT
########################################################################

# change the working directory to the "maps folder"
os.chdir(outputDir)

# set the landmask of 5 arcmin model
landmask_05_min_file = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/lddsound_05min.map"
landmask_05_min = pcr.defined(pcr.readmap(landmask_05_min_file))
landmask_used = pcr.ifthen(landmask_05_min, landmask_05_min)
landmask_used = pcr.boolean(pcr.windowmaximum(pcr.scalar(landmask_used), 0.5))
pcr.report(landmask_used, "extended_landmask_5min.map")

# set the clone at high resolution 
msg = "Make and set the clone map."
logger.info(msg)
# - number of rows and columns
nrRows = int((latMax-latMin)/deltaLat)
nrCols = int((lonMax-lonMin)/deltaLon)
# - make and set the clone map
tempCloneMap = outputDir+'/temp_clone.map'
command = 'mapattr -s -R %d -C %d -P "yb2t" -B -x %f -y %f -l %f %s' %\
	(nrRows,nrCols,lonMin,latMax,deltaLat,tempCloneMap)
vos.cmd_line(command, using_subprocess = False)
Example #28
    except:
        pass
    try:
        os.makedirs(tmp_directory)    
    except:
        pass
            
    # initiate the netcdf file and object:
    tssNetCDF = ConvertMapsToNetCDF4(cloneMapFile = cloneMapFileName, attribute = attributeDictionary)
    tssNetCDF.createNetCDF(ncFileName,varNames,varUnits)

    index = 0 # for posCnt
    
    # set clone and define land mask region
    pcr.setclone(landmaskFile)
    landmask = pcr.defined(pcr.readmap(landmaskFile))
    landmask = pcr.ifthen(landmask, landmask)
    
    # cell area (m2)
    cellArea = vos.readPCRmapClone(cellAreaFile,
                                   cloneMapFileName,tmp_directory)
    cellArea = pcr.ifthen(landmask,cellArea)
    
    for iYear in range(staYear,endYear+1):

        # time stamp
        timeStamp = datetime.datetime(int(iYear),int(1),int(1),int(0))
        fulldate = '%4i-%02i-%02i' %(int(iYear),int(1),int(1))
        print(fulldate)

        # index for time object in the netcdf file:
Example #29
def main():
	#-initialization
	# MVs
	MV= -999.
	# minimum catchment size to process
	catchmentSizeLimit= 0.0
	# period of interest, start and end year
	startYear= 1961
	endYear= 2010
	# maps
	cloneMapFileName= '/data/hydroworld/PCRGLOBWB20/input30min/global/Global_CloneMap_30min.map'
	lddFileName= '/data/hydroworld/PCRGLOBWB20/input30min/routing/lddsound_30min.map'
	cellAreaFileName= '/data/hydroworld/PCRGLOBWB20/input30min/routing/cellarea30min.map'
	# set clone 
	pcr.setclone(cloneMapFileName)
	# output
	outputPath= '/scratch/rens/reservedrecharge'
	percentileMapFileName= os.path.join(outputPath,'q%03d_cumsec.map')
	textFileName= os.path.join(outputPath,'groundwater_environmentalflow_%d.txt')
	fractionReservedRechargeMapFileName= os.path.join(outputPath,'fraction_reserved_recharge%d.map')
	fractionMinimumReservedRechargeMapFileName= os.path.join(outputPath,'minimum_fraction_reserved_recharge%d.map')
	# input
	inputPath= '/nfsarchive/edwin-emergency-backup-DO-NOT-DELETE/rapid/edwin/05min_runs_results/2015_04_27/non_natural_2015_04_27/global/netcdf/'
	# define data to be read from netCDF files
	ncData= {}
	variableName= 'totalRunoff'
	ncData[variableName]= {}
	ncData[variableName]['fileName']= os.path.join(inputPath,'totalRunoff_monthTot_output.nc')
	ncData[variableName]['fileRoot']= os.path.join(outputPath,'qloc')
	ncData[variableName]['annualAverage']= pcr.scalar(0)	
	variableName= 'gwRecharge'
	ncData[variableName]= {}
	ncData[variableName]['fileName']= os.path.join(inputPath,'gwRecharge_monthTot_output.nc')
	ncData[variableName]['fileRoot']= os.path.join(outputPath,'gwrec')
	ncData[variableName]['annualAverage']= pcr.scalar(0)
	variableName= 'discharge'
	ncData[variableName]= {}
	ncData[variableName]['fileName']= os.path.join(inputPath,'totalRunoff_monthTot_output.nc')
	ncData[variableName]['fileRoot']= os.path.join(outputPath,'qc')
	ncData[variableName]['annualAverage']= pcr.scalar(0)
	ncData[variableName]['mapStack']= np.array([])
	# percents and environmental flow condition set as percentile
	percents= range(10,110,10)
	environmentalFlowPercent= 10
	if environmentalFlowPercent not in percents:
		percents.append(environmentalFlowPercent)
		percents.sort()

	#-start
	# obtain attributes
	pcr.setclone(cloneMapFileName)
	cloneSpatialAttributes= spatialAttributes(cloneMapFileName)
	years= range(startYear,endYear+1)
	# output path
	if not os.path.isdir(outputPath):
		os.makedirs(outputPath)
	os.chdir(outputPath)
	# compute catchments
	ldd= pcr.readmap(lddFileName)
	cellArea= pcr.readmap(cellAreaFileName)
	catchments= pcr.catchment(ldd,pcr.pit(ldd))
	fractionWater= pcr.scalar(0.0) # temporary!
	lakeMask= pcr.boolean(0) # temporary!
	pcr.report(catchments,os.path.join(outputPath,'catchments.map'))
	maximumCatchmentID= int(pcr.cellvalue(pcr.mapmaximum(pcr.scalar(catchments)),1)[0])
	# iterate over years
	weight= float(len(years))**-1
	for year in years:
		#-echo year
		print ' - processing year %d' % year
		#-process data
		startDate= datetime.datetime(year,1,1)
		endDate= datetime.datetime(year,12,31)
		timeSteps= endDate.toordinal()-startDate.toordinal()+1
		dynamicIncrement= 1
		for variableName in ncData.keys():
			print '   extracting %s' % variableName,
			ncFileIn= ncData[variableName]['fileName']
			#-process data
			pcrDataSet= pcrObject(variableName, ncData[variableName]['fileRoot'],\
				ncFileIn,cloneSpatialAttributes, pcrVALUESCALE= pcr.Scalar, resamplingAllowed= True,\
				dynamic= True, dynamicStart= startDate, dynamicEnd= endDate, dynamicIncrement= dynamicIncrement, ncDynamicDimension= 'time')
			pcrDataSet.initializeFileInfo()
			pcrDataSet.processFileInfo()
			for fileInfo in pcrDataSet.fileProcessInfo.values()[0]:
				tempFileName= fileInfo[1]
				variableField= pcr.readmap(tempFileName)
				variableField= pcr.ifthen(pcr.defined(ldd),pcr.cover(variableField,0))
				if variableName == 'discharge':
					dayNumber= int(os.path.splitext(tempFileName)[1].strip('.'))
					date= datetime.date(year,1,1)+datetime.timedelta(dayNumber-1)
					numberDays= calendar.monthrange(year,date.month)[1]
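					# convert the monthly runoff total (assumed m per month, as in
					# PCR-GLOBWB monthTot output) to a mean monthly discharge:
					# accumulate volume (depth * cellArea) down the ldd and divide
					# by the number of seconds in the month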
					variableField= pcr.max(0,pcr.catchmenttotal(variableField*cellArea,ldd)/(numberDays*24*3600))
				ncData[variableName]['annualAverage']+= weight*variableField
				if 'mapStack' in ncData[variableName].keys():
					tempArray= pcr2numpy(variableField,MV)
					mask= tempArray != MV
					if ncData[variableName]['mapStack'].size != 0:
						ncData[variableName]['mapStack']= np.vstack((ncData[variableName]['mapStack'],tempArray[mask]))
					else:
						ncData[variableName]['mapStack']= tempArray[mask]
						coordinates= np.zeros((ncData[variableName]['mapStack'].size,2))
						pcr.setglobaloption('unitcell')
						tempArray= pcr2numpy(pcr.ycoordinate(pcr.boolean(1))+0.5,MV)
						coordinates[:,0]= tempArray[mask]
						tempArray= pcr2numpy(pcr.xcoordinate(pcr.boolean(1))+0.5,MV)
						coordinates[:,1]= tempArray[mask]      
				os.remove(tempFileName)				
			# delete object
			pcrDataSet= None
			del pcrDataSet
			# close line on screen
			print
	# report annual averages
	key= 'annualAverage'
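	# discharge was accumulated from 12 monthly fields per year, so divide
	# the annual sum by 12 to obtain the mean discharge in m3/s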
	ncData['discharge'][key]/= 12
	for variableName in ncData.keys():
		ncData[variableName][key]= pcr.max(0,ncData[variableName][key])
		pcr.report(ncData[variableName][key],\
			os.path.join(outputPath,'%s_%s.map' % (variableName,key)))
	# remove aux.xml
	for tempFileName in os.listdir(outputPath):
		if 'aux.xml' in tempFileName:
			os.remove(tempFileName)
	# sort data
	print 'sorting discharge data'
	variableName= 'discharge'
	key= 'mapStack'
	indices= np.zeros((ncData[variableName][key].shape),np.uint)
	for iCnt in xrange(ncData[variableName][key].shape[1]):
		indices[:,iCnt]= ncData[variableName][key][:,iCnt].argsort(kind= 'mergesort')
		ncData[variableName][key][:,iCnt]= ncData[variableName][key][:,iCnt][indices[:,iCnt]]
	# extract values for percentiles
	print 'returning maps'
	for percent in percents:
		percentile= 0.01*percent
		index0= min(ncData[variableName][key].shape[0]-1,int(percentile*ncData[variableName][key].shape[0]))
		index1= min(ncData[variableName][key].shape[0]-1,int(percentile*ncData[variableName][key].shape[0])+1)
		x0= float(index0)/ncData[variableName][key].shape[0]
		x1= float(index1)/ncData[variableName][key].shape[0]
		if x0 != x1:
			y= ncData[variableName][key][index0,:]+(percentile-x0)*\
				 (ncData[variableName][key][index1,:]-ncData[variableName][key][index0,:])/(x1-x0)
		else:
			y= ncData[variableName][key][index0,:]
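		# note: the branch above interpolates linearly between the two sorted
		# stack values whose empirical quantiles (index/n) bracket the requested
		# percentile; np.percentile(stack, percent, axis=0) would give a close
		# but not bit-identical estimate, as its default convention uses (n-1) spacing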
		# convert a slice of the stack into an array
		tempArray= np.ones((cloneSpatialAttributes.numberRows,cloneSpatialAttributes.numberCols))*MV
		for iCnt in xrange(coordinates.shape[0]):
			row= coordinates[iCnt,0]-1
			col= coordinates[iCnt,1]-1
			tempArray[row,col]= y[iCnt]
		variableField= numpy2pcr(pcr.Scalar,tempArray,MV)
		pcr.report(variableField,percentileMapFileName % percent)
		if percent == environmentalFlowPercent:
			ncData[variableName]['environmentalFlow']= variableField
		tempArray= None; variableField= None
		del tempArray, variableField
	# process environmental flow
	# initialize map of reserved recharge fraction
	fractionReservedRechargeMap= pcr.ifthen(ncData[variableName]['environmentalFlow'] < 0,pcr.scalar(0))
	fractionMinimumReservedRechargeMap= pcr.ifthen(ncData[variableName]['environmentalFlow'] < 0,pcr.scalar(0))
	textFile= open(textFileName % environmentalFlowPercent,'w')
	hStr= 'Environmental flow analysis per basin, resulting in a map of renewable, exploitable recharge, for the %d%s quantile of discharge\n' % (environmentalFlowPercent,'%')
	hStr+= 'Returns Q_%d/R, the fraction of reserved recharge needed to fully sustain the environmental flow requirement defined as the %d percentile,\n' % (environmentalFlowPercent, environmentalFlowPercent)
	hStr+= 'and Q*_%d/R, a reduced fraction that takes the availability of surface water into account\n' % environmentalFlowPercent
	textFile.write(hStr)
	print hStr
	# create header to display on screen and write to file
	# reported are: 1: ID, 2: Area, 3: average discharge, 4: environmental flow, 5: average recharge,
	# 6: Q_%d/Q, 7: Q_%d/R_Avg, 8: R_Avg/Q_Avg, 9: Q*_%d/R_Avg
	hStr= '%6s,%15s,%15s,%15s,%15s,%15s,%15s,%15s,%15s\n' % \
		('ID','Area [km2]','Q_Avg [m3]','Q_%d [m3]' % environmentalFlowPercent,'R_Avg [m3]','Q_%d/Q_Avg [-]' % environmentalFlowPercent,\
			'Q_%d/R_Avg [-]' % environmentalFlowPercent,'R_Avg/Q_Avg [-]','Q*_%d/R_Avg [-]' % environmentalFlowPercent)
	textFile.write(hStr)
	print hStr
	for catchment in xrange(1,maximumCatchmentID+1):
		# create catchment mask and check whether it does not coincide with a lake
		catchmentMask= catchments == catchment
		catchmentSize= pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,cellArea*1.e-6)),1)[0]
		#~ ##~ if pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,pcr.scalar(lakeMask))),1) <> \
				#~ ##~ pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,pcr.scalar(catchmentMask))),1)[0] and \
				#~ ##~ catchmentSize > catchmentSizeLimit:
		key= 'annualAverage'
		variableName= 'discharge'			
		if bool(pcr.cellvalue(pcr.maptotal(pcr.ifthen((ldd == 5) & catchmentMask,\
				pcr.scalar(ncData[variableName][key] > 0))),1)[0]) and catchmentSize >= catchmentSizeLimit:
			# valid catchment, process
			# all volumes are in m3 per year
			key= 'annualAverage'
			catchmentAverageDischarge= pcr.cellvalue(pcr.mapmaximum(pcr.ifthen(catchmentMask & (ldd == 5),\
				ncData[variableName][key])),1)[0]*365.25*3600*24
			variableName= 'gwRecharge'
			catchmentRecharge= pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,ncData[variableName][key]*\
				(1.-fractionWater)*cellArea)),1)[0]
			variableName= 'totalRunoff'
			catchmentRunoff= pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,ncData[variableName][key]*\
				cellArea)),1)[0]
			key= 'environmentalFlow'
			variableName= 'discharge'			
			catchmentEnvironmentalFlow= pcr.cellvalue(pcr.mapmaximum(pcr.ifthen(catchmentMask & (ldd == 5),\
				ncData[variableName][key])),1)[0]*365.25*3600*24
			catchmentRunoff= max(catchmentRunoff,catchmentEnvironmentalFlow)
			if catchmentAverageDischarge > 0.:
				fractionEnvironmentalFlow= catchmentEnvironmentalFlow/catchmentAverageDischarge
				fractionGroundWaterContribution= catchmentRecharge/catchmentAverageDischarge
			else:
				fractionEnvironmentalFlow= 0.
				fractionGroundWaterContribution= 0.
			if catchmentRecharge > 0:
				fractionReservedRecharge= min(1,catchmentEnvironmentalFlow/catchmentRecharge)
			else:
				fractionReservedRecharge= 1.0
			fractionMinimumReservedRecharge= (fractionReservedRecharge+fractionGroundWaterContribution-\
				fractionReservedRecharge*fractionGroundWaterContribution)*fractionReservedRecharge
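			# note: fR + fG - fR*fG equals 1-(1-fR)(1-fG), a union-like
			# combination of the reserved-recharge fraction fR and the
			# groundwater contribution fG that stays within [0,1] before
			# being scaled by fR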
			#~ # echo to screen, and write to file and map
			wStr= '%6s,%15.1f,%15.6g,%15.6g,%15.6g,%15.6f,%15.6f,%15.6f,%15.6f\n' % \
				(catchment,catchmentSize,catchmentAverageDischarge,catchmentEnvironmentalFlow,catchmentRecharge,\
					fractionEnvironmentalFlow,fractionReservedRecharge,fractionGroundWaterContribution,fractionMinimumReservedRecharge)
			print wStr
			textFile.write(wStr)
			# update maps
			fractionReservedRechargeMap= pcr.ifthenelse(catchmentMask,\
				pcr.scalar(fractionReservedRecharge),fractionReservedRechargeMap)
			fractionMinimumReservedRechargeMap= pcr.ifthenelse(catchmentMask,\
				pcr.scalar(fractionMinimumReservedRecharge),fractionMinimumReservedRechargeMap)
	#-report map and close text file
	pcr.report(fractionReservedRechargeMap,fractionReservedRechargeMapFileName % environmentalFlowPercent)
	pcr.report(fractionMinimumReservedRechargeMap,fractionMinimumReservedRechargeMapFileName % environmentalFlowPercent)
	# close text file
	textFile.close()
	# finished
	print 'all done!'
Exemple #36
0
# -- an example: WATCH historical: "/scratch-shared/edwinhs-last/flood_analyzer_output/gumbel_fits/watch_1960-1999/"
#
input_files['file_name'] = {}
input_files['file_name']['channelStorage']    = input_files['folder']['channelStorage']    + "/" + "gumbel_analysis_output_for_channel_storage.nc" 
input_files['file_name']['surfaceWaterLevel'] = input_files['folder']['surfaceWaterLevel'] + "/" + "gumbel_analysis_output_for_surface_water_level.nc" 
#
# general input files
# - clone map
input_files['clone_map_05min'] = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/lddsound_05min.map"
pcr.setclone(input_files['clone_map_05min'])
# - cell area, ldd maps
input_files['cell_area_05min'] = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/cellsize05min.correct.map"
input_files['ldd_map_05min'  ] = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/lddsound_05min.map"
# - landmask
# -- default based on ldd
landmask = pcr.defined(pcr.readmap(input_files['ldd_map_05min']))
# -- additional landmask (to exclude river basins with limited meteo forcing coverage)
input_files['landmask_file']   = "/projects/0/aqueduct/users/edwinsut/data/landmasks_for_extreme_value_analysis_and_downscaling/landmask_extreme_value_analysis/landmask_extreme_value_analysis_05min.map"
if input_files['landmask_file'] is not None:
    landmask = pcr.ifthen(landmask, pcr.readmap(input_files['landmask_file']))
#~ pcr.aguila(landmask)

# output files
output_files                   = {}
#
# output folder
#
#~ # - WATCH historical
#~ output_files['folder']      = "/scratch-shared/edwinhs-last/flood_analyzer_output/extreme_values/watch_1960-1999/"
#~ # - gfdl-esm2m historical
#~ output_files['folder']      = "/scratch-shared/edwinhs-last/flood_analyzer_output/extreme_values/gfdl-esm2m_1960-1999/"
                             attribute = attributeDictionary, \
                             cellSizeInArcMinutes = cellSizeInArcMinutes)
    # making netcdf files:
    for var in variable_names:
        tssNetCDF.createNetCDF(output[var]['file_name'], var, output[var]['unit'])

    # class (country) ids
    uniqueIDsFile = "/projects/0/dfguu/users/edwin/data/country_shp_from_tianyi/World_Polys_High.map"
    uniqueIDs = pcr.nominal(\
                vos.readPCRmapClone(uniqueIDsFile, cloneMapFileName, tmp_directory, 
                                    None, False, None, True))
    uniqueIDs = pcr.ifthen(pcr.scalar(uniqueIDs) >= 0.0, uniqueIDs)
    
    # landmask                               
    landmask = pcr.defined(pcr.readmap(landmask05minFile))
    landmask = pcr.ifthen(landmask, landmask)
    # - extending landmask with uniqueIDs
    landmask = pcr.cover(landmask, pcr.defined(uniqueIDs))
    
    # extending class (country) ids
    max_step = 5
    for i in range(1, max_step+1, 1):
        cmd = "Extending class: step "+str(i)+" from " + str(max_step)
        print(cmd)
        uniqueIDs = pcr.cover(uniqueIDs, pcr.windowmajority(uniqueIDs, 0.5))
    # - use only cells within the landmask
    uniqueIDs = pcr.ifthen(landmask, uniqueIDs)
    pcr.report(uniqueIDs, "class_ids.map")                                
    
    # cell area at 5 arc min resolution
Exemple #38
0
def getMinMaxMean(mapFile):
    mn = pcr.cellvalue(pcr.mapminimum(mapFile),1)[0]
    mx = pcr.cellvalue(pcr.mapmaximum(mapFile),1)[0]
    nrValues  = pcr.cellvalue(pcr.maptotal(pcr.scalar(pcr.defined(mapFile))), 1 ) [0] #/ getNumNonMissingValues(mapFile)
    return mn,mx,(getMapTotal(mapFile) / nrValues)
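# a minimal usage sketch (illustrative; assumes the companion helper
# getMapTotal returning the sum of all non-missing cell values):
#   dem = pcr.readmap("dem.map")
#   mn, mx, mean = getMinMaxMean(dem)
#   print("min=%g max=%g mean=%g" % (mn, mx, mean))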
Exemple #39
0
    def getParameterFiles(self,currTimeStep,cellArea,ldd,\
                               initial_condition_dictionary = None,\
                               currTimeStepInDateTimeFormat = False):

        # parameters for Water Bodies: fracWat
        #                              waterBodyIds
        #                              waterBodyOut
        #                              waterBodyArea
        #                              waterBodyTyp
        #                              waterBodyCap

        # cell surface area (m2) and ldd
        self.cellArea = cellArea
        ldd = pcr.ifthen(self.landmask, ldd)

        # date used for accessing/extracting water body information
        if currTimeStepInDateTimeFormat:
            date_used = currTimeStep
            year_used = currTimeStep.year
        else:
            date_used = currTimeStep.fulldate
            year_used = currTimeStep.year
        if self.onlyNaturalWaterBodies == True:
            date_used = self.dateForNaturalCondition
            year_used = self.dateForNaturalCondition[0:4]

        # fracWat = fraction of surface water bodies (dimensionless)
        self.fracWat = pcr.scalar(0.0)

        if self.useNetCDF:
            self.fracWat = vos.netcdf2PCRobjClone(self.ncFileInp,'fracWaterInp', \
                           date_used, useDoy = 'yearly',\
                           cloneMapFileName = self.cloneMap)
        else:
            self.fracWat = vos.readPCRmapClone(\
                           self.fracWaterInp+str(year_used)+".map",
                           self.cloneMap,self.tmpDir,self.inputDir)

        self.fracWat = pcr.cover(self.fracWat, 0.0)
        self.fracWat = pcr.max(0.0, self.fracWat)
        self.fracWat = pcr.min(1.0, self.fracWat)

        self.waterBodyIds = pcr.nominal(0)  # waterBody ids
        self.waterBodyOut = pcr.boolean(0)  # waterBody outlets
        self.waterBodyArea = pcr.scalar(0.)  # waterBody surface areas

        # water body ids
        if self.useNetCDF:
            self.waterBodyIds = vos.netcdf2PCRobjClone(self.ncFileInp,'waterBodyIds', \
                                date_used, useDoy = 'yearly',\
                                cloneMapFileName = self.cloneMap)
        else:
            self.waterBodyIds = vos.readPCRmapClone(\
                self.waterBodyIdsInp+str(year_used)+".map",\
                self.cloneMap,self.tmpDir,self.inputDir,False,None,True)
        #
        self.waterBodyIds = pcr.ifthen(\
                            pcr.scalar(self.waterBodyIds) > 0.,\
                            pcr.nominal(self.waterBodyIds))

        # water body outlets (correcting outlet positions)
        wbCatchment = pcr.catchmenttotal(pcr.scalar(1), ldd)
        self.waterBodyOut = pcr.ifthen(wbCatchment ==\
                            pcr.areamaximum(wbCatchment, \
                            self.waterBodyIds),\
                            self.waterBodyIds) # = outlet ids           # Note: this may give more than one outlet per water body if several cells tie for the largest upstream area
        # - make sure that there is only one outlet for each water body
        self.waterBodyOut = pcr.ifthen(\
                            pcr.areaorder(pcr.scalar(self.waterBodyOut), \
                            self.waterBodyOut) == 1., self.waterBodyOut)
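        # pcr.areaorder ranks cells within each area (here: each water body) in
        # ascending order starting at 1, so keeping rank 1 selects exactly one
        # cell as the outlet even when several cells tie for the largest upstream area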
        self.waterBodyOut = pcr.ifthen(\
                            pcr.scalar(self.waterBodyIds) > 0.,\
                            self.waterBodyOut)

        # TODO: Please also consider endorheic lakes!

        # correcting water body ids
        self.waterBodyIds = pcr.ifthen(\
                            pcr.scalar(self.waterBodyIds) > 0.,\
                            pcr.subcatchment(ldd,self.waterBodyOut))

        # boolean map for water body outlets:
        self.waterBodyOut = pcr.ifthen(\
                            pcr.scalar(self.waterBodyOut) > 0.,\
                            pcr.boolean(1))

        # reservoir surface area (m2):
        if self.useNetCDF:
            resSfArea = 1000. * 1000. * \
                        vos.netcdf2PCRobjClone(self.ncFileInp,'resSfAreaInp', \
                        date_used, useDoy = 'yearly',\
                        cloneMapFileName = self.cloneMap)
        else:
            resSfArea = 1000. * 1000. * vos.readPCRmapClone(
                   self.resSfAreaInp+str(year_used)+".map",\
                   self.cloneMap,self.tmpDir,self.inputDir)
        resSfArea = pcr.areaaverage(resSfArea, self.waterBodyIds)
        resSfArea = pcr.cover(resSfArea, 0.)

        # water body surface area (m2): (lakes and reservoirs)
        self.waterBodyArea = pcr.max(pcr.areatotal(\
                             pcr.cover(\
                             self.fracWat*self.cellArea, 0.0), self.waterBodyIds),
                             pcr.areaaverage(\
                             pcr.cover(resSfArea, 0.0) ,       self.waterBodyIds))
        self.waterBodyArea = pcr.ifthen(self.waterBodyArea > 0.,\
                             self.waterBodyArea)

        # correcting water body ids and outlets (exclude all water bodies with surfaceArea = 0)
        self.waterBodyIds = pcr.ifthen(self.waterBodyArea > 0.,
                                       self.waterBodyIds)
        self.waterBodyOut = pcr.ifthen(pcr.boolean(self.waterBodyIds),
                                       self.waterBodyOut)

        # water body types:
        # - 2 = reservoirs (regulated discharge)
        # - 1 = lakes (weirFormula)
        # - 0 = non lakes or reservoirs (e.g. wetland)
        self.waterBodyTyp = pcr.nominal(0)

        if self.useNetCDF:
            self.waterBodyTyp = vos.netcdf2PCRobjClone(self.ncFileInp,'waterBodyTyp', \
                                date_used, useDoy = 'yearly',\
                                cloneMapFileName = self.cloneMap)
        else:
            self.waterBodyTyp = vos.readPCRmapClone(
                self.waterBodyTypInp+str(year_used)+".map",\
                self.cloneMap,self.tmpDir,self.inputDir,False,None,True)

        # excluding wetlands (waterBodyTyp = 0) in all functions related to lakes/reservoirs
        #
        self.waterBodyTyp = pcr.ifthen(\
                            pcr.scalar(self.waterBodyTyp) > 0,\
                            pcr.nominal(self.waterBodyTyp))
        self.waterBodyTyp = pcr.ifthen(\
                            pcr.scalar(self.waterBodyIds) > 0,\
                            pcr.nominal(self.waterBodyTyp))
        self.waterBodyTyp = pcr.areamajority(self.waterBodyTyp,\
                                             self.waterBodyIds)     # choose only one type: either lake or reservoir
        self.waterBodyTyp = pcr.ifthen(\
                            pcr.scalar(self.waterBodyTyp) > 0,\
                            pcr.nominal(self.waterBodyTyp))
        self.waterBodyTyp = pcr.ifthen(pcr.boolean(self.waterBodyIds),
                                       self.waterBodyTyp)

        # correcting lakes and reservoirs ids and outlets
        self.waterBodyIds = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0, self.waterBodyIds)
        self.waterBodyOut = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0, self.waterBodyOut)

        # reservoir maximum capacity (m3):
        self.resMaxCap = pcr.scalar(0.0)
        self.waterBodyCap = pcr.scalar(0.0)

        if self.useNetCDF:
            self.resMaxCap = 1000. * 1000. * \
                             vos.netcdf2PCRobjClone(self.ncFileInp,'resMaxCapInp', \
                             date_used, useDoy = 'yearly',\
                             cloneMapFileName = self.cloneMap)
        else:
            self.resMaxCap = 1000. * 1000. * vos.readPCRmapClone(\
                self.resMaxCapInp+str(year_used)+".map", \
                self.cloneMap,self.tmpDir,self.inputDir)

        self.resMaxCap = pcr.ifthen(self.resMaxCap > 0,\
                                    self.resMaxCap)
        self.resMaxCap = pcr.areaaverage(self.resMaxCap,\
                                         self.waterBodyIds)

        # water body capacity (m3): (lakes and reservoirs)
        self.waterBodyCap = pcr.cover(
            self.resMaxCap, 0.0)  # Note: Most of lakes have capacities > 0.
        self.waterBodyCap = pcr.ifthen(pcr.boolean(self.waterBodyIds),
                                       self.waterBodyCap)

        # correcting water body types:                                  # Reservoirs that have zero capacities will be assumed as lakes.
        self.waterBodyTyp = \
                 pcr.ifthen(pcr.scalar(self.waterBodyTyp) > 0.,\
                                       self.waterBodyTyp)
        self.waterBodyTyp = pcr.ifthenelse(self.waterBodyCap > 0.,\
                                           self.waterBodyTyp,\
                 pcr.ifthenelse(pcr.scalar(self.waterBodyTyp) == 2,\
                                           pcr.nominal(1),\
                                           self.waterBodyTyp))

        # final corrections:
        self.waterBodyTyp = pcr.ifthen(self.waterBodyArea > 0.,\
                                       self.waterBodyTyp)                     # make sure that all lakes and/or reservoirs have surface areas
        self.waterBodyTyp = \
                 pcr.ifthen(pcr.scalar(self.waterBodyTyp) > 0.,\
                                       self.waterBodyTyp)                     # make sure that only types 1 and 2 will be considered in lake/reservoir functions
        self.waterBodyIds = pcr.ifthen(pcr.scalar(self.waterBodyTyp) > 0.,\
                            self.waterBodyIds)                                # make sure that all lakes and/or reservoirs have ids
        self.waterBodyOut = pcr.ifthen(pcr.scalar(self.waterBodyIds) > 0.,\
                                                  self.waterBodyOut)          # make sure that all lakes and/or reservoirs have outlets

        # for a natural run (self.onlyNaturalWaterBodies == True)
        # which uses only the year 1900, assume all reservoirs are lakes
        if self.onlyNaturalWaterBodies == True and date_used == self.dateForNaturalCondition:
            logger.info(
                "Using only natural water bodies identified in the year 1900. All reservoirs in 1900 are assumed as lakes."
            )
            self.waterBodyTyp = \
             pcr.ifthen(pcr.scalar(self.waterBodyTyp) > 0.,\
                        pcr.nominal(1))

        # check that all lakes and/or reservoirs have types, ids, surface areas and outlets:
        test = pcr.defined(self.waterBodyTyp) & pcr.defined(self.waterBodyArea) &\
               pcr.defined(self.waterBodyIds) & pcr.boolean(pcr.areamaximum(pcr.scalar(self.waterBodyOut), self.waterBodyIds))
        a, b, c = vos.getMinMaxMean(
            pcr.cover(pcr.scalar(test), 1.0) - pcr.scalar(1.0))
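        # after cover(scalar(test), 1.0) - 1.0, cells failing the test map to
        # -1.0 while all other cells (passing or undefined) map to 0.0, so a
        # minimum or maximum beyond the threshold flags missing information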
        threshold = 1e-3
        if abs(a) > threshold or abs(b) > threshold:
            logger.warning(
                "Missing information in some lakes and/or reservoirs.")

        # at the beginning of simulation period (timeStepPCR = 1)
        # - we have to define/get the initial conditions
        #
        if initial_condition_dictionary is not None and currTimeStep.timeStepPCR == 1:
            self.getICs(initial_condition_dictionary)

        # For each new reservoir (introduced at the beginning of the year)
        # initiating storage, average inflow and outflow
        # PS: THIS IS NOT NEEDED FOR OFFLINE MODFLOW RUN!
        #
        try:
            self.waterBodyStorage = pcr.cover(self.waterBodyStorage, 0.0)
            self.avgInflow = pcr.cover(self.avgInflow, 0.0)
            self.avgOutflow = pcr.cover(self.avgOutflow, 0.0)
            self.waterBodyStorage = pcr.ifthen(self.landmask,
                                               self.waterBodyStorage)
            self.avgInflow = pcr.ifthen(self.landmask, self.avgInflow)
            self.avgOutflow = pcr.ifthen(self.landmask, self.avgOutflow)
        except:
            # PS: FOR OFFLINE MODFLOW RUN!
            pass
        # TODO: Remove try and except

        # cropping only in the landmask region:
        self.fracWat = pcr.ifthen(self.landmask, self.fracWat)
        self.waterBodyIds = pcr.ifthen(self.landmask, self.waterBodyIds)
        self.waterBodyOut = pcr.ifthen(self.landmask, self.waterBodyOut)
        self.waterBodyArea = pcr.ifthen(self.landmask, self.waterBodyArea)
        self.waterBodyTyp = pcr.ifthen(self.landmask, self.waterBodyTyp)
        self.waterBodyCap = pcr.ifthen(self.landmask, self.waterBodyCap)
    def initial(self):
        """ initial part of the water abstraction module
        """

        # self.testmap=windowaverage(self.var.Elevation,5)
        # self.report(self.testmap,"test.map")

        # ************************************************************
        # ***** WATER USE
        # ************************************************************
        settings = LisSettings.instance()
        option = settings.options
        binding = settings.binding
        maskinfo = MaskInfo.instance()
        if option['wateruse']:
            self.var.WUsePercRemain = loadmap('WUsePercRemain')
            self.var.NoWaterUseSteps = int(loadmap('maxNoWateruse'))
            self.var.GroundwaterBodies = loadmap('GroundwaterBodies')
            self.var.FractionGroundwaterUsed = np.minimum(
                np.maximum(loadmap('FractionGroundwaterUsed'),
                           maskinfo.in_zero()), 1.0)
            self.var.FractionNonConventionalWaterUsed = loadmap(
                'FractionNonConventionalWaterUsed')
            self.var.FractionLakeReservoirWaterUsed = loadmap(
                'FractionLakeReservoirWaterUsed')
            self.var.EFlowThreshold = loadmap('EFlowThreshold')
            # EFlowThreshold is map with m3/s discharge, e.g. the 10th percentile discharge of the baseline run

            self.var.WUseRegionC = loadmap('WUseRegion').astype(int)
            self.var.IrrigationMult = loadmap('IrrigationMult')

            # ************************************************************
            # ***** water use constant maps ******************************
            # ************************************************************

            self.var.IndustryConsumptiveUseFraction = loadmap(
                'IndustryConsumptiveUseFraction')
            # fraction (0-1)
            self.var.WaterReUseFraction = loadmap('WaterReUseFraction')
            # fraction of water re-used (0-1)
            self.var.EnergyConsumptiveUseFraction = loadmap(
                'EnergyConsumptiveUseFraction')
            # fraction (0-1), value depends on cooling technology of power plants
            self.var.LivestockConsumptiveUseFraction = loadmap(
                'LivestockConsumptiveUseFraction')
            # fraction (0-1)
            self.var.LeakageFraction = np.minimum(
                np.maximum(
                    loadmap('LeakageFraction') *
                    (1 - loadmap('LeakageReductionFraction')),
                    maskinfo.in_zero()), 1.0)
            self.var.DomesticLeakageConstant = np.minimum(
                np.maximum(1 / (1 - self.var.LeakageFraction),
                           maskinfo.in_zero()), 1.0)
            # Domestic Water Abstraction becomes larger in case of leakage
            # LeakageFraction is LeakageFraction (0-1) multiplied by reduction scenario (10% reduction is 0.1 in map)
            # 0.65 leakage and 0.1 reduction leads to 0.585 effective leakage, resulting in 2.41 times more water abstraction
            self.var.DomesticWaterSavingConstant = np.minimum(
                np.maximum(1 - loadmap('WaterSavingFraction'),
                           maskinfo.in_zero()), 1.0)
            # Domestic water saving if in place, changes this value from 1 to a value between 0 and 1, and will reduce demand and abstraction
            # so value = 0.9 if WaterSavingFraction equals 0.1 (10%)
            self.var.DomesticConsumptiveUseFraction = loadmap(
                'DomesticConsumptiveUseFraction')
            # fraction (0-1), typically rather low ~ 0.10
            self.var.LeakageWaterLossFraction = loadmap('LeakageWaterLoss')
            # fraction (0-1), 0=no leakage

            # Initialize water demand. Read either a static value, a PCRaster map, or a NetCDF file (single or stack).
            # If reading from NetCDF stack, get time step corresponding to model step.
            # Added management for sub-daily modelling time steps
            # Added possibility to use one single average year to be repeated during the simulation
            if option['useWaterDemandAveYear']:
                # CM: using one water demand average year throughout the model simulation
                self.var.DomesticDemandMM = loadmap(
                    'DomesticDemandMaps',
                    timestampflag='closest',
                    averageyearflag=True) * self.var.DtDay
                self.var.IndustrialDemandMM = loadmap(
                    'IndustrialDemandMaps',
                    timestampflag='closest',
                    averageyearflag=True) * self.var.DtDay
                self.var.LivestockDemandMM = loadmap(
                    'LivestockDemandMaps',
                    timestampflag='closest',
                    averageyearflag=True) * self.var.DtDay
                self.var.EnergyDemandMM = loadmap(
                    'EnergyDemandMaps',
                    timestampflag='closest',
                    averageyearflag=True) * self.var.DtDay
            else:
                # CM: using information on water demand from NetCDF files
                self.var.DomesticDemandMM = loadmap(
                    'DomesticDemandMaps',
                    timestampflag='closest') * self.var.DtDay
                self.var.IndustrialDemandMM = loadmap(
                    'IndustrialDemandMaps',
                    timestampflag='closest') * self.var.DtDay
                self.var.LivestockDemandMM = loadmap(
                    'LivestockDemandMaps',
                    timestampflag='closest') * self.var.DtDay
                self.var.EnergyDemandMM = loadmap(
                    'EnergyDemandMaps',
                    timestampflag='closest') * self.var.DtDay

            # Check consistency with the reference calendar that is read from the precipitation forcing file (global_modules.zusatz.optionBinding)
            if option['TransientWaterDemandChange'] and option[
                    'readNetcdfStack']:
                for k in ('DomesticDemandMaps', 'IndustrialDemandMaps',
                          'LivestockDemandMaps', 'EnergyDemandMaps'):
                    with Dataset(binding[k] + '.nc') as nc:
                        cal_type = get_calendar_type(nc)
                        if cal_type != binding['calendar_type']:
                            warnings.warn(
                                calendar_inconsistency_warning(
                                    binding[k], cal_type,
                                    binding['calendar_type']))

            if option['groundwaterSmooth']:
                self.var.GroundwaterBodiesPcr = decompress(
                    self.var.GroundwaterBodies)
                self.var.groundwaterCatch = boolean(
                    decompress((self.var.GroundwaterBodies *
                                self.var.Catchments).astype(int)))
                # nominal(scalar(GroundwaterBodies)*scalar(self.var.Catchments));
                # smoothing for groundwater to correct error by using windowtotal, based on groundwater bodies and catchments
                self.var.LZSmoothRange = loadmap('LZSmoothRange')

            if option['wateruseRegion']:
                WUseRegion = nominal(loadmap('WUseRegion', pcr=True))
                pitWuse1 = ifthen(self.var.AtLastPoint != 0, boolean(1))
                pitWuse1b = ifthen(defined(pitWuse1), WUseRegion)
                # use every existing pit in the Ldd and number them by the water regions
                # coastal water regions can have more than one pit per water region

                pitWuseMax = areamaximum(self.var.UpArea, WUseRegion)
                pitWuse2 = ifthen(pitWuseMax == self.var.UpArea, WUseRegion)
                # search outlets in the inland water regions by using the maximum upstream area as criterion

                pitWuse3 = downstream(self.var.LddStructuresKinematic,
                                      WUseRegion)
                pitWuse3b = ifthen(pitWuse3 != WUseRegion, WUseRegion)
                # search point where ldd leaves a water region

                pitWuse = cover(pitWuse1b, pitWuse2, pitWuse3b, nominal(0))
                # join all sources of pits

                LddWaterRegion = lddrepair(
                    ifthenelse(pitWuse == 0, self.var.LddStructuresKinematic,
                               5))
                # create a Ldd with pits at every water region outlet
                # this results in an interrupted ldd, so water cannot be transferred to the next water region
                lddC = compressArray(LddWaterRegion)
                inAr = decompress(
                    np.arange(maskinfo.info.mapC[0], dtype="int32"))
                # giving a number to each non-missing pixel as id
                self.var.downWRegion = (compressArray(
                    downstream(LddWaterRegion, inAr))).astype(np.int32)
                # each upstream pixel gets the id of the downstream pixel
                self.var.downWRegion[lddC == 5] = maskinfo.info.mapC[0]
                # all pits get a high number

                # ************************************************************
                # ***** OUTFLOW AND INFLOW POINTS FOR WATER REGIONS **********
                # ************************************************************

                self.var.WaterRegionOutflowPoints = ifthen(
                    pitWuse != 0, boolean(1))
                # outflow points to calculate upstream inflow for balances and Water Exploitation Index
                # both inland outflow points to the downstream subbasin, and coastal outlets

                WaterRegionInflow1 = boolean(
                    upstream(
                        self.var.LddStructuresKinematic,
                        cover(scalar(self.var.WaterRegionOutflowPoints), 0)))
                self.var.WaterRegionInflowPoints = ifthen(
                    WaterRegionInflow1, boolean(1))
                # inflow points to calculate upstream inflow for balances and Water Exploitation Index
            else:
                self.var.downWRegion = self.var.downstruct.copy()
                self.var.downWRegion = self.var.downWRegion.astype(np.int32)

            # ************************************************************
            # ***** Initialising cumulative output variables *************
            # ************************************************************

            # These are all needed to compute the cumulative mass balance error
            self.var.wateruseCum = maskinfo.in_zero()
            # water use cumulated amount
            self.var.WUseAddM3Dt = maskinfo.in_zero()
            self.var.WUseAddM3 = maskinfo.in_zero()

            self.var.IrriLossCUM = maskinfo.in_zero()
            # Cumulative irrigation loss [mm]
            # Cumulative abstraction from surface water [mm]

            self.var.TotalAbstractionFromSurfaceWaterM3 = maskinfo.in_zero()
            self.var.TotalAbstractionFromGroundwaterM3 = maskinfo.in_zero()
            self.var.TotalIrrigationAbstractionM3 = maskinfo.in_zero()
            self.var.TotalPaddyRiceIrrigationAbstractionM3 = maskinfo.in_zero()
            self.var.TotalLivestockAbstractionM3 = maskinfo.in_zero()

            self.var.IrrigationType = loadmap('IrrigationType')
            self.var.IrrigationEfficiency = loadmap('IrrigationEfficiency')
            self.var.ConveyanceEfficiency = loadmap('ConveyanceEfficiency')

            self.var.GroundwaterRegionPixels = np.take(
                np.bincount(self.var.WUseRegionC,
                            weights=self.var.GroundwaterBodies),
                self.var.WUseRegionC)
            self.var.AllRegionPixels = np.take(
                np.bincount(self.var.WUseRegionC,
                            weights=self.var.GroundwaterBodies * 0.0 + 1.0),
                self.var.WUseRegionC)
            self.var.RatioGroundWaterUse = self.var.AllRegionPixels / (
                self.var.GroundwaterRegionPixels + 0.01)
            self.var.FractionGroundwaterUsed = np.minimum(
                self.var.FractionGroundwaterUsed *
                self.var.RatioGroundWaterUse,
                1 - self.var.FractionNonConventionalWaterUsed)
            # FractionGroundwaterUsed is a percentage given at national scale
            # since the water needs to come from the GroundwaterBodies pixels,
            # the fraction needs correction for the non-Groundwaterbodies; this is done here
            self.var.EFlowIndicator = maskinfo.in_zero()
            self.var.ReservoirAbstractionM3 = maskinfo.in_zero()
            self.var.PotentialSurfaceWaterAvailabilityForIrrigationM3 = maskinfo.in_zero()
            self.var.LakeAbstractionM3 = maskinfo.in_zero()
            self.var.FractionAbstractedFromChannels = maskinfo.in_zero()
            self.var.AreatotalIrrigationUseM3 = maskinfo.in_zero()
            self.var.totalAddM3 = maskinfo.in_zero()
            self.var.TotalDemandM3 = maskinfo.in_zero()
import pcraster as pcr
import netCDF4 as nc

# clone map
clone_map_file_name = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/lddsound_05min.map"
pcr.setclone(clone_map_file_name)

# aquifer thickness map:
aquifer_thickness_file_name = "/projects/0/dfguu/users/edwin/data/aquifer_properties/thickness_05min.map"
aquifer_thickness = pcr.readmap(aquifer_thickness_file_name)

# extent of confining layer
extent_of_confining_layer_file_name = "/home/edwin/data/inge_confining_layer_parameters/conflayers4.map"
confining_layer_extent = pcr.boolean(pcr.readmap(extent_of_confining_layer_file_name))

# thickness of confining layer = 10 percent of the first 250 m
confining_layer_thickness = pcr.ifthen(confining_layer_extent, pcr.min(250.0, aquifer_thickness)) * 0.10

# extrapolate
confining_layer_thickness = pcr.cover(confining_layer_thickness, pcr.windowaverage(pcr.cover(confining_layer_thickness, 0.0), 0.50))
confining_layer_thickness = pcr.cover(confining_layer_thickness, 0.0)
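# note: in the extrapolation above, the inner cover(..., 0.0) fills missing
# cells with zero so that windowaverage over a 0.50 degree window can smear the
# thickness outward; the outer cover keeps the original values where defined
# and fills the remaining gaps from the smoothed field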
confining_layer_thickness_output_filename = "/home/edwin/data/inge_confining_layer_parameters/confining_layer_thickness_edwin.map"
pcr.report(confining_layer_thickness, confining_layer_thickness_output_filename)

# masking to the landmask only
landmask_file_name = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/lddsound_05min.map"
landmask = pcr.defined(pcr.readmap(landmask_file_name))
confining_layer_thickness_masked_output_filename = "/home/edwin/data/inge_confining_layer_parameters/confining_layer_thickness_edwin.masked.map"
pcr.report(pcr.ifthen(landmask, confining_layer_thickness), confining_layer_thickness_masked_output_filename)
except:
	pass

# making temporary directory:
tmp_directory = output_directory + "/tmp/"
try:
	os.makedirs(tmp_directory)
except:
	pass

# set clone
clone_map = "/data/hydroworld/PCRGLOBWB20/input5min/routing/lddsound_05min.map"
pcr.setclone(clone_map)

# landmask map
landmask = pcr.defined(pcr.readmap(clone_map))

# class map file name:
#~ class_map_file_name = "/home/sutan101/data/aqueduct_gis_layers/aqueduct_shp_from_marta/Aqueduct_States.map"
#~ class_map_file_name = "/home/sutan101/data/aqueduct_gis_layers/aqueduct_shp_from_marta/Aqueduct_GDBD.map"
#~ class_map_file_name = "/home/sutan101/data/processing_whymap/version_19september2014/major_aquifer_30min.extended.map"
#~ class_map_file_name = "/home/sutan101/data/processing_whymap/version_19september2014/major_aquifer_30min.map"
class_map_file_name = str(sys.argv[2])
class_map_default_folder = "/home/sutan101/data/aqueduct_gis_layers/aqueduct_shp_from_marta/" 
if class_map_file_name == "state": class_map_file_name = class_map_default_folder + "/Aqueduct_States.map"
if class_map_file_name == "drainage_unit": class_map_file_name = class_map_default_folder + "/Aqueduct_GDBD.map"
if class_map_file_name == "aquifer": class_map_file_name = class_map_default_folder + "/why_wgs1984_BUENO.map"
if class_map_file_name == "country": class_map_file_name = "/home/sutan101/data/country_shp_from_tianyi/World_Polys_High.map"

# class map: assign a unique id to every cell in the landmask
class_map    = pcr.nominal(pcr.uniqueid(landmask))
os.remove(tempCloneMap)
msg = 'all done'
logger.info(msg)
print
print

# set the global clone map
clone_map_file = "/projects/0/dfguu/users/edwinhs/data/HydroSHEDS/hydro_basin_without_lakes/integrating_ldd/version_9_december_2016/merged_ldd.map"
pcr.setclone(clone_map_file)

# set the landmask
# - using the following landmask (defined to exclude river basins with limited output of PCR-GLOBWB / limited output of extreme value analyses)
landmask_30sec_file = "/projects/0/aqueduct/users/edwinsut/data/landmasks_for_extreme_value_analysis_and_downscaling/landmask_downscaling/landmask_downscaling_30sec.map"
msg = "Set the (high resolution) landmask based on the file: " + str(landmask_30sec_file)
logger.info(msg)
landmask_30sec = pcr.defined(pcr.readmap(landmask_30sec_file))
landmask_used  = pcr.ifthen(landmask_30sec, landmask_30sec)

# boolean maps to mask out permanent water bodies (lakes and reservoirs):
reservoirs_30sec_file = "/projects/0/aqueduct/users/edwinsut/data/reservoirs_and_lakes_30sec/grand_reservoirs_v1_1.boolean.map"
msg = "Set the (high resolution) reservoirs based on the file: " + str(reservoirs_30sec_file)
logger.info(msg)
reservoirs_30sec = pcr.cover(pcr.readmap(reservoirs_30sec_file), pcr.boolean(0.0))
lakes_30sec_file      = "/projects/0/aqueduct/users/edwinsut/data/reservoirs_and_lakes_30sec/glwd1_lakes.boolean.map"
msg = "Set the (high resolution) lakes based on the file: " + str(lakes_30sec_file)
logger.info(msg)
lakes_30sec = pcr.cover(pcr.readmap(lakes_30sec_file), pcr.boolean(0.0))
#
# cells that do not belong to lakes and reservoirs
non_permanent_water_bodies = pcr.ifthenelse(reservoirs_30sec, pcr.boolean(0.0), pcr.boolean(1.0))
non_permanent_water_bodies = pcr.ifthen(non_permanent_water_bodies, non_permanent_water_bodies)
Exemple #44
0
    def getParameterFiles(
        self, currTimeStep, cellArea, ldd, initial_condition_dictionary=None
    ):

        # parameters for Water Bodies: fracWat
        #                              waterBodyIds
        #                              waterBodyOut
        #                              waterBodyArea
        #                              waterBodyTyp
        #                              waterBodyCap

        # cell surface area (m2) and ldd
        self.cellArea = cellArea
        ldd = pcr.ifthen(self.landmask, ldd)

        # date used for accessing/extracting water body information
        date_used = currTimeStep.fulldate
        year_used = currTimeStep.year
        if self.onlyNaturalWaterBodies == True:
            date_used = self.dateForNaturalCondition
            year_used = self.dateForNaturalCondition[0:4]

        # fracWat = fraction of surface water bodies (dimensionless)
        self.fracWat = pcr.scalar(0.0)

        if self.useNetCDF:
            self.fracWat = vos.netcdf2PCRobjClone(
                self.ncFileInp,
                "fracWaterInp",
                date_used,
                useDoy="yearly",
                cloneMapFileName=self.cloneMap,
            )
        else:
            self.fracWat = vos.readPCRmapClone(
                self.fracWaterInp + str(year_used) + ".map",
                self.cloneMap,
                self.tmpDir,
                self.inputDir,
            )

        self.fracWat = pcr.cover(self.fracWat, 0.0)
        self.fracWat = pcr.max(0.0, self.fracWat)
        self.fracWat = pcr.min(1.0, self.fracWat)

        self.waterBodyIds = pcr.nominal(0)  # waterBody ids
        self.waterBodyOut = pcr.boolean(0)  # waterBody outlets
        self.waterBodyArea = pcr.scalar(0.0)  # waterBody surface areas

        # water body ids
        if self.useNetCDF:
            self.waterBodyIds = vos.netcdf2PCRobjClone(
                self.ncFileInp,
                "waterBodyIds",
                date_used,
                useDoy="yearly",
                cloneMapFileName=self.cloneMap,
            )
        else:
            self.waterBodyIds = vos.readPCRmapClone(
                self.waterBodyIdsInp + str(year_used) + ".map",
                self.cloneMap,
                self.tmpDir,
                self.inputDir,
                False,
                None,
                True,
            )
        #
        self.waterBodyIds = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0.0, pcr.nominal(self.waterBodyIds)
        )

        # water body outlets (correcting outlet positions)
        wbCatchment = pcr.catchmenttotal(pcr.scalar(1), ldd)
        self.waterBodyOut = pcr.ifthen(
            wbCatchment == pcr.areamaximum(wbCatchment, self.waterBodyIds),
            self.waterBodyIds,
        )  # = outlet ids
        self.waterBodyOut = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0.0, self.waterBodyOut
        )
        # TODO: Please also consider endorheic lakes!

        # correcting water body ids
        self.waterBodyIds = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0.0,
            pcr.subcatchment(ldd, self.waterBodyOut),
        )

        # boolean map for water body outlets:
        self.waterBodyOut = pcr.ifthen(
            pcr.scalar(self.waterBodyOut) > 0.0, pcr.boolean(1)
        )

        # reservoir surface area (m2):
        if self.useNetCDF:
            resSfArea = (
                1000.0
                * 1000.0
                * vos.netcdf2PCRobjClone(
                    self.ncFileInp,
                    "resSfAreaInp",
                    date_used,
                    useDoy="yearly",
                    cloneMapFileName=self.cloneMap,
                )
            )
        else:
            resSfArea = (
                1000.0
                * 1000.0
                * vos.readPCRmapClone(
                    self.resSfAreaInp + str(year_used) + ".map",
                    self.cloneMap,
                    self.tmpDir,
                    self.inputDir,
                )
            )
        resSfArea = pcr.areaaverage(resSfArea, self.waterBodyIds)
        resSfArea = pcr.cover(resSfArea, 0.0)

        # water body surface area (m2): (lakes and reservoirs)
        self.waterBodyArea = pcr.max(
            pcr.areatotal(
                pcr.cover(self.fracWat * self.cellArea, 0.0), self.waterBodyIds
            ),
            pcr.areaaverage(pcr.cover(resSfArea, 0.0), self.waterBodyIds),
        )
        self.waterBodyArea = pcr.ifthen(self.waterBodyArea > 0.0, self.waterBodyArea)

        # correcting water body ids and outlets (exclude all water bodies with surfaceArea = 0)
        self.waterBodyIds = pcr.ifthen(self.waterBodyArea > 0.0, self.waterBodyIds)
        self.waterBodyOut = pcr.ifthen(
            pcr.boolean(self.waterBodyIds), self.waterBodyOut
        )

        # water body types:
        # - 2 = reservoirs (regulated discharge)
        # - 1 = lakes (weirFormula)
        # - 0 = non lakes or reservoirs (e.g. wetland)
        self.waterBodyTyp = pcr.nominal(0)

        if self.useNetCDF:
            self.waterBodyTyp = vos.netcdf2PCRobjClone(
                self.ncFileInp,
                "waterBodyTyp",
                date_used,
                useDoy="yearly",
                cloneMapFileName=self.cloneMap,
            )
        else:
            self.waterBodyTyp = vos.readPCRmapClone(
                self.waterBodyTypInp + str(year_used) + ".map",
                self.cloneMap,
                self.tmpDir,
                self.inputDir,
                False,
                None,
                True,
            )

        # excluding wetlands (waterBodyTyp = 0) in all functions related to lakes/reservoirs
        #
        self.waterBodyTyp = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0, pcr.nominal(self.waterBodyTyp)
        )
        self.waterBodyTyp = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0, pcr.nominal(self.waterBodyTyp)
        )
        self.waterBodyTyp = pcr.areamajority(
            self.waterBodyTyp, self.waterBodyIds
        )  # choose only one type: either lake or reservoir
        self.waterBodyTyp = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0, pcr.nominal(self.waterBodyTyp)
        )
        self.waterBodyTyp = pcr.ifthen(
            pcr.boolean(self.waterBodyIds), self.waterBodyTyp
        )

        # correcting lakes and reservoirs ids and outlets
        self.waterBodyIds = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0, self.waterBodyIds
        )
        self.waterBodyOut = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0, self.waterBodyOut
        )

        # reservoir maximum capacity (m3):
        self.resMaxCap = pcr.scalar(0.0)
        self.waterBodyCap = pcr.scalar(0.0)

        if self.useNetCDF:
            self.resMaxCap = (
                1000.0
                * 1000.0
                * vos.netcdf2PCRobjClone(
                    self.ncFileInp,
                    "resMaxCapInp",
                    date_used,
                    useDoy="yearly",
                    cloneMapFileName=self.cloneMap,
                )
            )
        else:
            self.resMaxCap = (
                1000.0
                * 1000.0
                * vos.readPCRmapClone(
                    self.resMaxCapInp + str(year_used) + ".map",
                    self.cloneMap,
                    self.tmpDir,
                    self.inputDir,
                )
            )

        self.resMaxCap = pcr.ifthen(self.resMaxCap > 0, self.resMaxCap)
        self.resMaxCap = pcr.areaaverage(self.resMaxCap, self.waterBodyIds)

        # water body capacity (m3): (lakes and reservoirs)
        self.waterBodyCap = pcr.cover(
            self.resMaxCap, 0.0
        )  # Note: Most of lakes have capacities > 0.
        self.waterBodyCap = pcr.ifthen(
            pcr.boolean(self.waterBodyIds), self.waterBodyCap
        )

        # correcting water body types:                                  # Reservoirs that have zero capacities will be assumed as lakes.
        self.waterBodyTyp = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0.0, self.waterBodyTyp
        )
        self.waterBodyTyp = pcr.ifthenelse(
            self.waterBodyCap > 0.0,
            self.waterBodyTyp,
            pcr.ifthenelse(
                pcr.scalar(self.waterBodyTyp) == 2, pcr.nominal(1), self.waterBodyTyp
            ),
        )

        # final corrections:
        self.waterBodyTyp = pcr.ifthen(
            self.waterBodyArea > 0.0, self.waterBodyTyp
        )  # make sure that all lakes and/or reservoirs have surface areas
        self.waterBodyTyp = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0.0, self.waterBodyTyp
        )  # make sure that only types 1 and 2 will be considered in lake/reservoir functions
        self.waterBodyIds = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0.0, self.waterBodyIds
        )  # make sure that all lakes and/or reservoirs have ids
        self.waterBodyOut = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0.0, self.waterBodyOut
        )  # make sure that all lakes and/or reservoirs have outlets

        # for a natural run (self.onlyNaturalWaterBodies == True)
        # which uses only the year 1900, assume all reservoirs are lakes
        if (
            self.onlyNaturalWaterBodies == True
            and date_used == self.dateForNaturalCondition
        ):
            logger.info(
                "Using only natural water bodies identified in the year 1900. All reservoirs in 1900 are assumed as lakes."
            )
            self.waterBodyTyp = pcr.ifthen(
                pcr.scalar(self.waterBodyTyp) > 0.0, pcr.nominal(1)
            )

        # check that all lakes and/or reservoirs have types, ids, surface areas and outlets:
        test = (
            pcr.defined(self.waterBodyTyp)
            & pcr.defined(self.waterBodyArea)
            & pcr.defined(self.waterBodyIds)
            & pcr.boolean(
                pcr.areamaximum(pcr.scalar(self.waterBodyOut), self.waterBodyIds)
            )
        )
        a, b, c = vos.getMinMaxMean(pcr.cover(pcr.scalar(test), 1.0) - pcr.scalar(1.0))
        threshold = 1e-3
        if abs(a) > threshold or abs(b) > threshold:
            logger.warning("Missing information in some lakes and/or reservoirs.")

        # at the beginning of simulation period (timeStepPCR = 1)
        # - we have to define/get the initial conditions
        #
        if currTimeStep.timeStepPCR == 1:
            self.getICs(initial_condition_dictionary)

        # For each new reservoir (introduced at the beginning of the year)
        # initiating storage, average inflow and outflow
        #
        self.waterBodyStorage = pcr.cover(self.waterBodyStorage, 0.0)
        self.avgInflow = pcr.cover(self.avgInflow, 0.0)
        self.avgOutflow = pcr.cover(self.avgOutflow, 0.0)

        # cropping only in the landmask region:
        self.fracWat = pcr.ifthen(self.landmask, self.fracWat)
        self.waterBodyIds = pcr.ifthen(self.landmask, self.waterBodyIds)
        self.waterBodyOut = pcr.ifthen(self.landmask, self.waterBodyOut)
        self.waterBodyArea = pcr.ifthen(self.landmask, self.waterBodyArea)
        self.waterBodyTyp = pcr.ifthen(self.landmask, self.waterBodyTyp)
        self.waterBodyCap = pcr.ifthen(self.landmask, self.waterBodyCap)
        self.waterBodyStorage = pcr.ifthen(self.landmask, self.waterBodyStorage)
        self.avgInflow = pcr.ifthen(self.landmask, self.avgInflow)
        self.avgOutflow = pcr.ifthen(self.landmask, self.avgOutflow)
#~ input_files["historical"]['folder'] = "/scratch-shared/edwinhs/bias_correction_test/input/historical/gumbel_fits/gfdl-esm2m_1960-1999/"
#
input_files["historical"]['file_name'] = {}
input_files["historical"]['file_name']['channelStorage']    = input_files["historical"]['folder'] + "/" + "gumbel_analysis_output_for_channel_storage.nc" 
input_files["historical"]['file_name']['surfaceWaterLevel'] = input_files["historical"]['folder'] + "/" + "gumbel_analysis_output_for_surface_water_level.nc" 
#
#
# general input files
# - clone map
input_files['clone_map_05min'] = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/lddsound_05min.map"
pcr.setclone(input_files['clone_map_05min'])
# - cell area, ldd maps
input_files['cell_area_05min'] = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/cellsize05min.correct.map"
input_files['ldd_map_05min'  ] = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/lddsound_05min.map"
# - landmask
landmask = pcr.defined(input_files['ldd_map_05min'  ])
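As on the line above, `pcr.defined` is applied to the ldd file directly: PCRaster operations accept map file names in place of in-memory fields, and the result is a boolean map that is True wherever the ldd has a value and False where it is missing. A small sketch of the masking idiom this enables (file name hypothetical):

import pcraster as pcr

pcr.setclone("lddsound_05min.map")            # hypothetical clone map
landmask = pcr.defined("lddsound_05min.map")  # True on land, False elsewhere
# turn False into MV so that only land cells remain defined
landmask = pcr.ifthen(landmask, landmask)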
#
# The Gumbel fit parameters derived from the annual flood maxima of the BASELINE run: WATCH 1960-1999
input_files["baseline"]  = {}
input_files["baseline"]['folder']    = os.path.abspath(sys.argv[3]) + "/"
input_files["baseline"]['file_name'] = {}
input_files["baseline"]['file_name']['channelStorage']    = input_files["baseline"]['folder'] + "/" + "gumbel_analysis_output_for_channel_storage.nc" 
input_files["baseline"]['file_name']['surfaceWaterLevel'] = input_files["baseline"]['folder'] + "/" + "gumbel_analysis_output_for_surface_water_level.nc" 


# output files
output_files                    = {}
#
# - output folder (taken from the system arguments)
output_folder_for_this_analysis = sys.argv[4]
Exemple #46
# (opening call reconstructed: the scraped snippet began mid-call; an initialised
#  `mldd` object and the first directional ldd argument are assumptions)
mldd.setStream(
         pcraster.readmap(os.path.join(dataPath, "ELddF000.out")),
         pcraster.readmap(os.path.join(dataPath, "NELddF00.out")),
         pcraster.readmap(os.path.join(dataPath, "NLddF000.out")),
         pcraster.readmap(os.path.join(dataPath, "NWLddF00.out")),
         pcraster.readmap(os.path.join(dataPath, "SELddF00.out")),
         pcraster.readmap(os.path.join(dataPath, "SLddF000.out")),
         pcraster.readmap(os.path.join(dataPath, "SWLddF00.out")),
         pcraster.readmap(os.path.join(dataPath, "WLddF000.out")))
mldd.addStream(
         pcraster.readmap(os.path.join(dataPath, "ELddF000.out")))
mldd.setDem(pcraster.spatial(pcraster.scalar(1)))


upstream = mldd.upstream(pcraster.spatial(pcraster.scalar(1)))
pcraster.report(upstream, "upstream.map")

accuflux = mldd.accuflux(pcraster.ifthen(pcraster.defined(upstream),
         pcraster.spatial(pcraster.scalar(1))))
pcraster.report(accuflux, "accuflux.map")

dem = mldd.getDem()
pcraster.report(dem, "dem.map")

streamN, streamNE, streamE, streamSE, streamS, streamSW, streamW, streamNW = \
         mldd.getStream()

pcraster.report(streamN , "streamN.map")
pcraster.report(streamNE, "streamNE.map")
pcraster.report(streamE , "streamE.map")
pcraster.report(streamSE, "streamSE.map")
pcraster.report(streamS , "streamS.map")
pcraster.report(streamSW, "streamSW.map")
pcraster.report(streamW , "streamW.map")
pcraster.report(streamNW, "streamNW.map")
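For comparison, the `accuflux` used above is a one-liner over a single flow direction map in plain PCRaster; the mldd version generalises the same accumulation to multiple flow directions per cell. A plain PCRaster sketch with hypothetical file names:

import pcraster

pcraster.setclone("clone.map")     # hypothetical clone map
ldd = pcraster.readmap("ldd.map")  # hypothetical single flow direction map
ones = pcraster.spatial(pcraster.scalar(1))
# accumulated material per cell: here the upstream cell count, including the cell itself
flow_accumulation = pcraster.accuflux(ldd, ones)
pcraster.report(flow_accumulation, "flowacc.map")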
Exemple #47
print("")
input_files['averageClimatologyDischargeMonthAvg'] = out_file

# set the pcraster clone, ldd, landmask, and cell area map
msg = "Setting the clone, ldd, landmask, and cell area maps" + ":"
logger.info(msg)
# - clone
clone_map_file = input_files['clone_map_05min']
pcr.setclone(clone_map_file)
# - ldd
ldd = vos.readPCRmapClone(input_files['ldd_map_05min'], clone_map_file,
                          output_files['tmp_folder'], None, True)
ldd = pcr.lddrepair(pcr.ldd(ldd))
# - landmask
landmask = pcr.ifthen(pcr.defined(ldd), pcr.boolean(1.0))
# - cell area
cell_area = vos.readPCRmapClone(input_files['cell_area_05min'], clone_map_file,
                                output_files['tmp_folder'])

# set the basin map
msg = "Setting the basin map" + ":"
logger.info(msg)
basin_map = pcr.nominal(\
            vos.readPCRmapClone(input_files['basin_map_05min'],
                                input_files['clone_map_05min'],
                                output_files['tmp_folder'],
                                None, False, None, True))
#~ pcr.aguila(basin_map)
# - extend/extrapolate the basin
basin_map = pcr.cover(basin_map, pcr.windowmajority(basin_map, 0.5))
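A single `windowmajority` pass only fills cells within half a window length (here 0.5 degree) of the existing basins; for wider gaps the cover-and-majority step can simply be repeated. A sketch of that iterative variant (the script above does one pass; the iteration count is an assumption):

# repeat the majority-filter extrapolation for wider gaps (iteration count is arbitrary)
for _ in range(5):
    basin_map = pcr.cover(basin_map, pcr.windowmajority(basin_map, 0.5))
# keep the extended basins within the landmask only
basin_map = pcr.ifthen(landmask, basin_map)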
Exemple #48
    # clean up and re-create the temporary directory
    try:
        shutil.rmtree(tmp_directory)  # assumption: the lost opening line removed any old tmp directory
    except:
        pass
    try:
        os.makedirs(tmp_directory)
    except:
        pass
            
    # initiate the netCDF file and object:
    tssNetCDF = ConvertMapsToNetCDF4(cloneMapFile = cloneMapFileName, attribute = attributeDictionary, cellSizeInArcMinutes = cellSizeInArcMinutes)
    tssNetCDF.createNetCDF(ncFileName,varNames,varUnits)

    index = 0 # for posCnt
    
    # set clone and define land mask region
    pcr.setclone(landmask05minFile)
    landmask = pcr.defined(pcr.readmap(landmask05minFile))
    landmask = pcr.ifthen(landmask, landmask)  # True cells only; False becomes MV
    
    # cell area at 5 arc min resolution
    cellArea = vos.readPCRmapClone(cellArea05minFile,
                                   cloneMapFileName,tmp_directory)
    cellArea = pcr.ifthen(landmask,cellArea)
    
    # ids for every 30 arc min grid:
    uniqueIDs30min = vos.readPCRmapClone(uniqueIDs30minFile,
                                         cloneMapFileName,tmp_directory) 
    uniqueIDs30min = pcr.nominal(pcr.ifthen(landmask, uniqueIDs30min))
    
    for iYear in range(staYear,endYear+1):
        for iMonth in range(1,12+1):
            timeStamp = datetime.datetime(int(iYear),int(iMonth),int(1),int(0))
Exemple #49
# - dem map
# -- using the DEM from Deltares
dem_map_high_resolution_file_name = "/projects/0/dfguu/users/edwinhs/data/HydroSHEDS/hydro_basin_without_lakes/integrating_ldd/version_9_december_2016/cover_SRTM_1km_merge_gtopo_masked.map"
#~ # -- using the gtopo30 dem
#~ dem_map_high_resolution_file_name = "/projects/0/dfguu/data/hydroworld/basedata/hydrography/GTOPO30/edwin_process/gtopo30_full.map"
#
# TODO: using the DEMs shared by Prof. Lehner (including 30 arcsec file)
#
dem_map_high_resolution = vos.readPCRmapClone(dem_map_high_resolution_file_name, \
                                              clone_map_file, \
                                              tmp_folder, \
                                              None, False, None, False)

dem_map_high_resolution = pcr.cover(dem_map_high_resolution, 0.0)
# - use the DEM only where the ldd is defined
dem_map_high_resolution = pcr.ifthen(pcr.defined(ldd_map_high_resolution), dem_map_high_resolution)
pcr.report(dem_map_high_resolution, "resampled_high_resolution_dem.map")


# calculating high resolution stream order maps
msg = "Calculating a high resolution stream order map."
logger.info(msg)
stream_order_map = pcr.streamorder(ldd_map_high_resolution)
#
# Strahler order option
strahler_order_used = strahler_order_number
msg = "The Strahler order number used for this downscaling method: " + str(strahler_order_used)
logger.info(msg)
pcr.report(stream_order_map, "high_resolution_stream_order.map")
#
# TODO: Shall we ignore smaller rivers (< 10 m)?
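One way to act on the TODO above is to keep only cells whose Strahler order reaches the chosen threshold, so that minor streams drop out of the downscaling. A hedged sketch reusing the script's variables (the output map name is hypothetical):

# keep only the larger rivers: cells at or above the chosen Strahler order
large_rivers = pcr.ifthen(
    pcr.scalar(stream_order_map) >= pcr.scalar(strahler_order_used),
    pcr.boolean(1))
dem_large_rivers = pcr.ifthen(large_rivers, dem_map_high_resolution)
pcr.report(dem_large_rivers, "dem_on_large_rivers.map")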
Exemple #50
# set the pcraster clone, ldd, landmask, and cell area map
msg = "Setting the clone, ldd, landmask, and cell area maps" + ":"
logger.info(msg)
# - clone 
clone_map_file = input_files['clone_map_05min']
pcr.setclone(clone_map_file)
# - ldd
ldd = vos.readPCRmapClone(input_files['ldd_map_05min'],
                          clone_map_file,
                          output_files['tmp_folder'],
                          None,
                          True)
ldd = pcr.lddrepair(pcr.ldd(ldd))
# - landmask
landmask  = pcr.ifthen(pcr.defined(ldd), pcr.boolean(1.0))
# - cell area
cell_area = vos.readPCRmapClone(input_files['cell_area_05min'],
                          clone_map_file,
                          output_files['tmp_folder'])


# read the hydrological year
msg = "Reading the hydrological year types:"
logger.info(msg)
hydro_year_type = pcr.nominal(\
                  vos.readPCRmapClone(input_files['hydro_year_05min'],
                                      input_files['clone_map_05min'],
                                      output_files['tmp_folder'],
                                      None, False, None, True))
hydro_year_type = pcr.cover(hydro_year_type, pcr.nominal(1.0))
# - dem map
# -- using the DEM from Deltares
dem_map_high_resolution_file_name = "/projects/0/dfguu/users/edwinhs/data/HydroSHEDS/hydro_basin_without_lakes/integrating_ldd/version_9_december_2016/cover_SRTM_1km_merge_gtopo_masked.map"
#~ # -- using the gtopo30 dem
#~ dem_map_high_resolution_file_name = "/projects/0/dfguu/data/hydroworld/basedata/hydrography/GTOPO30/edwin_process/gtopo30_full.map"
#
# TODO: using the merged DEMs from HydroSHEDS and Deltares/GTOPO30
#
dem_map_high_resolution = vos.readPCRmapClone(dem_map_high_resolution_file_name, \
                                              clone_map_file, \
                                              tmp_folder, \
                                              None, False, None, False)

dem_map_high_resolution = pcr.cover(dem_map_high_resolution, 0.0)
# - use the DEM only where the ldd is defined
dem_map_high_resolution = pcr.ifthen(pcr.defined(ldd_map_high_resolution),
                                     dem_map_high_resolution)
pcr.report(dem_map_high_resolution, "resampled_high_resolution_dem.map")

# calculating high resolution stream order maps
msg = "Calculating a high resolution stream order map."
logger.info(msg)
stream_order_map = pcr.streamorder(ldd_map_high_resolution)
#
# Strahler order option
strahler_order_used = 6
#
# TODO: ignore smaller rivers (< 10 m)
#
pcr.report(stream_order_map, "high_resolution_stream_order.map")