Example 1
def snaptomap(points, mmap):
    """
    Snap the points in _points_ to the nearest non-missing
    values in _mmap_. Can be used to move gauge locations
    to the nearest rivers.

    Input:
        - points - map with points to move
        - mmap - map with points to move to

    Return:
        - map with shifted points
    """
    points = pcr.cover(points, 0)
    # Create unique id map of mmap cells
    unq = pcr.nominal(pcr.cover(pcr.uniqueid(pcr.defined(mmap)), pcr.scalar(0.0)))
    # Now fill holes in mmap with values indicating the closest mmap cell.
    dist_cellid = pcr.scalar(pcr.spreadzone(unq, 0, 1))
    # Get map with values at the point locations of the closest mmap cell
    dist_cellid = pcr.ifthenelse(points > 0, dist_cellid, 0)
    # Spread this out
    dist_fill = pcr.spreadzone(pcr.nominal(dist_cellid), 0, 1)
    # Find the new (moved) locations
    npt = pcr.uniqueid(pcr.boolean(pcr.ifthen(dist_fill == unq, unq)))
    # Now recreate the original value in the points maps
    ptcover = pcr.spreadzone(pcr.cover(points, 0), 0, 1)
    # Now get the org point value in the pt map
    nptorg = pcr.ifthen(npt > 0, ptcover)

    return nptorg
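
A minimal usage sketch for snaptomap (the file names and the call pattern below are illustrative assumptions, not part of the original source):

import pcraster as pcr

pcr.setclone("gauges.map")                   # hypothetical clone map
gauges = pcr.readmap("gauges.map")           # scalar map with gauge ids at point cells
rivers = pcr.readmap("rivers.map")           # defined on river cells, missing elsewhere
snapped = snaptomap(gauges, rivers)          # gauges moved onto the nearest river cells
pcr.report(snapped, "gauges_snapped.map")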
Example 3
def subcatch_stream(ldd, stream, threshold):
    """
    Derive (sub)catchments based upon a Strahler threshold
    Input:
        ldd -- pcraster object direction, local drain directions
        stream -- pcraster object, stream order
        threshold -- integer, Strahler threshold, subcatchments ge threshold are
                 derived
    output:
        stream_ge -- pcraster object, streams of Strahler order ge threshold
        subcatch -- pcraster object, subcatchments of Strahler order ge threshold

    """
    # derive stream order

    # stream = pcr.streamorder(ldd)
    stream_ge = pcr.ifthen(stream >= threshold, stream)
    stream_up_sum = pcr.ordinal(
        pcr.upstream(ldd, pcr.cover(pcr.scalar(stream_ge), 0)))
    # detect any transfer of strahler order, to a higher strahler order.
    transition_strahler = pcr.ifthenelse(
        pcr.downstream(ldd, stream_ge) != stream_ge, pcr.boolean(1),
        pcr.ifthenelse(
            pcr.nominal(ldd) == 5, pcr.boolean(1),
            pcr.ifthenelse(
                pcr.downstream(ldd, pcr.scalar(stream_up_sum)) >
                pcr.scalar(stream_ge), pcr.boolean(1), pcr.boolean(0))))

    # make unique ids (write to file)
    transition_unique = pcr.ordinal(pcr.uniqueid(transition_strahler))

    # derive upstream catchment areas (write to file)
    subcatch = pcr.nominal(pcr.subcatchment(ldd, transition_unique))
    return stream_ge, subcatch
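
A possible invocation of subcatch_stream, assuming "ldd.map" is a placeholder for a valid local drain direction map (file names and the threshold value are illustrative):

import pcraster as pcr

pcr.setclone("ldd.map")
ldd = pcr.lddrepair(pcr.readmap("ldd.map"))
stream = pcr.streamorder(ldd)            # Strahler order, as the commented-out line suggests
stream_ge, subcatch = subcatch_stream(ldd, stream, threshold=4)
pcr.report(subcatch, "subcatch.map")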
Example 4
    def overview(self, msr, pollution_threshold=2):
        """Give the overview of costs and st.dev for dem and minor embankments.
        """
        msr_type = msr.settings.loc['msr_type', 1]
        name = msr_type + '_' + msr.settings.loc['ID']

        # Separate between clean and polluted areas
        area_clean = pcr.ifthen(self.pollution_zones >= pollution_threshold,\
                                pcr.nominal(1))
        area_polluted = pcr.ifthen(self.pollution_zones < pollution_threshold,\
                                   pcr.nominal(1))

        area_clean = pcr.defined(msr.area) & pcr.defined(area_clean)
        area_clean = pcr.ifthen(area_clean, pcr.boolean(1))
        area_polluted = pcr.defined(msr.area) &\
                                    pcr.defined(area_polluted)
        area_polluted = pcr.ifthen(area_polluted, pcr.boolean(1))

        # Calculate costs and stddev for all earthwork types.
        flpl_low_values = self.dem_lowering(msr, area_polluted)
        minemb_low_values = self.minemb_lowering(msr, area_polluted)
        groyne_lowering_values = self.groyne_lowering(msr)
        cost_ew = pd.concat(
            [flpl_low_values, groyne_lowering_values, minemb_low_values])

        cost_df = cost_ew.iloc[:, 0:1].T
        cost_df.index = [name]
        std_df = cost_ew.iloc[:, 1:2].T
        std_df.index = [name]
        return cost_df, std_df
Example 5
def subcatch_stream(ldd, stream, threshold):
    """
    Derive (sub)catchments based upon a Strahler threshold
    Input:
        ldd -- pcraster object direction, local drain directions
        stream -- pcraster object, stream order
        threshold -- integer, Strahler threshold, subcatchments ge threshold are
                 derived
    output:
        stream_ge -- pcraster object, streams of Strahler order ge threshold
        subcatch -- pcraster object, subcatchments of Strahler order ge threshold

    """
    # derive stream order

    # stream = pcr.streamorder(ldd)
    stream_ge = pcr.ifthen(stream >= threshold, stream)
    stream_up_sum = pcr.ordinal(pcr.upstream(ldd, pcr.cover(pcr.scalar(stream_ge), 0)))
    # detect any transfer of strahler order, to a higher strahler order.
    transition_strahler = pcr.ifthenelse(
        pcr.downstream(ldd, stream_ge) != stream_ge, pcr.boolean(1),
        pcr.ifthenelse(
            pcr.nominal(ldd) == 5, pcr.boolean(1),
            pcr.ifthenelse(
                pcr.downstream(ldd, pcr.scalar(stream_up_sum)) >
                pcr.scalar(stream_ge), pcr.boolean(1), pcr.boolean(0))))

    # make unique ids (write to file)
    transition_unique = pcr.ordinal(pcr.uniqueid(transition_strahler))

    # derive upstream catchment areas (write to file)
    subcatch = pcr.nominal(pcr.subcatchment(ldd, transition_unique))
    return stream_ge, subcatch
Example 6
def readPCRmapClone(v,cloneMapFileName,tmpDir,absolutePath=None,isLddMap=False,cover=None,isNomMap=False):
    # v: inputMapFileName or floating values
    # cloneMapFileName: If the inputMap and cloneMap have different clones,
    #                   resampling will be done.
    #logger.info('read file/values: '+str(v))
    if v == "None":
        PCRmap = str("None")
    elif not re.match(r"[0-9.-]*$",v):
        if absolutePath != None: v = getFullPath(v,absolutePath)
        # print(v)
        sameClone = isSameClone(v,cloneMapFileName)
        if sameClone == True:
            PCRmap = pcr.readmap(v)
        else:
            # resample using GDAL:
            output = tmpDir+'temp.map'
            warp = gdalwarpPCR(v,output,cloneMapFileName,tmpDir,isLddMap,isNomMap)
            # read from temporary file and delete the temporary file:
            PCRmap = pcr.readmap(output)
            if isLddMap == True: PCRmap = pcr.ifthen(pcr.scalar(PCRmap) < 10., PCRmap)
            if isLddMap == True: PCRmap = pcr.ldd(PCRmap)
            if isNomMap == True: PCRmap = pcr.ifthen(pcr.scalar(PCRmap) >  0., PCRmap)
            if isNomMap == True: PCRmap = pcr.nominal(PCRmap)
            co = 'rm '+str(tmpDir)+'*.*'
            cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE, stderr=open('/dev/null', 'w'), shell=True).communicate()
    else:
        PCRmap = pcr.scalar(float(v))
    if cover != None:
        PCRmap = pcr.cover(PCRmap, cover)
    co = None; cOut = None; err = None; warp = None
    del co; del cOut; del err; del warp
    stdout = None; del stdout
    stderr = None; del stderr
    return PCRmap    
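
A short usage sketch for readPCRmapClone, assuming its helper functions (getFullPath, isSameClone, gdalwarpPCR) are importable from the same module; the paths are placeholders. A numeric string yields a non-spatial scalar, while a map path is read and, if its clone differs, resampled:

dem      = readPCRmapClone("dem.map", "clone.map", "/tmp/work/")
porosity = readPCRmapClone("0.35", "clone.map", "/tmp/work/")
ldd      = readPCRmapClone("ldd.map", "clone.map", "/tmp/work/", isLddMap=True)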
Example 7
    def test_001(self):
        """ nonspatial and pcr2numpy """
        nrRows, nrCols, cellSize = 5, 8, 1.0
        west, north = 0.0, 0.0
        pcraster.setclone(nrRows, nrCols, cellSize, west, north)

        value = 1.23456
        nonspatial = pcraster.scalar(value)
        array = pcraster.pcr2numpy(nonspatial, numpy.nan)

        for row in range(0, nrRows):
            for col in range(0, nrCols):
                self.assertAlmostEqual(array[row][col], value)

        value = 3
        nonspatial = pcraster.nominal(value)
        array = pcraster.pcr2numpy(nonspatial, numpy.nan)

        for row in range(0, nrRows):
            for col in range(0, nrCols):
                self.assertAlmostEqual(array[row][col], value)

        value = True
        nonspatial = pcraster.boolean(value)
        array = pcraster.pcr2numpy(nonspatial, numpy.nan)

        for row in range(0, nrRows):
            for col in range(0, nrCols):
                self.assertAlmostEqual(array[row][col], value)
Example 8
def stringVector2raster(inShp, inField, cloneFile, outMap, title=None):
    """Rasterizes a string field in inShp to a PCRaster map. The unique values
    in the shapefile are converted to nominal values. The values and strings
    are contained in the legend of the output map on disk

    Input:
    inShp:     input shapefile
    inField:   field to be rasterized
    cloneFile: PCRaster clone file used for the rasterization
    outMap:    PCRaster map written to disk
    """

    # add a column to a copy of the shapefile
    gdf = gpd.read_file(inShp)
    classes = gdf[inField].unique()
    outField = 'TMP_FIELD'
    lut = {classes[k]: k + 1 for k in range(len(classes))}
    gdf[outField] = gdf.apply(lambda row: lut[row[inField]], axis=1)
    gdf_subset = gdf.loc[:, ['geometry', inField, 'TMP_FIELD']]
    gdf_subset.to_file('tmp.shp')

    #-rasterize the copy of the shapefile and report the map with the legend.
    pcrMap = vector2raster('ESRI Shapefile', cloneFile, 'tmp.shp', outField)
    reportMapWithLegend(pcr.nominal(pcrMap), outMap,
                        {v: k for k, v in lut.items()},
                        title=title)
    return pcr.readmap(outMap)
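
A hypothetical call, assuming a shapefile with a string attribute column and the helper functions (vector2raster, reportMapWithLegend) from the original module; all names are placeholders:

landuse = stringVector2raster("landuse.shp", "LU_CLASS", "clone.map",
                              "landuse.map", title="land use classes")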
Example 9
  def test_001(self):
      """ nonspatial and pcr2numpy """
      nrRows, nrCols, cellSize = 5, 8, 1.0
      west, north = 0.0, 0.0
      pcraster.setclone(nrRows, nrCols, cellSize, west, north)

      value = 1.23456
      nonspatial = pcraster.scalar(value)
      array = pcraster.pcr2numpy(nonspatial, numpy.nan)

      for row in range(0, nrRows):
          for col in range(0, nrCols):
              self.assertAlmostEqual(array[row][col], value)

      value = 3
      nonspatial = pcraster.nominal(value)
      array = pcraster.pcr2numpy(nonspatial, numpy.nan)

      for row in range(0, nrRows):
          for col in range(0, nrCols):
              self.assertAlmostEqual(array[row][col], value)

      value = True
      nonspatial = pcraster.boolean(value)
      array = pcraster.pcr2numpy(nonspatial, numpy.nan)

      for row in range(0, nrRows):
          for col in range(0, nrCols):
              self.assertAlmostEqual(array[row][col], value)
Example 10
    def testNonSpatialConversions(self):
        nonSpatialValue = pcraster.mapmaximum(
            pcraster.readmap("map2asc_PCRmap.map"))
        # Ordinal.
        nonSpatial = pcraster.ordinal(nonSpatialValue)
        self.assertEqual(bool(nonSpatial), True)
        self.assertEqual(int(nonSpatial), 124)
        self.assertEqual(float(nonSpatial), 124.0)

        # Nominal.
        nonSpatial = pcraster.nominal(nonSpatialValue)
        self.assertEqual(bool(nonSpatial), True)
        self.assertEqual(int(nonSpatial), 124)
        self.assertEqual(float(nonSpatial), 124)

        # Boolean.
        nonSpatial = pcraster.boolean(nonSpatialValue)
        self.assertEqual(bool(nonSpatial), True)
        self.assertEqual(int(nonSpatial), 1)
        self.assertEqual(float(nonSpatial), 1.0)

        # Scalar.
        nonSpatial = pcraster.scalar(pcraster.mapmaximum("abs_Expr.map"))
        self.assertEqual(bool(nonSpatial), True)
        self.assertEqual(int(nonSpatial), 14)
        self.assertEqual(float(nonSpatial), 14.0)
Example 11
def zonalSumArea(nominalMap, areaClass):
    """Memory efficient method to sum up the surface area of the different 
        classes in the nominal map. Separate by the regions in areaClass
        
        input:
            nominalMap: nominal map, e.g. ecotope map
            areaClass: regions to compute surface areas over
    """ 
    #-create a pointMap of the output locations, one for each areaClass
    outputPointMap = pcrr.pointPerClass(areaClass)
    
    #-initiate output DataFrame
    dfInit = pcrr.getCellValues(outputPointMap, mapList = [areaClass], columns = ['areaClass'])
 
    #-loop over the classes in nominalMap and compute the summed area per areaClass
    IDs = np.unique(pcr.pcr2numpy(nominalMap, -9999))[1:]
    dfList = []
    for ID in IDs[:]:
        pcrID = pcr.nominal(ID)
        pcr.setglobaloption('unittrue')
        IDArea = pcr.ifthen(nominalMap == pcrID, pcr.cellarea())
        sectionSum = pcr.areatotal(IDArea, areaClass)
        df = pcrr.getCellValues(outputPointMap, [sectionSum], [ID])
            # df columns = rowIdx, colIdx, ID
        df = df.drop(['rowIdx', 'colIdx'],  axis=1)
        dfList.append(df)
        
    dfOut = dfInit.join(dfList)
    #return dfInit, df, dfOut, dfList
    return dfOut
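
A usage sketch for zonalSumArea, assuming the module's pcrr and numpy imports are in place; the map names are placeholders:

import pcraster as pcr

ecotopes = pcr.readmap("ecotopes.map")      # nominal ecotope classes
sections = pcr.readmap("sections.map")      # nominal region map
area_df = zonalSumArea(ecotopes, sections)  # DataFrame with summed area per class and region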
Example 12
def remove_thd(thin_dams, removal_area):
    """
    Remove the thin dams within the removal area.
    ----------
    Parameters
        thin_dams: Polygon GeoDataFrame with D-Flow FM thin dams
        removal_area: Boolean PCRaster map with area to be removed.
    Raster is converted to a polygon, and the difference with the thin dams
    is determined.
    """
    removal_area = pcr.nominal(removal_area + 10000)
    pcr.report(removal_area, 'rem_area.map')
    mapIO.pcr2Shapefile('rem_area.map', 'mask.shp', 'areas')
    pols = gpd.GeoDataFrame.from_file('mask.shp')
    pols = pols[pols.areas > 0]
    pols = explode(gpd.GeoDataFrame(geometry=pols.buffer(0)))
    # remove self-intersections from polygonized rasters

    #    print 'Removing thin dams...'
    # select using polygon. This is the neat way of cutting out pieces of
    # the thin dam polygons.

    thd_rem = thin_dams['geometry'].difference(pols.geometry.unary_union)
    thd_rem = gpd.GeoDataFrame(thd_rem[thd_rem.area > 2].reset_index())
    thd_rem.rename(columns={0: 'geometry'}, inplace=True)

    out_gdf = thd_rem.loc[:, ['geometry', 'label']]
    out_gdf = explode(out_gdf)
    out_gdf['label'] = [
        '%s_%s' % (ii, ii + 1) for ii in range(1, len(out_gdf) + 1)
    ]
    return out_gdf
Example 13
def readPCRmapClone(v,cloneMapFileName,tmpDir,absolutePath=None,isLddMap=False,cover=None,isNomMap=False):
    # v: inputMapFileName or floating values
    # cloneMapFileName: If the inputMap and cloneMap have different clones,
    #                   resampling will be done.
    print(v)
    if v == "None":
        PCRmap = str("None")
    elif not re.match(r"[0-9.-]*$",v):
        if absolutePath != None: v = getFullPath(v,absolutePath)
        # print(v)
        sameClone = isSameClone(v,cloneMapFileName)
        if sameClone == True:
            PCRmap = pcr.readmap(v)
        else:
            # resample using GDAL:
            output = tmpDir+'temp.map'
            warp = gdalwarpPCR(v,output,cloneMapFileName,tmpDir,isLddMap,isNomMap)
            # read from temporary file and delete the temporary file:
            PCRmap = pcr.readmap(output)
            if isLddMap == True: PCRmap = pcr.ifthen(pcr.scalar(PCRmap) < 10., PCRmap)
            if isLddMap == True: PCRmap = pcr.ldd(PCRmap)
            if isNomMap == True: PCRmap = pcr.ifthen(pcr.scalar(PCRmap) >  0., PCRmap)
            if isNomMap == True: PCRmap = pcr.nominal(PCRmap)
            co = 'rm '+str(tmpDir)+'*.*'
            cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE, stderr=open('/dev/null', 'w'), shell=True).communicate()
    else:
        PCRmap = pcr.scalar(float(v))
    if cover != None:
        PCRmap = pcr.cover(PCRmap, cover)
    co = None; cOut = None; err = None; warp = None
    del co; del cOut; del err; del warp
    stdout = None; del stdout
    stderr = None; del stderr
    return PCRmap    
Example 14
  def testNonSpatialConversions(self):
    nonSpatialValue = pcraster.mapmaximum(pcraster.readmap("map2asc_PCRmap.map"))
    # Ordinal.
    nonSpatial = pcraster.ordinal(nonSpatialValue)
    self.assertEqual(bool(nonSpatial), True)
    self.assertEqual(int(nonSpatial), 124)
    self.assertEqual(float(nonSpatial), 124.0)

    # Nominal.
    nonSpatial = pcraster.nominal(nonSpatialValue)
    self.assertEqual(bool(nonSpatial), True)
    self.assertEqual(int(nonSpatial), 124)
    self.assertEqual(float(nonSpatial), 124)

    # Boolean.
    nonSpatial = pcraster.boolean(nonSpatialValue)
    self.assertEqual(bool(nonSpatial), True)
    self.assertEqual(int(nonSpatial), 1)
    self.assertEqual(float(nonSpatial), 1.0)

    # Scalar.
    nonSpatial = pcraster.scalar(pcraster.mapmaximum("abs_Expr.map"))
    self.assertEqual(bool(nonSpatial), True)
    self.assertEqual(int(nonSpatial), 14)
    self.assertEqual(float(nonSpatial), 14.0)
Example 15
    def _parseLine(self, line, lineNumber, nrColumns, externalNames, keyDict):

        line = re.sub("\n", "", line)
        line = re.sub("\t", " ", line)
        result = None

        # read until first comment
        content = ""
        content, sep, comment = line.partition("#")
        if len(content) > 1:
            collectionVariableName, sep, tail = content.partition(" ")
            if collectionVariableName == self._varName:
                tail = tail.strip()
                key, sep, variableValue = tail.rpartition(" ")

                if len(key.split()) != nrColumns:
                    tmp = re.sub("\(|\)|,", "", str(key))
                    msg = "Error reading %s line %d, order of columns given (%s columns) does not match expected order of %s columns" % (
                        self._fileName, lineNumber, len(key.split()) + 2,
                        int(nrColumns) + 2)
                    raise ValueError(msg)

                variableValue = re.sub('\"', "", variableValue)

                tmp = None
                try:
                    tmp = int(variableValue)
                    if self._dataType == pcraster.Boolean:
                        tmp = pcraster.boolean(tmp)
                    elif self._dataType == pcraster.Nominal:
                        tmp = pcraster.nominal(tmp)
                    elif self._dataType == pcraster.Ordinal:
                        tmp = pcraster.ordinal(tmp)
                    elif self._dataType == pcraster.Ldd:
                        tmp = pcraster.ldd(tmp)
                    else:
                        msg = "Conversion to %s failed" % (self._dataType)
                        raise Exception(msg)
                except ValueError as e:
                    try:
                        tmp = float(variableValue)
                        if self._dataType == pcraster.Scalar:
                            tmp = pcraster.scalar(tmp)
                        elif self._dataType == pcraster.Directional:
                            tmp = pcraster.directional(tmp)
                        else:
                            msg = "Conversion to %s failed" % (self._dataType)
                            raise Exception(msg)

                    except ValueError as e:
                        variableValue = re.sub("\\\\", "/", variableValue)
                        variableValue = variableValue.strip()
                        path = os.path.normpath(variableValue)
                        try:
                            tmp = pcraster.readmap(path)
                        except RuntimeError as e:
                            msg = "Error reading %s line %d, %s" % (
                                self._fileName, lineNumber, e)
                            raise ValueError(msg)
Example 16
    def premcloop(self):
        self.clone = pcr.boolean(cfg.cloneString)
        self.dem = pcr.scalar(cfg.dem)
        self.createInstancesPremcloop()

        # required for reporting as numpy
        self.locations = pcr.cover(pcr.nominal(cfg.locations), 0)
        pcr.report(self.locations, 'locats')

        self.forestNoForest = pcr.boolean(cfg.forestNoForest)
        idMap = pcr.uniqueid(self.clone)
        oneLocationPerArea = pcr.areamaximum(idMap,
                                             self.forestNoForest) == idMap
        self.locationsForParameters = pcr.cover(
            pcr.nominal(
                pcr.scalar(pcr.ifthen(oneLocationPerArea, self.forestNoForest))
                + 1), 0)
Example 17
def readPCRmapClone(v,
                    cloneMapFileName,
                    tmpDir,
                    absolutePath=None,
                    isLddMap=False,
                    cover=None,
                    isNomMap=False):
    # v: inputMapFileName or floating values
    # cloneMapFileName: If the inputMap and cloneMap have different clones,
    #                   resampling will be done.
    logger.debug('read file/values: ' + str(v))
    if v == "None":
        #~ PCRmap = str("None")
        PCRmap = None  # 29 July: I made an experiment by changing the type of this object.
    elif not re.match(r"[0-9.-]*$", v):
        if absolutePath != None: v = getFullPath(v, absolutePath)
        # print v
        # print cloneMapFileName
        sameClone = isSameClone(v, cloneMapFileName)
        if sameClone == True:
            PCRmap = pcr.readmap(v)
        else:
            # resample using GDAL:
            output = tmpDir + 'temp.map'
            warp = gdalwarpPCR(v, output, cloneMapFileName, tmpDir, isLddMap,
                               isNomMap)
            # read from temporary file and delete the temporary file:
            PCRmap = pcr.readmap(output)
            if isLddMap == True:
                PCRmap = pcr.ifthen(pcr.scalar(PCRmap) < 10., PCRmap)
            if isLddMap == True: PCRmap = pcr.ldd(PCRmap)
            if isNomMap == True:
                PCRmap = pcr.ifthen(pcr.scalar(PCRmap) > 0., PCRmap)
            if isNomMap == True: PCRmap = pcr.nominal(PCRmap)
            if os.path.isdir(tmpDir):
                shutil.rmtree(tmpDir)
            os.makedirs(tmpDir)
    else:
        PCRmap = pcr.spatial(pcr.scalar(float(v)))
    if cover != None:
        PCRmap = pcr.cover(PCRmap, cover)
    co = None
    cOut = None
    err = None
    warp = None
    del co
    del cOut
    del err
    del warp
    stdout = None
    del stdout
    stderr = None
    del stderr

    # SM: revisit this
    PCRmap = pcr.pcr2numpy(PCRmap, np.nan)

    return PCRmap
Example 18
  def _parseLine(self, line, lineNumber, nrColumns, externalNames, keyDict):

    line = re.sub("\n","",line)
    line = re.sub("\t"," ",line)
    result = None

    # read until first comment
    content = ""
    content,sep,comment = line.partition("#")
    if len(content) > 1:
      collectionVariableName, sep, tail = content.partition(" ")
      if collectionVariableName == self._varName:
        tail = tail.strip()
        key, sep, variableValue = tail.rpartition(" ")

        if len(key.split()) != nrColumns:
          tmp = re.sub("\(|\)|,","",str(key))
          msg = "Error reading %s line %d, order of columns given (%s columns) does not match expected order of %s columns" %(self._fileName, lineNumber, len(key.split()) + 2, int(nrColumns) + 2)
          raise ValueError(msg)

        variableValue = re.sub('\"', "", variableValue)

        tmp = None
        try:
          tmp = int(variableValue)
          if self._dataType == pcraster.Boolean:
            tmp = pcraster.boolean(tmp)
          elif self._dataType == pcraster.Nominal:
            tmp = pcraster.nominal(tmp)
          elif self._dataType == pcraster.Ordinal:
            tmp = pcraster.ordinal(tmp)
          elif self._dataType == pcraster.Ldd:
            tmp = pcraster.ldd(tmp)
          else:
            msg = "Conversion to %s failed" % (self._dataType)
            raise Exception(msg)
        except ValueError as e:
          try:
            tmp = float(variableValue)
            if self._dataType == pcraster.Scalar:
              tmp = pcraster.scalar(tmp)
            elif self._dataType == pcraster.Directional:
              tmp = pcraster.directional(tmp)
            else:
              msg = "Conversion to %s failed" % (self._dataType)
              raise Exception(msg)

          except ValueError as e:
            variableValue = re.sub("\\\\","/",variableValue)
            variableValue = variableValue.strip()
            path = os.path.normpath(variableValue)
            try:
              tmp = pcraster.readmap(path)
            except RuntimeError as e:
              msg = "Error reading %s line %d, %s" %(self._fileName, lineNumber, e)
              raise ValueError(msg)
Example 19
def spatialInterpolation2PCR(fieldArray, pcrType, MV):
    #-interpolates the field array to the full extent
    field = pcr.numpy2pcr(pcrType, fieldArray, MV)
    cellID = pcr.nominal(pcr.uniqueid(pcr.defined(field)))
    zoneID = pcr.spreadzone(cellID, 0, 1)
    if pcrType == pcr.Scalar:
        field = pcr.areaaverage(field, zoneID)
    else:
        field = pcr.areamajority(field, zoneID)
    return field
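
A self-contained sketch of how spatialInterpolation2PCR could be called, with a small synthetic clone and two sample cells (all sizes and values are illustrative):

import numpy as np
import pcraster as pcr

pcr.setclone(10, 10, 1.0, 0.0, 0.0)
MV = -9999.0
field_array = np.full((10, 10), MV, dtype=np.float32)
field_array[2, 3] = 1.0                   # two observed sample cells
field_array[7, 8] = 2.0
interpolated = spatialInterpolation2PCR(field_array, pcr.Scalar, MV)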
Example 20
    def sample(self, expression):
        """
    Sampling the current values of 'expression' at the given locations for the current timestep
    """

        arrayRowPos = self._userModel.currentTimeStep() - self._userModel.firstTimeStep()

        #if isinstance(expression, float):
        #  expression = pcraster.scalar(expression)

        try:
            # store the data type for tss file header
            if self._spatialDatatype == None:
                self._spatialDatatype = str(expression.dataType())
        except AttributeError as e:
            datatype, sep, tail = str(e).partition(" ")
            msg = "Argument must be a PCRaster map, type %s given. If necessary use data conversion functions like scalar()" % (
                datatype)
            raise AttributeError(msg)

        if self._spatialIdGiven:
            if expression.dataType() == pcraster.Scalar or expression.dataType() == pcraster.Directional:
                tmp = pcraster.areaaverage(pcraster.spatial(expression),
                                           pcraster.spatial(self._spatialId))
            else:
                tmp = pcraster.areamajority(pcraster.spatial(expression),
                                            pcraster.spatial(self._spatialId))

            col = 0
            for cellIndex in self._sampleAddresses:
                value, valid = pcraster.cellvalue(tmp, cellIndex)
                if not valid:
                    value = Decimal("NaN")

                self._sampleValues[arrayRowPos][col] = value
                col += 1
        else:
            if expression.dataType() == pcraster.Scalar or expression.dataType() == pcraster.Directional:
                tmp = pcraster.maptotal(pcraster.spatial(expression))\
                      / pcraster.maptotal(pcraster.scalar(pcraster.defined(pcraster.spatial(expression))))
            else:
                tmp = pcraster.mapmaximum(pcraster.maptotal(pcraster.areamajority(pcraster.spatial(expression),\
                      pcraster.spatial(pcraster.nominal(1)))))

            value, valid = pcraster.cellvalue(tmp, 1)
            if not valid:
                value = Decimal("NaN")

            self._sampleValues[arrayRowPos] = value

        if self._userModel.currentTimeStep() == self._userModel.nrTimeSteps():
            self._writeTssFile()
Example 21
def empty_measure(eco_clone):
    """Create an empty river measure."""
    empty_map = pcr.scalar(mapIO.emptyMap(eco_clone.pcr_map))
    empty_eco = measures.LegendMap(pcr.nominal(empty_map), eco_clone.legend_df)
    settings = pd.DataFrame(
        {1: ['lowering', 'dummy']},
        index=['msr_type', 'ID'],
    )
    msr = measures.Measure(settings, empty_map, empty_map, empty_eco,
                           empty_map, empty_map, empty_map, empty_map)
    return msr
Example 22
    def test_9(self):
        """ test ifthenelse and nonspatial arguments """

        pcraster.setclone(1, 3, 1, 1, 1)

        cond = pcraster.numpy2pcr(pcraster.Boolean,
                                  numpy.array([[1, 0, 5]], numpy.int8), 5)

        # Test some Python data types in condition
        res = ifthenelse(True, pcraster.boolean(1.0), pcraster.boolean(0))
        self.assertEqual(pcraster.cellvalue(res, 1, 1), (True, True))
        self.assertEqual(pcraster.cellvalue(res, 1, 2), (True, True))
        self.assertEqual(pcraster.cellvalue(res, 1, 3), (True, True))

        res = ifthenelse(False, pcraster.boolean(1.0), pcraster.boolean(0))
        self.assertEqual(pcraster.cellvalue(res, 1, 1), (False, True))
        self.assertEqual(pcraster.cellvalue(res, 1, 2), (False, True))
        self.assertEqual(pcraster.cellvalue(res, 1, 3), (False, True))

        res = ifthenelse(5, pcraster.boolean(1.0), pcraster.boolean(0))
        self.assertEqual(pcraster.cellvalue(res, 1, 1), (True, True))
        self.assertEqual(pcraster.cellvalue(res, 1, 2), (True, True))
        self.assertEqual(pcraster.cellvalue(res, 1, 3), (True, True))

        res = ifthenelse(0, pcraster.boolean(1.0), pcraster.boolean(0))
        self.assertEqual(pcraster.cellvalue(res, 1, 1), (False, True))
        self.assertEqual(pcraster.cellvalue(res, 1, 2), (False, True))
        self.assertEqual(pcraster.cellvalue(res, 1, 3), (False, True))

        # Spatial condition
        res = ifthenelse(cond, pcraster.boolean(1.0), pcraster.boolean(0))
        self.assertEqual(pcraster.cellvalue(res, 1, 1), (True, True))
        self.assertEqual(pcraster.cellvalue(res, 1, 2), (False, True))
        self.assertEqual(pcraster.cellvalue(res, 1, 3)[1], False)

        # Non-spatial condition
        res = ifthenelse(pcraster.boolean(1.0), pcraster.nominal(3.0),
                         pcraster.nominal(2))
        self.assertEqual(pcraster.cellvalue(res, 1, 1), (3, True))
        self.assertEqual(pcraster.cellvalue(res, 1, 2), (3, True))
        self.assertEqual(pcraster.cellvalue(res, 1, 3), (3, True))
Example 23
def selectCatchments(ldd, pitsMap, pitsDict, continentList, cloneMap):
    catchments = pcr.ifthen(pcr.boolean(pcr.readmap(cloneMap)) == 0, pcr.scalar(1))    
    for continent in pitsDict.keys():
        continentNr   = pitsDict[continent][0]
        pitsContinent = pitsDict[continent][1:]
        print('updating catchments for', continent)
        for pitNr in pitsContinent:
            pitMap = pcr.ifthen(pitsMap == pitNr, pcr.scalar(continentNr))
            catchment = pcr.catchment(ldd, pcr.nominal(pitMap))
            catchment = pcr.ifthen(pcr.boolean(catchment) == 1, catchment)
            catchments = pcr.cover(catchments, pcr.scalar(catchment))
    return catchments
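
A hypothetical invocation of selectCatchments; the pitsDict layout (continent name mapped to [continentNr, pitNr, ...]) follows how the function indexes it, and all map names and numbers are placeholders. Note that the continentList argument is accepted but not used by the function body:

import pcraster as pcr

pcr.setclone("clone.map")
ldd = pcr.lddrepair(pcr.readmap("ldd.map"))
pitsMap = pcr.readmap("pits.map")
pitsDict = {"africa": [1, 101, 102], "europe": [2, 201]}
catchments = selectCatchments(ldd, pitsMap, pitsDict, list(pitsDict), "clone.map")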
Example 24
def xyzWAQUA2Pcr(xyzFile, gridIDMap, cloneFile):
    """read an WAQUA .xyz file into a raster map

    xyzFile:    x,y,z,m,n,id column file with a one line header
    gridIDmap:  pcraster map with the rgf grid cell IDs
    cloneFile:  pcraster clonefile to be used in col2map
    """

    xyz = np.loadtxt(xyzFile, delimiter=',', skiprows=1)
    xyzPointMap = col2map(xyz, cloneFile, args='-S -a -m -999999')
    xyzAreaMap = pcr.areaaverage(xyzPointMap, pcr.nominal(gridIDMap))
    return xyzAreaMap
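
A minimal usage sketch for xyzWAQUA2Pcr (file names are placeholders; col2map must be available as in the original module):

import pcraster as pcr

grid_ids = pcr.readmap("rgf_grid_ids.map")
bathymetry = xyzWAQUA2Pcr("bathymetry.xyz", grid_ids, "clone.map")
pcr.report(bathymetry, "bathymetry.map")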
Example 25
  def test_002(self):
      """ nonspatial and pcr_as_numpy """
      nrRows, nrCols, cellSize = 3, 2, 1.0
      west, north = 0.0, 0.0
      pcraster.setclone(nrRows, nrCols, cellSize, west, north)

      nonspatial = pcraster.nominal(5)

      with self.assertRaises(Exception) as context_manager:
          array = pcraster.pcr_as_numpy(nonspatial)

      self.assertEqual(str(context_manager.exception),
          "Argument is non-spatial, only spatial PCRaster data types are supported")
Example 26

    def initiate_modflow(self):

        logger.info("Initializing pcraster modflow.")
        
        # initialise 
        self.pcr_modflow = None
        self.pcr_modflow = pcr.initialise(pcr.clone())
        
        # grid specification - one layer model
        top    = self.dem_average
        bottom = top - self.totalGroundwaterThickness
        self.pcr_modflow.createBottomLayer(bottom, top) 
        
        # specification for the boundary condition (IBOUND, BAS package)
        # - active cells only in landmask
        # - constant head for outside the landmask
        ibound = pcr.ifthen(self.landmask, pcr.nominal(1))
        ibound = pcr.cover(ibound, pcr.nominal(-1))
        self.pcr_modflow.setBoundary(ibound, 1)
        
        # specification for conductivities (BCF package)
        horizontal_conductivity = self.kSatAquifer # unit: m/day
        # set the minimum value for transmissivity; (Deltares's default value: 10 m2/day)
        minimumTransmissivity = 10.
        horizontal_conductivity = pcr.max(minimumTransmissivity, \
                                          horizontal_conductivity * self.totalGroundwaterThickness) / self.totalGroundwaterThickness
        vertical_conductivity   = horizontal_conductivity                # dummy values, as one layer model is used
        self.pcr_modflow.setConductivity(00, horizontal_conductivity, \
                                             vertical_conductivity, 1)              
        
        # specification for storage coefficient
        # - correction due to the usage of lat/lon coordinates
        primary = pcr.cover(self.specificYield * self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize()), 0.0)
        primary = pcr.max(1e-20, primary)
        secondary = primary                                           # dummy values as we used layer type 00
        self.pcr_modflow.setStorage(primary, secondary, 1)
        
        # set drain package
        self.set_drain_package()
Example 27
def area_total_value(values, area_class):
    """Calculate the total value over the area class.

    values: map with values
    area_class: project area of the measure

    Returns a float with the total value over the area class.
    """
    area_total = pcr.areatotal(pcr.scalar(values), pcr.nominal(area_class))
    total_value, _ = pcr.cellvalue(pcr.mapmaximum(area_total), 1, 1)
    if total_value <= -3.40282346638e+38:  # return 0 if only missing values
        total_value = 0
    return total_value
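
A short usage sketch for area_total_value (map names are placeholders):

import pcraster as pcr

costs = pcr.readmap("costs_per_cell.map")    # scalar values per cell
msr_area = pcr.readmap("measure_area.map")   # project area of the measure
total_cost = area_total_value(costs, msr_area)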
Example 28
  def testNonSpatialCreation(self):
    value = 198329008
    nonSpatial = pcraster.nominal(value)
    self.assertEqual(pcraster.cellvalue(nonSpatial, 1)[0], value)


    # OK
    value = 10000
    i = 0
    while i < 100:
      nonSpatial = pcraster.nominal(value)
      self.assertEqual(pcraster.cellvalue(nonSpatial, 1)[0], value)
      value += 13
      i += 1

    # Not OK
    bugzilla144 = False
    if not bugzilla144:
      print("skipped bugzilla144")
    else:
      value = 198329008
      i = 0
      while i < 100:
        nonSpatial = pcraster.nominal(value)
        self.assertEqual(pcraster.cellvalue(nonSpatial, 1)[0], value)
        value += 13
        i += 1

      # Bug is in the _PCRaster/_PCRaster.cc:
      #   newNonSpatialIntegralField code.
      #  CW Jul-14/2008 CW can not trace it back to newNonSpatialIntegralField
      value = 198329012
      nonSpatial = pcraster.nominal(value)
      self.assertEqual(pcraster.cellvalue(nonSpatial, 1)[0], value)

      value = 198329020
      nonSpatial = pcraster.nominal(value)
      self.assertEqual(pcraster.cellvalue(nonSpatial, 1)[0], value)
Example 30
  def sample(self, expression):
    """
    Sampling the current values of 'expression' at the given locations for the current timestep
    """

    arrayRowPos = self._userModel.currentTimeStep() - self._userModel.firstTimeStep()

    #if isinstance(expression, float):
    #  expression = pcraster.scalar(expression)

    try:
      # store the data type for tss file header
      if self._spatialDatatype == None:
        self._spatialDatatype = str(expression.dataType())
    except AttributeError as e:
      datatype, sep, tail = str(e).partition(" ")
      msg = "Argument must be a PCRaster map, type %s given. If necessary use data conversion functions like scalar()" % (datatype)
      raise AttributeError(msg)

    if self._spatialIdGiven:
      if expression.dataType() == pcraster.Scalar or expression.dataType() == pcraster.Directional:
        tmp = pcraster.areaaverage(pcraster.spatial(expression), pcraster.spatial(self._spatialId))
      else:
        tmp = pcraster.areamajority(pcraster.spatial(expression), pcraster.spatial(self._spatialId))

      col = 0
      for cellIndex in self._sampleAddresses:
        value, valid = pcraster.cellvalue(tmp, cellIndex)
        if not valid:
          value = Decimal("NaN")

        self._sampleValues[arrayRowPos][col] = value
        col += 1
    else:
      if expression.dataType() == pcraster.Scalar or expression.dataType() == pcraster.Directional:
         tmp = pcraster.maptotal(pcraster.spatial(expression))\
               / pcraster.maptotal(pcraster.scalar(pcraster.defined(pcraster.spatial(expression))))
      else:
         tmp = pcraster.mapmaximum(pcraster.maptotal(pcraster.areamajority(pcraster.spatial(expression),\
               pcraster.spatial(pcraster.nominal(1)))))

      value, valid = pcraster.cellvalue(tmp, 1)
      if not valid:
        value = Decimal("NaN")

      self._sampleValues[arrayRowPos] = value

    if self._userModel.currentTimeStep() == self._userModel.nrTimeSteps():
       self._writeTssFile()
Example 31
def pt_flow_in_river(ldd, river):
    """
    Returns all points (True) that flow into the mak river (boolean map with river set to True)

    :param ldd: Drainage network
    :param river: Map of river (True River, False non-river)
    :return ifmap: map with infrlo points into the river (True)
    :return ctach: catchment of each of the inflow points
    """

    dspts = pcr.downstream(ldd, pcr.cover(river, 0))
    dspts = pcr.ifthenelse(pcr.cover(river, 0) == 1, 0, dspts)

    catch = pcr.subcatchment(ldd, pcr.nominal(pcr.uniqueid(dspts)))

    return dspts, catch
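
A possible invocation of pt_flow_in_river (file names are placeholders):

import pcraster as pcr

pcr.setclone("ldd.map")
ldd = pcr.lddrepair(pcr.readmap("ldd.map"))
river = pcr.boolean(pcr.readmap("river.map"))   # True on river cells
inflow_points, inflow_catchments = pt_flow_in_river(ldd, river)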
Example 33
    def test_003(self):
        """ pcr2numpy should not run out of memory """

        nrRows, nrCols, cellSize = 200, 200, 1.0
        west, north = 0.0, 0.0
        pcraster.setclone(nrRows, nrCols, cellSize, west, north)

        raster = pcraster.uniform(1)

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        init_mem = mem.rss / 2**10

        nr_iterations = 50
        mem_increase = False

        # small memory increase can occur at runtime
        # allow for, but less than iterations * size(raster)
        max_diff = 400

        for it in range(0, nr_iterations):
            pcraster.pcr2numpy(raster, numpy.nan)
            mem = process.memory_info()
            curr_mem = mem.rss / 2**10
            if curr_mem - init_mem > max_diff:
                mem_increase = True

        raster = pcraster.spatial(pcraster.boolean(1))

        for it in range(0, nr_iterations):
            pcraster.pcr2numpy(raster, numpy.nan)
            mem = process.memory_info()
            curr_mem = mem.rss / 2**10
            if curr_mem - init_mem > max_diff:
                mem_increase = True

        raster = pcraster.nominal(pcraster.uniform(1) * 10)

        for it in range(0, nr_iterations):
            pcraster.pcr2numpy(raster, numpy.nan)
            mem = process.memory_info()
            curr_mem = mem.rss / 2**10
            if curr_mem - init_mem > max_diff:
                mem_increase = True

        self.assertEqual(mem_increase, False)
Example 34
  def test_003(self):
      """ pcr2numpy should not run out of memory """

      nrRows, nrCols, cellSize = 200, 200, 1.0
      west, north = 0.0, 0.0
      pcraster.setclone(nrRows, nrCols, cellSize, west, north)

      raster = pcraster.uniform(1)

      process = psutil.Process(os.getpid())
      mem = process.memory_info()
      init_mem = mem.rss / 2**10

      nr_iterations = 50
      mem_increase = False

      # small memory increase can occur at runtime
      # allow for, but less than iterations * size(raster)
      max_diff = 400

      for it in range(0, nr_iterations):
        pcraster.pcr2numpy(raster, numpy.nan)
        mem = process.memory_info()
        curr_mem = mem.rss / 2**10
        if curr_mem - init_mem > max_diff:
          mem_increase = True

      raster = pcraster.spatial(pcraster.boolean(1))

      for it in range(0, nr_iterations):
        pcraster.pcr2numpy(raster, numpy.nan)
        mem = process.memory_info()
        curr_mem = mem.rss / 2**10
        if curr_mem - init_mem > max_diff:
          mem_increase = True

      raster = pcraster.nominal(pcraster.uniform(1) * 10)

      for it in range(0, nr_iterations):
        pcraster.pcr2numpy(raster, numpy.nan)
        mem = process.memory_info()
        curr_mem = mem.rss / 2**10
        if curr_mem - init_mem > max_diff:
          mem_increase = True

      self.assertEqual(mem_increase, False)
Example 35

    def __init__(self, input_files,\
                      output_files,\
                       modelTime,\
                       main_tmp_dir = "/dev/shm/"):
        DynamicModel.__init__(self) 

        self.input_files  = input_files
        self.output_files = output_files 

        self.modelTime = modelTime

        # main temporary directory 
        self.main_tmp_dir = main_tmp_dir+"/"+vos.get_random_word()
        # make the temporary directory if not exist yet 
        try: 
            os.makedirs(self.main_tmp_dir)
        except OSError:
            os.system('rm -r '+str(self.main_tmp_dir)+'*')
            os.makedirs(self.main_tmp_dir)

        # clone map for pcraster process - depend on the resolution of the basin/catchment map
        pcr.setclone(self.input_files["basin30minmap"]) 
        self.clone_map = pcr.boolean(1.0)
        #
        # catchment ids map
        self.catchment = pcr.nominal(\
                         pcr.readmap(self.input_files["basin30minmap"]))
        self.catchment = pcr.ifthen(pcr.scalar(self.catchment) > 0.0,\
                             self.catchment)
        # cell area map
        self.cell_area = pcr.cover(pcr.readmap(self.input_files["area30min_map"]), 0.0)
        
        # prepare grace monthly and annual anomaly time series
        self.pre_process_grace_file()

        # prepare model monthly and annual anomaly time series
        self.pre_process_model_file()

        # prepare object for writing netcdf files:
        self.output = OutputNetcdf(self.input_files["area30min_map"])
        self.output.createNetCDF(self.output_files['basinscale_tws_month_anomaly']['grace'], "lwe_thickness","m")
        self.output.createNetCDF(self.output_files['basinscale_tws_month_anomaly']['model'], "pcrglobwb_tws","m")
        self.output.createNetCDF(self.output_files['basinscale_tws_annua_anomaly']['grace'], "lwe_thickness","m")
        self.output.createNetCDF(self.output_files['basinscale_tws_annua_anomaly']['model'], "pcrglobwb_tws","m")
Example 36
    def __init__(self, input_files,\
                      output_files,\
                       modelTime,\
                       tmpDir = "/dev/shm/"):
        DynamicModel.__init__(self)           # 

        self.input_files  = input_files
        self.output_files = output_files 
        self.tmpDir = tmpDir

        self.modelTime = modelTime

        pcr.setclone(self.input_files["model_cell_area"])
        clone_map = pcr.boolean(1)
        
        # cell area (m2)
        self.cell_area = vos.readPCRmapClone(\
                        self.input_files["model_cell_area"],\
                        self.input_files["model_cell_area"],\
                        self.tmpDir)
        
        # resampling factor: ratio between target (coarse) and original (fine) resolution
        self.resample_factor = round(
                               vos.getMapAttributes(self.input_files["one_degree_id"],'cellsize')/\
                               vos.getMapAttributes(self.input_files["model_cell_area"],'cellsize'))
        
        # unique ids for upscaling to one degree resolution (grace resolution)
        self.one_degree_id = pcr.nominal(\
                             vos.readPCRmapClone(\
                            self.input_files["one_degree_id"],\
                            self.input_files["model_cell_area"],\
                            self.tmpDir))
        
        # object for reporting at coarse resolution (i.e. one arc degree - grace resolution)
        self.output = OutputNetcdf(self.input_files["one_degree_id"], self.input_files["model_cell_area"])       
        # preparing the netcdf file at coarse resolution:
        self.output.createNetCDF(self.output_files['one_degree_tws']['model'], "pcrglobwb_tws","m")
        #
        # edit some attributes:
        attributeDictionary = {}
        attributeDictionary['description']      = "One degree resolution total water storage (tws), upscaled from PCR-GLOBWB result. "
        self.output.changeAtrribute(self.output_files['one_degree_tws']['model'],\
                                    attributeDictionary)                       
Example 37
  def testDeepCopyRasterNonSpatial(self):
    pcraster.setclone("validated/boolean_Result.map")

    raster = pcraster.boolean(1)
    tmp = copy.deepcopy(raster)
    self.assertEqual(True, self.arbitraryMapEquals(raster, tmp))
    raster1 = pcraster.nominal(1)
    tmp1 = copy.deepcopy(raster1)
    self.assertEqual(True, self.arbitraryMapEquals(raster1, tmp1))
    raster2 = pcraster.ordinal(1)
    tmp2 = copy.deepcopy(raster2)
    self.assertEqual(True, self.arbitraryMapEquals(raster2, tmp2))
    raster3 = pcraster.scalar(1)
    tmp3 = copy.deepcopy(raster3)
    self.assertEqual(True, self.arbitraryMapEquals(raster3, tmp3))
    raster4 = pcraster.directional(1)
    tmp4 = copy.deepcopy(raster4)
    self.assertEqual(True, self.arbitraryMapEquals(raster4, tmp4))
    raster5 = pcraster.ldd(1)
    tmp5 = copy.deepcopy(raster5)
    self.assertEqual(True, self.arbitraryMapEquals(raster5, tmp5))
Example 38
    def testDeepCopyRasterNonSpatial(self):
        pcraster.setclone("validated/boolean_Result.map")

        raster = pcraster.boolean(1)
        tmp = copy.deepcopy(raster)
        self.assertEqual(True, self.arbitraryMapEquals(raster, tmp))
        raster1 = pcraster.nominal(1)
        tmp1 = copy.deepcopy(raster1)
        self.assertEqual(True, self.arbitraryMapEquals(raster1, tmp1))
        raster2 = pcraster.ordinal(1)
        tmp2 = copy.deepcopy(raster2)
        self.assertEqual(True, self.arbitraryMapEquals(raster2, tmp2))
        raster3 = pcraster.scalar(1)
        tmp3 = copy.deepcopy(raster3)
        self.assertEqual(True, self.arbitraryMapEquals(raster3, tmp3))
        raster4 = pcraster.directional(1)
        tmp4 = copy.deepcopy(raster4)
        self.assertEqual(True, self.arbitraryMapEquals(raster4, tmp4))
        raster5 = pcraster.ldd(1)
        tmp5 = copy.deepcopy(raster5)
        self.assertEqual(True, self.arbitraryMapEquals(raster5, tmp5))
Example 39
def readPCRmapClone(v,cloneMapFileName,tmpDir,absolutePath=None,isLddMap=False,cover=None,isNomMap=False,inputEPSG="EPSG:4326",outputEPSG="EPSG:4326",method="near"):
    # v: inputMapFileName or floating values
    # cloneMapFileName: If the inputMap and cloneMap have different clones,
    #                   resampling will be done.
    logger.debug('read file/values: '+str(v))
    if v == "None":
        PCRmap = str("None")
    elif not re.match(r"[0-9.-]*$",v):
        if absolutePath != None: v = getFullPath(v,absolutePath)
        # print(v)
        sameClone = isSameClone(v,cloneMapFileName)
        if sameClone == True:
            PCRmap = pcr.readmap(v)
        else:
            # resample using GDAL:
            output = tmpDir+'temp.map'
            # if no re-projection needed:
            if inputEPSG == outputEPSG or outputEPSG == None: 
                warp = gdalwarpPCR(v,output,cloneMapFileName,tmpDir,isLddMap,isNomMap)
            else:
                warp = gdalwarpPCR(v,output,cloneMapFileName,tmpDir,isLddMap,isNomMap,inputEPSG,outputEPSG,method)
            # read from temporary file and delete the temporary file:
            PCRmap = pcr.readmap(output)
            if isLddMap == True: PCRmap = pcr.ifthen(pcr.scalar(PCRmap) < 10., PCRmap)
            if isLddMap == True: PCRmap = pcr.ldd(PCRmap)
            if isNomMap == True: PCRmap = pcr.ifthen(pcr.scalar(PCRmap) >  0., PCRmap)
            if isNomMap == True: PCRmap = pcr.nominal(PCRmap)
            if os.path.isdir(tmpDir):
                shutil.rmtree(tmpDir)
            os.makedirs(tmpDir)
    else:
        PCRmap = pcr.scalar(float(v))
    if cover != None:
        PCRmap = pcr.cover(PCRmap, cover)
    co = None; cOut = None; err = None; warp = None
    del co; del cOut; del err; del warp
    stdout = None; del stdout
    stderr = None; del stderr
    return PCRmap    
Example 41

    def correction_per_aquifer(self, id):
        
        id = float(id)
        print(id)
        
        # identify aquifer mask  
        aquifer_landmask = pcr.ifthen(self.margat_aquifer_map == pcr.nominal(id), pcr.boolean(1))
        
        # obtain the logarithmic value of Margat value
        exp_margat_thick = pcr.cellvalue(\
                           pcr.mapmaximum(\
                           pcr.ifthen(aquifer_landmask, pcr.ln(self.margat_aquifer_thickness))), 1)[0]

        # obtain the logarithmic values of 'estimated thickness'
        exp_approx_thick = pcr.ifthen(aquifer_landmask, pcr.ln(self.approx_thick)) 
                       
        exp_approx_thick_array = pcr.pcr2numpy(exp_approx_thick, vos.MV)
        exp_approx_thick_array = exp_approx_thick_array[exp_approx_thick_array != vos.MV]
        exp_approx_thick_array = exp_approx_thick_array[exp_approx_thick_array < 1000000.]
        
        # identify percentile
        exp_approx_minim = np.percentile(exp_approx_thick_array,  2.5);
        exp_approx_maxim = np.percentile(exp_approx_thick_array, 97.5); 

        # correcting
        exp_approx_thick_correct  = ( exp_approx_thick - exp_approx_minim ) / \
                                    ( exp_approx_maxim - exp_approx_minim )   
        exp_approx_thick_correct  = pcr.max(0.0, exp_approx_thick_correct )
        exp_approx_thick_correct *= pcr.max(0.0,\
                                    ( exp_margat_thick - exp_approx_minim ) )
        exp_approx_thick_correct += pcr.min(exp_approx_minim, exp_approx_thick)

        # maximum thickness
        exp_approx_thick_correct  = pcr.min(exp_margat_thick, exp_approx_thick_correct)

        # corrected thickness
        correct_thickness = pcr.exp(exp_approx_thick_correct)
        
        return correct_thickness
Example 42
def pcr_coast(dem, points):
    """  project points to coast with nearest neighbourhood
    finds coastal cells based on dem with NoDataValues and the locations of boundary conditions at sea
    using pcr spread the a nearest neighbor interpolation of the point ids is done for coastal cells

    :param dem: pcr dem
    :param points: pcrmap with ids in cells

    :returns: pcr map with location ids projected to coastline
    """
    # clump areas based on NoDataValues in dem
    dem_NoDataValues = pcr.cover(pcr.ifthenelse(dem > -9999, pcr.boolean(0), pcr.boolean(1)), pcr.boolean(1))
    # find number of boundary conditions in area where dem_novalue
    pcr.setglobaloption("nondiagonal")  # only top, bottom, left, right
    area_nbounds = pcr.areatotal(pcr.scalar(points), pcr.clump(dem_NoDataValues)) * pcr.scalar(dem_NoDataValues)
    pcr.setglobaloption("diagonal")  # diagonal again
    # make sea (True) and land (False) mask
    if np.any(pcr.pcr2numpy(area_nbounds,-9999) > 0):
        sea = pcr.ifthenelse(area_nbounds > 0, pcr.boolean(1), pcr.boolean(0))
    else:
        sea = dem_NoDataValues
    # find coast based on sea in neighboring cells and at land (sea = 0)
    coast = pcr.ifthenelse((pcr.window4total(pcr.scalar(sea)) > pcr.scalar(0)) & (sea == pcr.boolean(0)),
                           pcr.boolean(1), pcr.boolean(0))

    # move points to nearest sea cell(s)
    point_dist = pcr.ifthenelse(sea, pcr.spread(points, 0, 1), 1E31)  # distance from each point for sea cells
    nnpoints = pcr.ifthenelse(sea, pcr.spreadzone(points, 0, 1), 0)  # closest point for sea cells
    dist2sea = pcr.areaminimum(point_dist, nnpoints)  # shortest distance to each point to sea
    points_in_sea = pcr.nominal(pcr.ifthenelse(dist2sea == point_dist, nnpoints, 0))  # map points to nearest sea cell

    # map point at sea to coastline according to shortest distance over sea
    res = pcr.ifthenelse((pcr.scalar(sea) + pcr.scalar(coast)) >= 1, pcr.scalar(1), 1E31)  # mask out non sea or coast cells
    ids_coastline = pcr.scalar(pcr.spreadzone(points_in_sea, 0, res)) * pcr.scalar(coast)

    return ids_coastline, points_in_sea
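A hedged usage sketch of pcr_coast; the map file names below are placeholders, not files referenced by the original snippet:

import pcraster as pcr

pcr.setclone("dem.map")                                # hypothetical clone map
dem = pcr.readmap("dem.map")                           # hypothetical DEM, missing at sea
points = pcr.nominal(pcr.readmap("boundary_ids.map"))  # hypothetical boundary-condition ids

ids_coastline, points_in_sea = pcr_coast(dem, points)
pcr.report(ids_coastline, "ids_coastline.map")
pcr.report(points_in_sea, "points_in_sea.map")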
    def evaluateAllModelResults(self,globalCloneMapFileName,\
                                catchmentClassFileName,\
                                lddMapFileName,\
                                cellAreaMapFileName,\
                                pcrglobwb_output,\
                                analysisOutputDir="",\
                                tmpDir = None):     

        # temporary directory
        if tmpDir == None: tmpDir = self.tmpDir+"/edwin_grdc_"
        
        # output directory for all analyses for all stations
        analysisOutputDir   = str(analysisOutputDir)
        self.chartOutputDir = analysisOutputDir+"/chart/"
        self.tableOutputDir = analysisOutputDir+"/table/"
        #
        if analysisOutputDir == "": self.chartOutputDir = "chart/"
        if analysisOutputDir == "": self.tableOutputDir = "table/"
        #
        # make the chart and table directories:
        os.system('rm -r '+self.chartOutputDir+"*")
        os.system('rm -r '+self.tableOutputDir+"*")
        os.makedirs(self.chartOutputDir)
        os.makedirs(self.tableOutputDir)
        
        # cloneMap for all pcraster operations
        pcr.setclone(globalCloneMapFileName)
        cloneMap = pcr.boolean(1)
        self.cell_size_in_arc_degree = vos.getMapAttributesALL(globalCloneMapFileName)['cellsize']
        
        lddMap = pcr.lddrepair(pcr.readmap(lddMapFileName))
        cellArea = pcr.scalar(pcr.readmap(cellAreaMapFileName))
        
        # The landMaskClass map contains the nominal classes for all landmask regions. 
        landMaskClass = pcr.nominal(cloneMap)  # default: if catchmentClassFileName is not given
        if catchmentClassFileName is not None:
            landMaskClass = pcr.nominal(pcr.readmap(catchmentClassFileName))

        # model catchment areas and coordinates
        catchmentAreaAll = pcr.catchmenttotal(cellArea, lddMap) / (1000*1000)  # unit: km2
        xCoordinate = pcr.xcoordinate(cloneMap)
        yCoordinate = pcr.ycoordinate(cloneMap)
        
        for id in self.list_of_grdc_ids: 

            logger.info("Evaluating simulated discharge to the grdc observation at "+str(self.attributeGRDC["id_from_grdc"][str(id)])+".")
            
            # identify model pixel
            self.identifyModelPixel(tmpDir,catchmentAreaAll,landMaskClass,xCoordinate,yCoordinate,str(id))

            # evaluate model results to GRDC data
            self.evaluateModelResultsToGRDC(str(id),pcrglobwb_output,catchmentClassFileName,tmpDir)
            
        # write the summary to a table 
        summary_file = analysisOutputDir+"summary.txt"
        #
        logger.info("Writing the summary for all stations to the file: "+str(summary_file)+".")
        #
        # prepare the file:
        summary_file_handle = open(summary_file,"w")
        #
        # write the header
        summary_file_handle.write( ";".join(self.grdc_dict_keys)+"\n")
        #
        # write the content
        for id in self.list_of_grdc_ids:
            rowLine  = ""
            for key in self.grdc_dict_keys: rowLine += str(self.attributeGRDC[key][str(id)]) + ";"   
            rowLine = rowLine[0:-1] + "\n"
            summary_file_handle.write(rowLine)
        summary_file_handle.close()           
Example n. 44
0
  def _parseLine(self, line, lineNumber, nrColumns, externalNames, keyDict):

    line = re.sub("\n","",line)
    line = re.sub("\t"," ",line)
    result = None

    # read until first comment
    content = ""
    content,sep,comment = line.partition("#")
    if len(content) > 1:
      collectionVariableName, sep, tail = content.partition(" ")
      if collectionVariableName == self._varName:
        tail = tail.strip()
        key, sep, variableValue = tail.rpartition(" ")

        if len(key.split()) != nrColumns:
          msg = "Error reading %s line %d, order of columns given (%s columns) does not match expected order of %s columns" %(self._fileName, lineNumber, len(key.split()) + 2, int(nrColumns) + 2)
          raise ValueError(msg)

        variableValue = re.sub('\"', "", variableValue)

        tmp = None
        try:
          tmp = int(variableValue)
          if self._dataType == pcraster.Boolean:
            tmp = pcraster.boolean(tmp)
          elif self._dataType == pcraster.Nominal:
            tmp = pcraster.nominal(tmp)
          elif self._dataType == pcraster.Ordinal:
            tmp = pcraster.ordinal(tmp)
          elif self._dataType == pcraster.Ldd:
            tmp = pcraster.ldd(tmp)
          else:
            msg = "Conversion to %s failed" % (self._dataType)
            raise Exception(msg)
        except ValueError as e:
          try:
            tmp = float(variableValue)
            if self._dataType == pcraster.Scalar:
              tmp = pcraster.scalar(tmp)
            elif self._dataType == pcraster.Directional:
              tmp = pcraster.directional(tmp)
            else:
              msg = "Conversion to %s failed" % (self._dataType)
              raise Exception(msg)

          except ValueError as e:
            variableValue = re.sub("\\\\","/",variableValue)
            variableValue = variableValue.strip()
            path = os.path.normpath(variableValue)
            try:
              tmp = pcraster.readmap(path)
            except RuntimeError as e:
              msg = "Error reading %s line %d, %s" %(self._fileName, lineNumber, e)
              raise ValueError(msg)

        # test if key is an external name
        transformedKeys = []
        counter = 0

        for k in key.split():
          k = k.strip()
          if externalNames[counter].get(k):
            transformedKeys.append(externalNames[counter].get(k))
          else:
            transformedKeys.append(k)
          counter += 1

        key = tuple(transformedKeys)

        if key not in keyDict:
          tmp = re.sub(r"\(|\)|,", "", str(key))
          msg = "Error reading %s line %d, %s unknown collection index" %(self._fileName, lineNumber, tmp)
          raise ValueError(msg)


        if keyDict[key] is not None:
          tmp = re.sub(r"\(|\)|,", "", str(key))
          msg = "Error reading %s line %d, %s %s already initialised" %(self._fileName, lineNumber, self._varName, tmp)
          raise ValueError(msg)

        keyDict[key] = tmp
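For context, _parseLine expects records of the form "<collection variable name> <key columns> <value>", where everything after "#" is treated as a comment and the value is interpreted first as an integer, then as a float, and finally as a path to a PCRaster map. A hypothetical input line (all names invented for illustration):

interceptionCapacity forest lowland 0.002   # scalar value for the key (forest, lowland)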
ldd = pcr.lddrepair(pcr.ldd(ldd))
ldd = pcr.lddrepair(ldd)
# - landmask
landmask  = pcr.ifthen(pcr.defined(ldd), pcr.boolean(1.0))
# - cell area
cell_area = vos.readPCRmapClone(input_files['cell_area_05min'],
                          clone_map_file,
                          output_files['tmp_folder'])


# read the hydrological year 
msg = "Reading the hydrological year types" + ":"
logger.info(msg)
hydro_year_type = pcr.nominal(\
                  vos.readPCRmapClone(input_files['hydro_year_05min'],
                                      input_files['clone_map_05min'],
                                      output_files['tmp_folder'],
                                      None, False, None, True))
hydro_year_type = pcr.cover(hydro_year_type, pcr.nominal(1.0))




# loop through every year
msg = "Merging two hydrologyical years for the following variables:"
logger.info(msg)
#
for i_year in range(str_year, end_year + 1):
    
    for var in variable_names:
        
Example n. 46
0
 def testScalar2Nominal(self):
   scalarMap = pcraster.readmap("abs_Expr.map")
   self.assertEqual(scalarMap.dataType(), pcraster.VALUESCALE.Scalar)
   nominalMap = pcraster.nominal(scalarMap)
   self.assertEqual(nominalMap.dataType(), pcraster.VALUESCALE.Nominal)
Example n. 47
0
def subcatch_order_b(
    ldd, oorder, sizelimit=0, fill=False, fillcomplete=False, stoporder=0
):
    """
    Determines subcatchments using the catchment order

    This version tries to keep the number of upstream/downstream catchments
    small by first deriving the catchments connected to the major river (of
    the given order), and filling up from there.

    Input:
        - ldd
        - oorder - order to use
        - sizelimit - smallest catchments to include, default is all (sizelimit=0) in number of cells
        - if fill is set to True the higher order catchments are also filled
        - if fillcomplete is set to True the whole ldd is filled with catchments.


    :returns: sc, dif, nldd -- subcatchment map, outlet points, subcatchment ldd
    """
    # outl = find_outlet(ldd)
    # large = pcr.subcatchment(ldd,pcr.boolean(outl))

    if stoporder == 0:
        stoporder = oorder

    stt = pcr.streamorder(ldd)
    sttd = pcr.downstream(ldd, stt)
    pts = pcr.ifthen((pcr.scalar(sttd) - pcr.scalar(stt)) > 0.0, sttd)
    maxorder = pcraster.framework.getCellValue(pcr.mapmaximum(stt), 1, 1)
    dif = pcr.uniqueid(pcr.boolean(pcr.ifthen(stt == pcr.ordinal(oorder), pts)))

    if fill:
        for order in range(oorder, maxorder):
            m_pts = pcr.ifthen((pcr.scalar(sttd) - pcr.scalar(order)) > 0.0, sttd)
            m_dif = pcr.uniqueid(
                pcr.boolean(pcr.ifthen(stt == pcr.ordinal(order), m_pts))
            )
            dif = pcr.uniqueid(pcr.boolean(pcr.cover(m_dif, dif)))

        for myorder in range(oorder - 1, stoporder, -1):
            sc = pcr.subcatchment(ldd, pcr.nominal(dif))
            m_pts = pcr.ifthen((pcr.scalar(sttd) - pcr.scalar(stt)) > 0.0, sttd)
            m_dif = pcr.uniqueid(
                pcr.boolean(pcr.ifthen(stt == pcr.ordinal(myorder - 1), m_pts))
            )
            dif = pcr.uniqueid(
                pcr.boolean(pcr.cover(pcr.ifthen(pcr.scalar(sc) == 0, m_dif), dif))
            )

        if fillcomplete:
            sc = pcr.subcatchment(ldd, pcr.nominal(dif))
            cs, m_dif, stt = subcatch_order_a(ldd, stoporder)
            dif = pcr.uniqueid(
                pcr.boolean(
                    pcr.cover(
                        pcr.ifthen(pcr.scalar(sc) == 0, pcr.ordinal(m_dif)),
                        pcr.ordinal(dif),
                    )
                )
            )

    scsize = pcr.catchmenttotal(1, ldd)
    dif = pcr.ordinal(pcr.uniqueid(pcr.boolean(pcr.ifthen(scsize >= sizelimit, dif))))
    sc = pcr.subcatchment(ldd, dif)

    # Make pit ldd
    nldd = pcr.lddrepair(pcr.ifthenelse(pcr.cover(dif, 0) > 0, 5, ldd))

    return sc, dif, nldd
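A hedged usage sketch of subcatch_order_b; the ldd file name is a placeholder, not part of the original snippet:

import pcraster as pcr

pcr.setclone("ldd.map")                      # hypothetical local drain direction map
ldd = pcr.lddrepair(pcr.readmap("ldd.map"))

# subcatchments attached to order-4 rivers, keeping only catchments of at
# least 100 cells and filling the higher-order catchments as well
sc, dif, nldd = subcatch_order_b(ldd, 4, sizelimit=100, fill=True)
pcr.report(sc, "subcatchments.map")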
ldd = pcr.lddrepair(pcr.ldd(ldd))
ldd = pcr.lddrepair(ldd)
# - landmask
landmask  = pcr.ifthen(pcr.defined(ldd), pcr.boolean(1.0))
# - cell area
cell_area = vos.readPCRmapClone(input_files['cell_area_05min'],
                          clone_map_file,
                          output_files['tmp_folder'])


# set the basin map
msg = "Setting the basin map" + ":"
logger.info(msg)
basin_map = pcr.nominal(\
            vos.readPCRmapClone(input_files['basin_map_05min'],
                                input_files['clone_map_05min'],
                                output_files['tmp_folder'],
                                None, False, None, True))
#~ pcr.aguila(basin_map)
# - extend/extrapolate the basin
basin_map = pcr.cover(basin_map, pcr.windowmajority(basin_map, 0.5))
basin_map = pcr.cover(basin_map, pcr.windowmajority(basin_map, 0.5))
basin_map = pcr.cover(basin_map, pcr.windowmajority(basin_map, 0.5))
basin_map = pcr.cover(basin_map, pcr.windowmajority(basin_map, 1.0))
basin_map = pcr.cover(basin_map, pcr.windowmajority(basin_map, 1.5))
basin_map = pcr.ifthen(landmask, basin_map)
#~ pcr.aguila(basin_map)

msg = "Redefining the basin map (so that it is consistent with the ldd map used in PCR-GLOBWB):"
logger.info(msg)
# - calculate the upstream area of every pixel:
Example n. 49
0
 def testOrdinal2Nominal(self):
   ordinalMap = pcraster.ordinal(pcraster.readmap("areaarea_Class.map"))
   self.assertEqual(ordinalMap.dataType(), pcraster.VALUESCALE.Ordinal)
   nominalMap = pcraster.nominal(ordinalMap)
   pcraster.report(nominalMap, "nominal.map")
   self.assertEqual(nominalMap.dataType(), pcraster.VALUESCALE.Nominal)
Example n. 50
0
    def getParameterFiles(
        self, currTimeStep, cellArea, ldd, initial_condition_dictionary=None
    ):

        # parameters for Water Bodies: fracWat
        #                              waterBodyIds
        #                              waterBodyOut
        #                              waterBodyArea
        #                              waterBodyTyp
        #                              waterBodyCap

        # cell surface area (m2) and ldd
        self.cellArea = cellArea
        ldd = pcr.ifthen(self.landmask, ldd)

        # date used for accessing/extracting water body information
        date_used = currTimeStep.fulldate
        year_used = currTimeStep.year
        if self.onlyNaturalWaterBodies:
            date_used = self.dateForNaturalCondition
            year_used = self.dateForNaturalCondition[0:4]

        # fracWat = fraction of surface water bodies (dimensionless)
        self.fracWat = pcr.scalar(0.0)

        if self.useNetCDF:
            self.fracWat = vos.netcdf2PCRobjClone(
                self.ncFileInp,
                "fracWaterInp",
                date_used,
                useDoy="yearly",
                cloneMapFileName=self.cloneMap,
            )
        else:
            self.fracWat = vos.readPCRmapClone(
                self.fracWaterInp + str(year_used) + ".map",
                self.cloneMap,
                self.tmpDir,
                self.inputDir,
            )

        self.fracWat = pcr.cover(self.fracWat, 0.0)
        self.fracWat = pcr.max(0.0, self.fracWat)
        self.fracWat = pcr.min(1.0, self.fracWat)

        self.waterBodyIds = pcr.nominal(0)  # waterBody ids
        self.waterBodyOut = pcr.boolean(0)  # waterBody outlets
        self.waterBodyArea = pcr.scalar(0.0)  # waterBody surface areas

        # water body ids
        if self.useNetCDF:
            self.waterBodyIds = vos.netcdf2PCRobjClone(
                self.ncFileInp,
                "waterBodyIds",
                date_used,
                useDoy="yearly",
                cloneMapFileName=self.cloneMap,
            )
        else:
            self.waterBodyIds = vos.readPCRmapClone(
                self.waterBodyIdsInp + str(year_used) + ".map",
                self.cloneMap,
                self.tmpDir,
                self.inputDir,
                False,
                None,
                True,
            )
        #
        self.waterBodyIds = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0.0, pcr.nominal(self.waterBodyIds)
        )

        # water body outlets (correcting outlet positions)
        wbCatchment = pcr.catchmenttotal(pcr.scalar(1), ldd)
        self.waterBodyOut = pcr.ifthen(
            wbCatchment == pcr.areamaximum(wbCatchment, self.waterBodyIds),
            self.waterBodyIds,
        )  # = outlet ids
        self.waterBodyOut = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0.0, self.waterBodyOut
        )
        # TODO: Please also consider endorheic lakes!

        # correcting water body ids
        self.waterBodyIds = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0.0,
            pcr.subcatchment(ldd, self.waterBodyOut),
        )

        # boolean map for water body outlets:
        self.waterBodyOut = pcr.ifthen(
            pcr.scalar(self.waterBodyOut) > 0.0, pcr.boolean(1)
        )

        # reservoir surface area (m2):
        if self.useNetCDF:
            resSfArea = (
                1000.0
                * 1000.0
                * vos.netcdf2PCRobjClone(
                    self.ncFileInp,
                    "resSfAreaInp",
                    date_used,
                    useDoy="yearly",
                    cloneMapFileName=self.cloneMap,
                )
            )
        else:
            resSfArea = (
                1000.0
                * 1000.0
                * vos.readPCRmapClone(
                    self.resSfAreaInp + str(year_used) + ".map",
                    self.cloneMap,
                    self.tmpDir,
                    self.inputDir,
                )
            )
        resSfArea = pcr.areaaverage(resSfArea, self.waterBodyIds)
        resSfArea = pcr.cover(resSfArea, 0.0)

        # water body surface area (m2): (lakes and reservoirs)
        self.waterBodyArea = pcr.max(
            pcr.areatotal(
                pcr.cover(self.fracWat * self.cellArea, 0.0), self.waterBodyIds
            ),
            pcr.areaaverage(pcr.cover(resSfArea, 0.0), self.waterBodyIds),
        )
        self.waterBodyArea = pcr.ifthen(self.waterBodyArea > 0.0, self.waterBodyArea)

        # correcting water body ids and outlets (exclude all water bodies with surfaceArea = 0)
        self.waterBodyIds = pcr.ifthen(self.waterBodyArea > 0.0, self.waterBodyIds)
        self.waterBodyOut = pcr.ifthen(
            pcr.boolean(self.waterBodyIds), self.waterBodyOut
        )

        # water body types:
        # - 2 = reservoirs (regulated discharge)
        # - 1 = lakes (weirFormula)
        # - 0 = non lakes or reservoirs (e.g. wetland)
        self.waterBodyTyp = pcr.nominal(0)

        if self.useNetCDF:
            self.waterBodyTyp = vos.netcdf2PCRobjClone(
                self.ncFileInp,
                "waterBodyTyp",
                date_used,
                useDoy="yearly",
                cloneMapFileName=self.cloneMap,
            )
        else:
            self.waterBodyTyp = vos.readPCRmapClone(
                self.waterBodyTypInp + str(year_used) + ".map",
                self.cloneMap,
                self.tmpDir,
                self.inputDir,
                False,
                None,
                True,
            )

        # excluding wetlands (waterBodyTyp = 0) in all functions related to lakes/reservoirs
        #
        self.waterBodyTyp = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0, pcr.nominal(self.waterBodyTyp)
        )
        self.waterBodyTyp = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0, pcr.nominal(self.waterBodyTyp)
        )
        self.waterBodyTyp = pcr.areamajority(
            self.waterBodyTyp, self.waterBodyIds
        )  # choose only one type: either lake or reservoir
        self.waterBodyTyp = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0, pcr.nominal(self.waterBodyTyp)
        )
        self.waterBodyTyp = pcr.ifthen(
            pcr.boolean(self.waterBodyIds), self.waterBodyTyp
        )

        # correcting lakes and reservoirs ids and outlets
        self.waterBodyIds = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0, self.waterBodyIds
        )
        self.waterBodyOut = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0, self.waterBodyOut
        )

        # reservoir maximum capacity (m3):
        self.resMaxCap = pcr.scalar(0.0)
        self.waterBodyCap = pcr.scalar(0.0)

        if self.useNetCDF:
            self.resMaxCap = (
                1000.0
                * 1000.0
                * vos.netcdf2PCRobjClone(
                    self.ncFileInp,
                    "resMaxCapInp",
                    date_used,
                    useDoy="yearly",
                    cloneMapFileName=self.cloneMap,
                )
            )
        else:
            self.resMaxCap = (
                1000.0
                * 1000.0
                * vos.readPCRmapClone(
                    self.resMaxCapInp + str(year_used) + ".map",
                    self.cloneMap,
                    self.tmpDir,
                    self.inputDir,
                )
            )

        self.resMaxCap = pcr.ifthen(self.resMaxCap > 0, self.resMaxCap)
        self.resMaxCap = pcr.areaaverage(self.resMaxCap, self.waterBodyIds)

        # water body capacity (m3): (lakes and reservoirs)
        self.waterBodyCap = pcr.cover(
            self.resMaxCap, 0.0
        )  # Note: Most of lakes have capacities > 0.
        self.waterBodyCap = pcr.ifthen(
            pcr.boolean(self.waterBodyIds), self.waterBodyCap
        )

        # correcting water body types:                                  # Reservoirs that have zero capacities will be assumed as lakes.
        self.waterBodyTyp = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0.0, self.waterBodyTyp
        )
        self.waterBodyTyp = pcr.ifthenelse(
            self.waterBodyCap > 0.0,
            self.waterBodyTyp,
            pcr.ifthenelse(
                pcr.scalar(self.waterBodyTyp) == 2, pcr.nominal(1), self.waterBodyTyp
            ),
        )

        # final corrections:
        self.waterBodyTyp = pcr.ifthen(
            self.waterBodyArea > 0.0, self.waterBodyTyp
        )  # make sure that all lakes and/or reservoirs have surface areas
        self.waterBodyTyp = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0.0, self.waterBodyTyp
        )  # make sure that only types 1 and 2 will be considered in lake/reservoir functions
        self.waterBodyIds = pcr.ifthen(
            pcr.scalar(self.waterBodyTyp) > 0.0, self.waterBodyIds
        )  # make sure that all lakes and/or reservoirs have ids
        self.waterBodyOut = pcr.ifthen(
            pcr.scalar(self.waterBodyIds) > 0.0, self.waterBodyOut
        )  # make sure that all lakes and/or reservoirs have outlets

        # for a natural run (self.onlyNaturalWaterBodies == True)
        # which uses only the year 1900, assume all reservoirs are lakes
        if (
            self.onlyNaturalWaterBodies
            and date_used == self.dateForNaturalCondition
        ):
            logger.info(
                "Using only natural water bodies identified in the year 1900. All reservoirs in 1900 are assumed as lakes."
            )
            self.waterBodyTyp = pcr.ifthen(
                pcr.scalar(self.waterBodyTyp) > 0.0, pcr.nominal(1)
            )

        # check that all lakes and/or reservoirs have types, ids, surface areas and outlets:
        test = (
            pcr.defined(self.waterBodyTyp)
            & pcr.defined(self.waterBodyArea)
            & pcr.defined(self.waterBodyIds)
            & pcr.boolean(
                pcr.areamaximum(pcr.scalar(self.waterBodyOut), self.waterBodyIds)
            )
        )
        a, b, c = vos.getMinMaxMean(pcr.cover(pcr.scalar(test), 1.0) - pcr.scalar(1.0))
        threshold = 1e-3
        if abs(a) > threshold or abs(b) > threshold:
            logger.warning("Missing information in some lakes and/or reservoirs.")

        # at the beginning of simulation period (timeStepPCR = 1)
        # - we have to define/get the initial conditions
        #
        if currTimeStep.timeStepPCR == 1:
            self.getICs(initial_condition_dictionary)

        # For each new reservoir (introduced at the beginning of the year)
        # initiating storage, average inflow and outflow
        #
        self.waterBodyStorage = pcr.cover(self.waterBodyStorage, 0.0)
        self.avgInflow = pcr.cover(self.avgInflow, 0.0)
        self.avgOutflow = pcr.cover(self.avgOutflow, 0.0)

        # cropping only in the landmask region:
        self.fracWat = pcr.ifthen(self.landmask, self.fracWat)
        self.waterBodyIds = pcr.ifthen(self.landmask, self.waterBodyIds)
        self.waterBodyOut = pcr.ifthen(self.landmask, self.waterBodyOut)
        self.waterBodyArea = pcr.ifthen(self.landmask, self.waterBodyArea)
        self.waterBodyTyp = pcr.ifthen(self.landmask, self.waterBodyTyp)
        self.waterBodyCap = pcr.ifthen(self.landmask, self.waterBodyCap)
        self.waterBodyStorage = pcr.ifthen(self.landmask, self.waterBodyStorage)
        self.avgInflow = pcr.ifthen(self.landmask, self.avgInflow)
        self.avgOutflow = pcr.ifthen(self.landmask, self.avgOutflow)
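The outlet correction in getParameterFiles relies on a common PCRaster idiom: within each water body, the cell carrying the largest upstream cell count is taken as the outlet. A standalone sketch of just that step, with placeholder map names that are not part of the original class:

import pcraster as pcr

pcr.setclone("ldd.map")                               # hypothetical clone
ldd = pcr.lddrepair(pcr.readmap("ldd.map"))           # hypothetical drain directions
water_body_ids = pcr.nominal(pcr.readmap("ids.map"))  # hypothetical water body ids

# upstream cell count per cell; the cell holding the maximum count within
# each water body is that body's outlet
upstream_cells = pcr.catchmenttotal(pcr.scalar(1), ldd)
outlets = pcr.ifthen(
    upstream_cells == pcr.areamaximum(upstream_cells, water_body_ids),
    water_body_ids,
)
pcr.report(outlets, "outlets.map")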
    index = 0 # for posCnt
    
    # set clone and define land mask region
    pcr.setclone(landmask05minFile)
    landmask = pcr.defined(pcr.readmap(landmask05minFile))
    landmask = pcr.ifthen(landmask, landmask)
    
    # cell area at 5 arc min resolution
    cellArea = vos.readPCRmapClone(cellArea05minFile,
                                   cloneMapFileName,tmp_directory)
    cellArea = pcr.ifthen(landmask,cellArea)
    
    # ids for every 30 arc min grid:
    uniqueIDs30min = vos.readPCRmapClone(uniqueIDs30minFile,
                                         cloneMapFileName,tmp_directory) 
    uniqueIDs30min = pcr.nominal(pcr.ifthen(landmask, uniqueIDs30min))
    
    for iYear in range(staYear,endYear+1):
        for iMonth in range(1,12+1):
            timeStamp = datetime.datetime(int(iYear),int(iMonth),int(1),int(0))

            fulldate = '%4i-%02i-%02i' %(int(iYear),int(iMonth),int(1))
            print(fulldate)

            monthRange = float(calendar.monthrange(int(iYear), int(iMonth))[1])
            print(monthRange)
            
            index = index + 1
            for iVar in range(0,len(varNames)):      
                
                # reading values from the input netcdf files (30min)
landmask = pcr.defined(pcr.readmap(clone_map))

# class map file name:
#~ class_map_file_name = "/home/sutan101/data/aqueduct_gis_layers/aqueduct_shp_from_marta/Aqueduct_States.map"
#~ class_map_file_name = "/home/sutan101/data/aqueduct_gis_layers/aqueduct_shp_from_marta/Aqueduct_GDBD.map"
#~ class_map_file_name = "/home/sutan101/data/processing_whymap/version_19september2014/major_aquifer_30min.extended.map"
#~ class_map_file_name = "/home/sutan101/data/processing_whymap/version_19september2014/major_aquifer_30min.map"
class_map_file_name = str(sys.argv[2])
class_map_default_folder = "/home/sutan101/data/aqueduct_gis_layers/aqueduct_shp_from_marta/" 
if class_map_file_name == "state": class_map_file_name = class_map_default_folder + "/Aqueduct_States.map"
if class_map_file_name == "drainage_unit": class_map_file_name = class_map_default_folder + "/Aqueduct_GDBD.map"
if class_map_file_name == "aquifer": class_map_file_name = class_map_default_folder + "/why_wgs1984_BUENO.map"
if class_map_file_name == "country": class_map_file_name = "/home/sutan101/data/country_shp_from_tianyi/World_Polys_High.map"

# reading the class map
class_map    = pcr.nominal(pcr.uniqueid(landmask))
if class_map_file_name != "pixel":
	class_map = vos.readPCRmapClone(class_map_file_name, clone_map, tmp_directory, None, False, None, True, False)
	class_map = pcr.ifthen(pcr.scalar(class_map) > 0.0, pcr.nominal(class_map)) 

# time selection
start_year = int(sys.argv[3])
end_year   = int(sys.argv[4])
 
# cell_area (unit: m2)
cell_area = pcr.readmap("/data/hydroworld/PCRGLOBWB20/input5min/routing/cellsize05min.correct.map") 
segment_cell_area = pcr.areatotal(cell_area, class_map)

# extent of aquifer/sedimentary basins:
sedimentary_basin = pcr.cover(pcr.scalar(pcr.readmap("/home/sutan101/data/sed_extent/sed_extent.map")), 0.0)
cell_area = sedimentary_basin * cell_area
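A hedged continuation sketch: with the masked cell areas above, the fraction of each class underlain by sedimentary basins could be derived with one more zonal sum (the output name is illustrative, not part of the original snippet):

# fraction of each class area that lies within sedimentary basins
sedimentary_fraction = pcr.areatotal(cell_area, class_map) / segment_cell_area
pcr.report(sedimentary_fraction, "sedimentary_fraction.map")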
Example n. 53
0
    attributeDictionary['comment'    ]  = "None"
    # additional attribute defined in PCR-GLOBWB 
    attributeDictionary['description'] = "prepared by Edwin H. Sutanudjaja"

    # initiate the netcd object: 
    tssNetCDF = MakingNetCDF(cloneMapFile = cloneMapFileName, \
                             attribute = attributeDictionary, \
                             cellSizeInArcMinutes = cellSizeInArcMinutes)
    # making netcdf files:
    for var in variable_names:
        tssNetCDF.createNetCDF(output[var]['file_name'], var, output[var]['unit'])

    # class (country) ids
    uniqueIDsFile = "/projects/0/dfguu/users/edwin/data/country_shp_from_tianyi/World_Polys_High.map"
    uniqueIDs = pcr.nominal(\
                vos.readPCRmapClone(uniqueIDsFile, cloneMapFileName, tmp_directory, 
                                    None, False, None, True))
    uniqueIDs = pcr.ifthen(pcr.scalar(uniqueIDs) >= 0.0, uniqueIDs)
    
    # landmask                               
    landmask = pcr.defined(pcr.readmap(landmask05minFile))
    landmask = pcr.ifthen(landmask, landmask)
    # - extending landmask with uniqueIDs
    landmask = pcr.cover(landmask, pcr.defined(uniqueIDs))
    
    # extending class (country) ids
    max_step = 5
    for i in range(1, max_step+1, 1):
        cmd = "Extending class: step "+str(i)+" from " + str(max_step)
        print(cmd)
    def evaluateAllBaseflowResults(self,globalCloneMapFileName,\
                                   catchmentClassFileName,\
                                   lddMapFileName,\
                                   cellAreaMapFileName,\
                                   pcrglobwb_output,\
                                   analysisOutputDir="",\
                                   tmpDir = None):     

        # temporary directory
        if tmpDir == None: tmpDir = self.tmpDir+"/edwin_iwmi_"

        # output directory for all analyses for all stations
        analysisOutputDir   = str(analysisOutputDir)
        self.chartOutputDir = analysisOutputDir+"/chart/"
        self.tableOutputDir = analysisOutputDir+"/table/"
        #
        if analysisOutputDir == "": self.chartOutputDir = "chart/"
        if analysisOutputDir == "": self.tableOutputDir = "table/"
        #
        # make the chart and table directories:
        os.system('rm -r '+self.chartOutputDir+"*")
        os.system('rm -r '+self.tableOutputDir+"*")
        os.makedirs(self.chartOutputDir)
        os.makedirs(self.tableOutputDir)
        
        # cloneMap for all pcraster operations
        pcr.setclone(globalCloneMapFileName)
        cloneMap = pcr.boolean(1)
        
        lddMap = pcr.lddrepair(pcr.readmap(lddMapFileName))
        cellArea = pcr.scalar(pcr.readmap(cellAreaMapFileName))
        
        # The landMaskClass map contains the nominal classes for all landmask regions. 
        landMaskClass = pcr.nominal(cloneMap)  # default: if catchmentClassFileName is not given
        if catchmentClassFileName is not None:
            landMaskClass = pcr.nominal(pcr.readmap(catchmentClassFileName))

        for id in self.list_of_grdc_ids: 

            logger.info("Evaluating simulated annual baseflow time series to IWMI baseflow time series at "+str(self.attributeGRDC["id_from_grdc"][str(id)])+".")
            
            # evaluate model results to GRDC data
            self.evaluateBaseflowResult(str(id),pcrglobwb_output,catchmentClassFileName,tmpDir)
            
        # write the summary to a table 
        summary_file = analysisOutputDir+"baseflow_summary.txt"
        #
        logger.info("Writing the summary for all stations to the file: "+str(summary_file)+".")
        #
        # prepare the file:
        summary_file_handle = open(summary_file,"w")
        #
        # write the header
        summary_file_handle.write( ";".join(self.grdc_dict_keys)+"\n")
        #
        # write the content
        for id in self.list_of_grdc_ids:
            rowLine  = ""
            for key in self.grdc_dict_keys: rowLine += str(self.attributeGRDC[key][str(id)]) + ";"   
            rowLine = rowLine[0:-1] + "\n"
            summary_file_handle.write(rowLine)
        summary_file_handle.close()           
# - ldd
ldd = vos.readPCRmapClone(input_files['ldd_map_05min'],
                          clone_map_file,
                          output_files['tmp_folder'],
                          None,
                          True)
ldd = pcr.lddrepair(pcr.ldd(ldd))
ldd = pcr.lddrepair(ldd)
# - landmask
landmask  = pcr.ifthen(pcr.defined(ldd), pcr.boolean(1.0))

# set the the cell id for every half arc-degree cell 
msg = "Setting the cell id for every half arc-degree cell" + ":"
logger.info(msg)
cell_ids_30min = pcr.nominal(\
                 vos.readPCRmapClone(input_files['cell_ids_30min' ],
                                     clone_map_file,
                                     output_files['tmp_folder'],
                                     None,
                                     False,
                                     None,
                                     True))

# set the cell areas for 5 arc-min and half arc-degree cells 
msg = "Setting the cell areas for 5 arc-min and half arc-degree cells" + ":"
logger.info(msg)
# - 5 arc-min
cell_area = vos.readPCRmapClone(input_files['cell_area_05min'],
                          clone_map_file,
                          output_files['tmp_folder'])
cell_area = pcr.ifthen(landmask, cell_area)
# - 30 arc-min
cell_area_30min = pcr.areatotal(cell_area, cell_ids_30min)
Example n. 56
0
def subcatch_stream(ldd, threshold, stream=None, min_strahler=-999, max_strahler=999, assign_edge=False, assign_existing=False, up_area=None, basin=None):
    """
    Derive catchments based upon strahler threshold
    Input:
        ldd -- pcraster object direction, local drain directions
        threshold -- integer, strahler threshold, subcatchments ge threshold
            are derived
        stream=None -- pcraster object ordinal, stream order map (made with pcr.streamorder), if provided, stream order
            map is not generated on the fly but used from this map. Useful when a subdomain within a catchment is
            provided, which would cause edge effects in the stream order map
        min_strahler=-999 -- integer, minimum strahler threshold of river catchments
            to return
        max_strahler=999 -- integer, maximum strahler threshold of river catchments
            to return
        assign_edge=False -- if set to True, unassigned connected areas at
            the edges of the domain are assigned a unique id as well. If set
            to False, edges are not assigned
        assign_existing=False -- if set to True, unassigned edges are assigned
            to existing basins with an upstream weighting. If set to False,
            edges are assigned to unique IDs, or not assigned
        up_area=None -- pcraster object scalar, upstream area map used when
            assign_existing is True; derived on the fly if not provided
        basin=None -- pcraster object boolean, mask used to clip the
            subcatchments to the basin of interest
    output:
        stream_ge -- pcraster object, streams of strahler order ge threshold
        subcatch -- pcraster object, subcatchments of strahler order ge threshold

    """
    # derive stream order

    if stream is None:
        stream = pcr.streamorder(ldd)

    stream_ge = pcr.ifthen(stream >= threshold, stream)
    stream_up_sum = pcr.ordinal(pcr.upstream(ldd, pcr.cover(pcr.scalar(stream_ge), 0)))
    # detect any transfer of strahler order, to a higher strahler order.
    transition_strahler = pcr.ifthenelse(pcr.downstream(ldd, stream_ge) != stream_ge, pcr.boolean(1),
                                         pcr.ifthenelse(pcr.nominal(ldd) == 5, pcr.boolean(1), pcr.ifthenelse(pcr.downstream(ldd, pcr.scalar(stream_up_sum)) > pcr.scalar(stream_ge), pcr.boolean(1),
                                                                                                              pcr.boolean(0))))
    # make unique ids (write to file)
    transition_unique = pcr.ordinal(pcr.uniqueid(transition_strahler))

    # derive upstream catchment areas (write to file)
    subcatch = pcr.nominal(pcr.subcatchment(ldd, transition_unique))
    # mask out areas outside basin
    if basin is not None:
        subcatch = pcr.ifthen(basin, subcatch)

    if assign_edge:
        # fill unclassified areas (in pcraster equal to zero) with a unique id, above the maximum id assigned so far
        unique_edge = pcr.clump(pcr.ifthen(subcatch==0, pcr.ordinal(0)))
        subcatch = pcr.ifthenelse(subcatch==0, pcr.nominal(pcr.mapmaximum(pcr.scalar(subcatch)) + pcr.scalar(unique_edge)), pcr.nominal(subcatch))
    elif assign_existing:
        # unaccounted areas are added to largest nearest draining basin
        if up_area is None:
            up_area = pcr.ifthen(pcr.boolean(pcr.cover(stream_ge, 0)), pcr.accuflux(ldd, 1))
        riverid = pcr.ifthen(pcr.boolean(pcr.cover(stream_ge, 0)), subcatch)

        friction = 1./pcr.scalar(pcr.spreadzone(pcr.cover(pcr.ordinal(up_area), 0), 0, 0)) # *(pcr.scalar(ldd)*0+1)
        delta = pcr.ifthen(pcr.scalar(ldd)>=0, pcr.ifthen(pcr.cover(subcatch, 0)==0, pcr.spreadzone(pcr.cover(riverid, 0), 0, friction)))
        subcatch = pcr.ifthenelse(pcr.boolean(pcr.cover(subcatch, 0)),
                                      subcatch,
                                      delta)

    # finally, only keep basins with minimum and maximum river order flowing through them
    strahler_subcatch = pcr.areamaximum(stream, subcatch)
    subcatch = pcr.ifthen(pcr.ordinal(strahler_subcatch) >= min_strahler, pcr.ifthen(pcr.ordinal(strahler_subcatch) <= max_strahler, subcatch))

    return stream_ge, pcr.ordinal(subcatch)
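A hedged usage sketch of this variant; the ldd file name is a placeholder, not part of the original snippet:

import pcraster as pcr

pcr.setclone("ldd.map")                      # hypothetical clone map
ldd = pcr.lddrepair(pcr.readmap("ldd.map"))

# subcatchments upstream of Strahler order >= 6 streams, with unassigned
# edge areas attached to the existing basins
stream_ge, subcatch = subcatch_stream(ldd, 6, assign_existing=True)
pcr.report(subcatch, "subcatch.map")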