Example #1
def getRowColPoint(in_map, xcor, ycor):
    """
    returns the row and col in a map at the point given.
    Works but is rather slow.

    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate
        - ycor - y coordinate

    Output:
        - row, column
    """
    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.scalar(in_map) + 1.0)), np.nan)
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.scalar(in_map) + 1.0)), np.nan)
    XX = pcr.pcr2numpy(pcr.celllength(), 0.0)
    tolerance = 0.5  # takes a single point

    diffx = x - xcor
    diffy = y - ycor
    col_ = np.absolute(diffx) <= (XX[0, 0] * tolerance)  # cellsize
    row_ = np.absolute(diffy) <= (XX[0, 0] * tolerance)  # cellsize
    point = col_ * row_

    return point.argmax(0).max(), point.argmax(1).max()
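
A minimal usage sketch, assuming a hypothetical clone-compatible file "dem.map" and a coordinate pair that falls inside the map extent:

import pcraster as pcr

pcr.setclone("dem.map")  # hypothetical clone map
dem = pcr.readmap("dem.map")
row, col = getRowColPoint(dem, 5.25, 52.10)
print(pcr.cellvalue(dem, int(row) + 1, int(col) + 1)[0])  # cellvalue is 1-based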
Example #3
    def __init__(self, netcdffile, logging):
        """
        First try to setup a class read netcdf files
        (converted with pcr2netcdf.py)

        netcdffile: file to read the forcing data from
        logging: python logging object
        vars: list of variables to get from file
        """

        if os.path.exists(netcdffile):
            self.dataset = netCDF4.Dataset(netcdffile, mode="r")
        else:
            msg = os.path.abspath(netcdffile) + " not found!"
            logging.error(msg)
            raise ValueError(msg)

        try:
            self.x = self.dataset.variables["x"][:]
        except KeyError:
            self.x = self.dataset.variables["lon"][:]
        # Now check Y values to see if we must flip the data
        try:
            self.y = self.dataset.variables["y"][:]
        except KeyError:
            self.y = self.dataset.variables["lat"][:]

        x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[0, :]
        y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[:, 0]

        (self.lonidx,) = np.logical_and(self.x >= x.min(), self.x < x.max()).nonzero()
        (self.latidx,) = np.logical_and(self.y >= y.min(), self.y < y.max()).nonzero()

        logging.info("Reading static input from netCDF file: " + netcdffile)
Example #4
    def set_latlon_based_on_cloneMapFileName(self, cloneMapFileName):

        # cloneMap: read the map, then overwrite the variable with a map that is
        # True everywhere on the clone
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))
        
        # properties of the clone map
        # - numbers of rows and columns
        rows = pcr.clone().nrRows()
        cols = pcr.clone().nrCols()
        # - cell size in arc minutes, rounded to one decimal place
        cellSizeInArcMin = round(pcr.clone().cellSize() * 60.0, 1)
        # - cell sizes in arc degrees for the longitude and latitude directions
        deltaLon = cellSizeInArcMin / 60.
        deltaLat = deltaLon
        # - coordinates of the upper left corner, rounded to two decimal places to avoid rounding errors during (future) resampling
        x_min = round(pcr.clone().west(), 2)
        y_max = round(pcr.clone().north(), 2)
        # - coordinates of the lower right corner, rounded likewise
        x_max = round(x_min + cols*deltaLon, 2)
        y_min = round(y_max - rows*deltaLat, 2)
        
        # cell centres coordinates
        longitudes = np.arange(x_min + deltaLon/2., x_max, deltaLon)
        latitudes  = np.arange(y_max - deltaLat/2., y_min,-deltaLat)

        #~ # cell centres coordinates
        #~ longitudes = np.linspace(x_min + deltaLon/2., x_max - deltaLon/2., cols)
        #~ latitudes  = np.linspace(y_max - deltaLat/2., y_min + deltaLat/2., rows)
        
        #~ # cell centres coordinates (latitudes and longitudes, directly from the clone maps)
        #~ longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
        #~ latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]

        return longitudes, latitudes, cellSizeInArcMin  
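
A quick numeric check of the grid arithmetic above (illustrative numbers only): for a hypothetical 5 arc-minute clone with 360 columns starting at 0 degrees east:

cellSizeInArcMin = 5.0
deltaLon = cellSizeInArcMin / 60.        # 0.0833... arc degrees
x_min, cols = 0.0, 360
x_max = round(x_min + cols * deltaLon, 2)  # 30.0 degrees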
Example #5
    def __init__(self,
                 cloneMapFileName,
                 resetClone=None,
                 attributeDictionary=None):

        # cloneMap
        if resetClone is not None: pcr.setclone(cloneMapFileName)
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))

        # latitudes and longitudes
        self.latitudes = np.unique(
            pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # reset clone (if necessary)
        if resetClone is not None: pcr.setclone(resetClone)

        # netcdf format:
        self.format = 'NETCDF3_CLASSIC'

        self.attributeDictionary = {}
        if attributeDictionary is None:
            self.attributeDictionary['institution'] = "None"
            self.attributeDictionary['title'] = "None"
            self.attributeDictionary['source'] = "None"
            self.attributeDictionary['history'] = "None"
            self.attributeDictionary['references'] = "None"
            self.attributeDictionary['description'] = "None"
            self.attributeDictionary['comment'] = "None"
        else:
            self.attributeDictionary = attributeDictionary
Example #6
def upscale_riverlength(ldd, order, factor):
    """
    Upscales the riverlength using 'factor'
    The resulting maps can be resampled (e.g. using resample.exe) by factor and should
    include the accurate length as determined with the original higher
    resolution maps.  This function is **depricated**,
    use are_riverlength instead as this version
    is very slow for large maps

    Input:
        - ldd
        - minimum streamorder to include

    Output:
        - distance per factor cells
    """

    strorder = pcr.streamorder(ldd)
    strorder = pcr.ifthen(strorder >= order, strorder)
    dist = pcr.cover(
        pcr.max(pcr.celllength(),
                pcr.ifthen(pcr.boolean(strorder), pcr.downstreamdist(ldd))),
        0,
    )
    totdist = pcr.max(
        pcr.ifthen(
            pcr.boolean(strorder),
            pcr.windowtotal(pcr.ifthen(pcr.boolean(strorder), dist),
                            pcr.celllength() * factor),
        ),
        dist,
    )

    return totdist
Example #7
def subcatch_order_a(ldd, oorder):
    """
    Determines subcatchments using the catchment order

    This version uses the last cell BELOW order to derive the
    catchments. In general you want the _b version

    Input:
        - ldd
        - order - order to use

    Output:
        - map with catchment for the given streamorder
    """
    outl = find_outlet(ldd)
    large = pcr.subcatchment(ldd, pcr.boolean(outl))
    stt = pcr.streamorder(ldd)
    sttd = pcr.downstream(ldd, stt)
    pts = pcr.ifthen((pcr.scalar(sttd) - pcr.scalar(stt)) > 0.0, sttd)
    dif = pcr.upstream(
        ldd,
        pcr.cover(
            pcr.ifthen(
                large,
                pcr.uniqueid(pcr.boolean(pcr.ifthen(stt == pcr.ordinal(oorder), pts))),
            ),
            0,
        ),
    )
    dif = pcr.cover(pcr.scalar(outl), dif)  # Add catchment outlet
    dif = pcr.ordinal(pcr.uniqueid(pcr.boolean(dif)))
    sc = pcr.subcatchment(ldd, dif)

    return sc, dif, stt
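
A usage sketch, assuming a hypothetical "ldd.map" file and that find_outlet comes from the same library as subcatch_order_a:

import pcraster as pcr

pcr.setclone("ldd.map")  # hypothetical clone map
ldd = pcr.ldd(pcr.readmap("ldd.map"))
sc, outlets, strahler = subcatch_order_a(ldd, 4)  # subcatchments at stream order 4
pcr.report(sc, "subcatchments.map")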
Example #8
def subcatch_stream(ldd, stream, threshold):
    """
    Derive catchments based upon strahler threshold
    Input:
        ldd -- pcraster object direction, local drain directions
        stream -- pcraster object direction, streamorder
        threshold -- integer, strahler threshold, subcatchments ge threshold are
                 derived
    output:
        stream_ge -- pcraster object, streams of strahler order ge threshold
        subcatch -- pcraster object, subcatchments of strahler order ge threshold

    """
    # derive stream order

    # stream = pcr.streamorder(ldd)
    stream_ge = pcr.ifthen(stream >= threshold, stream)
    stream_up_sum = pcr.ordinal(
        pcr.upstream(ldd, pcr.cover(pcr.scalar(stream_ge), 0)))
    # detect any transfer of strahler order, to a higher strahler order.
    transition_strahler = pcr.ifthenelse(
        pcr.downstream(ldd, stream_ge) != stream_ge, pcr.boolean(1),
        pcr.ifthenelse(
            pcr.nominal(ldd) == 5, pcr.boolean(1),
            pcr.ifthenelse(
                pcr.downstream(ldd, pcr.scalar(stream_up_sum)) >
                pcr.scalar(stream_ge), pcr.boolean(1), pcr.boolean(0))))

    # make unique ids (write to file)
    transition_unique = pcr.ordinal(pcr.uniqueid(transition_strahler))

    # derive upstream catchment areas (write to file)
    subcatch = pcr.nominal(pcr.subcatchment(ldd, transition_unique))
    return stream_ge, subcatch
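
A usage sketch with a hypothetical "ldd.map" file; the commented-out line in the body suggests stream is a Strahler order map, so one can be derived with pcr.streamorder:

import pcraster as pcr

pcr.setclone("ldd.map")  # hypothetical clone map
ldd = pcr.ldd(pcr.readmap("ldd.map"))
stream = pcr.streamorder(ldd)  # Strahler order per cell
stream_ge, subcatch = subcatch_stream(ldd, stream, threshold=6)
pcr.report(subcatch, "subcatchments_ge6.map")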
Example #9
    def set_latlon_based_on_cloneMapFileName(
            self, cloneMapFileName, netcdf_y_orientation_from_top_bottom=True):

        # cloneMap
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))

        # properties of the clone map
        # - numbers of rows and columns
        rows = pcr.clone().nrRows()
        cols = pcr.clone().nrCols()
        # - cell size in arc minutes, rounded to one decimal place
        cellSizeInArcMin = round(pcr.clone().cellSize() * 60.0, 1)
        # - cell sizes in arc degrees for the longitude and latitude directions
        deltaLon = cellSizeInArcMin / 60.
        deltaLat = deltaLon
        # - coordinates of the upper left corner, rounded to two decimal places to avoid rounding errors during (future) resampling
        x_min = round(pcr.clone().west(), 2)
        y_max = round(pcr.clone().north(), 2)
        # - coordinates of the lower right corner, rounded likewise
        x_max = round(x_min + cols * deltaLon, 2)
        y_min = round(y_max - rows * deltaLat, 2)

        # cell centres coordinates
        longitudes = np.arange(x_min + deltaLon / 2., x_max, deltaLon)
        latitudes = np.arange(y_max - deltaLat / 2., y_min, -deltaLat)

        if not netcdf_y_orientation_from_top_bottom:
            latitudes = latitudes[::-1]

        return longitudes, latitudes, cellSizeInArcMin
Example #10
    def test_03(self):
        """  nonspatial condition in ifthen """
        nr_rows = 2
        nr_cols = 3
        nr_cells = nr_rows * nr_cols
        pcraster.setclone(nr_rows, nr_cols, 5, 1, 1)


        raster = pcraster.ifthen(pcraster.boolean(1), pcraster.scalar(4.567))

        for idx in range(1, nr_cells + 1):
          value, isValid = pcraster.cellvalue(raster, idx)
          self.assertEqual(isValid, True)
          self.assertAlmostEqual(value, 4.567, 6)

        raster = pcraster.ifthen(pcraster.boolean(0), pcraster.scalar(4.567))

        for idx in range(1, nr_cells + 1):
          value, isValid = pcraster.cellvalue(raster, idx)
          self.assertEqual(isValid, False)


        raster = pcraster.ifthen(pcraster.scalar(1), pcraster.scalar(4.567))

        for idx in range(1, nr_cells + 1):
          value, isValid = pcraster.cellvalue(raster, idx)
          self.assertEqual(isValid, True)
          self.assertAlmostEqual(value, 4.567, 6)


        raster = pcraster.ifthen(pcraster.scalar(0), pcraster.scalar(4.567))

        for idx in range(1, nr_cells + 1):
          value, isValid = pcraster.cellvalue(raster, idx)
          self.assertEqual(isValid, False)
Example #12
def getCoordinates(cloneMap, MV=-9999):
    '''returns cell centre coordinates for a clone map as numpy array
       return longitudes, latitudes '''
    cln = pcr.cover(pcr.boolean(cloneMap), pcr.boolean(1))
    xMap = pcr.xcoordinate(cln)
    yMap = pcr.ycoordinate(cln)
    return pcr.pcr2numpy(xMap, MV)[1, :], pcr.pcr2numpy(yMap, MV)[:, 1]
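
A usage sketch with a hypothetical clone map file "clone.map":

import pcraster as pcr

pcr.setclone("clone.map")
lons, lats = getCoordinates(pcr.readmap("clone.map"))
print(lons.shape, lats.shape)  # one entry per column and per row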
Example #13
    def set_latlon_based_on_cloneMapFileName(self, cloneMapFileName):

        # cloneMap
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))

        # properties of the clone map
        # - numbers of rows and columns
        rows = pcr.clone().nrRows()
        cols = pcr.clone().nrCols()
        # - cell size in arc minutes, rounded to one decimal place
        cellSizeInArcMin = round(pcr.clone().cellSize() * 60.0, 1)
        # - cell sizes in arc degrees for the longitude and latitude directions
        deltaLon = cellSizeInArcMin / 60.0
        deltaLat = deltaLon
        # - coordinates of the upper left corner, rounded to two decimal places to avoid rounding errors during (future) resampling
        x_min = round(pcr.clone().west(), 2)
        y_max = round(pcr.clone().north(), 2)
        # - coordinates of the lower right corner, rounded likewise
        x_max = round(x_min + cols * deltaLon, 2)
        y_min = round(y_max - rows * deltaLat, 2)

        # cell centres coordinates
        longitudes = np.arange(x_min + deltaLon / 2.0, x_max, deltaLon)
        latitudes = np.arange(y_max - deltaLat / 2.0, y_min, -deltaLat)

        # ~ # cell centres coordinates
        # ~ longitudes = np.linspace(x_min + deltaLon/2., x_max - deltaLon/2., cols)
        # ~ latitudes  = np.linspace(y_max - deltaLat/2., y_min + deltaLat/2., rows)

        # ~ # cell centres coordinates (latitudes and longitudes, directly from the clone maps)
        # ~ longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
        # ~ latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]

        return longitudes, latitudes, cellSizeInArcMin
Example #14
    def __init__(self, cloneMapFileName, netcdf_attribute_description):

        # cloneMap
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))

        # latitudes and longitudes
        self.latitudes = np.unique(
            pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # netCDF format and attributes:
        self.format = 'NETCDF3_CLASSIC'
        self.attributeDictionary = {}
        self.attributeDictionary['institution'] = "European Commission - JRC"
        self.attributeDictionary[
            'title'] = "EFAS-Meteo 5km for the Rhine-Meuse basin"
        self.attributeDictionary[
            'source'] = "5km Gridded Meteo Database (C) European Commission - JRDC, 2014"
        self.attributeDictionary[
            'history'] = "The data were provided by Ad de Roo ([email protected]) on 19 November 2014 and then converted by Edwin H. Sutanudjaja ([email protected]) to netcdf files on 27 November 2014."
        self.attributeDictionary[
            'references'] = "Ntegeka et al., 2013. EFAS-Meteo: A European daily high-resolution gridded meteorological data set. JRC Technical Reports. doi: 10.2788/51262"
        self.attributeDictionary[
            'comment'] = "Please use this dataset only for Hyper-Hydro test bed experiments. "
        self.attributeDictionary[
            'comment'] += "For using it and publishing it, please acknowledge its source: 5km Gridded Meteo Database (C) European Commission - JRDC, 2014 and its reference: Ntegeka et al., 2013 (doi: 10.2788/51262). "
        self.attributeDictionary[
            'comment'] += "The data are in European ETRS projection, 5km grid; http://en.wikipedia.org/wiki/European_grid. "

        self.attributeDictionary['description'] = netcdf_attribute_description
Example #15
    def overview(self, msr, pollution_threshold=2):
        """Give the overview of costs and st.dev for dem and minor embankments.
        """
        msr_type = msr.settings.loc['msr_type', 1]
        name = msr_type + '_' + msr.settings.loc['ID']

        # Separate between clean and polluted areas
        area_clean = pcr.ifthen(self.pollution_zones >= pollution_threshold,
                                pcr.nominal(1))
        area_polluted = pcr.ifthen(self.pollution_zones < pollution_threshold,
                                   pcr.nominal(1))

        area_clean = pcr.defined(msr.area) & pcr.defined(area_clean)
        area_clean = pcr.ifthen(area_clean, pcr.boolean(1))
        area_polluted = pcr.defined(msr.area) & pcr.defined(area_polluted)
        area_polluted = pcr.ifthen(area_polluted, pcr.boolean(1))

        # Calculate costs and stddev for all earthwork types.
        flpl_low_values = self.dem_lowering(msr, area_polluted)
        minemb_low_values = self.minemb_lowering(msr, area_polluted)
        groyne_lowering_values = self.groyne_lowering(msr)
        cost_ew = pd.concat(
            [flpl_low_values, groyne_lowering_values, minemb_low_values])

        cost_df = cost_ew.iloc[:, 0:1].T
        cost_df.index = [name]
        std_df = cost_ew.iloc[:, 1:2].T
        std_df.index = [name]
        return cost_df, std_df
Example #16
def create_city_map(ESA_CCI_land_use_map_filename, city_class_id):
    # Create cities map from land use map
    land_use_map = pcr.readmap(ESA_CCI_land_use_map_filename)
    cities = pcr.ifthenelse(land_use_map == city_class_id, pcr.boolean(1),
                            pcr.boolean(0))
    pcr.report(
        cities,
        os.path.join(os_output_folder, "cities_" + CASE_STUDY_NAME + ".map"))
    return
Example #17
def derive_HAND(dem,
                ldd,
                accuThreshold,
                rivers=None,
                basin=None,
                up_area=None):
    """
    Function derives Height-Above-Nearest-Drain.
    See http://www.sciencedirect.com/science/article/pii/S003442570800120X
    Input:
        dem -- pcraster object float32, elevation data
        ldd -- pcraster object direction, local drain directions
        accuThreshold -- upstream amount of cells as threshold for river
            delineation
        rivers=None -- you can provide a rivers layer here. Pixels that are
                        identified as river should have a value > 0, other
                        pixels a value of zero.
        basin=None -- set a boolean pcraster map where areas with True are estimated using the nearest drain in ldd distance
                        and areas with False by means of the nearest friction distance. Friction distance estimated using the
                        upstream area as weight (i.e. drains with a bigger upstream area have a lower friction)
                        the spreadzone operator is used in this case.
        up_area=None -- provide the upstream area (if not assigned a guesstimate is prepared, assuming the LDD covers a
                        full catchment area)
    Output:
        hand -- pcraster object float32, height, normalised to nearest stream
        dist -- distance to nearest stream measured in cell lengths
            according to D8 directions
    """
    if rivers is None:
        # prepare stream from a strahler threshold
        stream = pcr.ifthenelse(
            pcr.accuflux(ldd, 1) >= accuThreshold, pcr.boolean(1),
            pcr.boolean(0))
    else:
        # convert stream network to boolean
        stream = pcr.boolean(pcr.cover(rivers, 0))
    # determine height in river (in DEM*100 unit as ordinal)
    height_river = pcr.ifthenelse(stream, pcr.ordinal(dem * 100), 0)
    if basin is None:
        up_elevation = pcr.scalar(pcr.subcatchment(ldd, height_river))
    else:
        # use basin to allocate areas outside basin to the nearest stream. Nearest is weighted by upstream area
        if up_area is None:
            up_area = pcr.accuflux(ldd, 1)
        up_area = pcr.ifthen(stream, up_area)  # mask areas outside streams
        friction = 1. / pcr.scalar(
            pcr.spreadzone(pcr.cover(pcr.ordinal(up_area), 0), 0, 0))
        # if basin, use nearest river within subcatchment, if outside basin, use weighted-nearest river
        up_elevation = pcr.ifthenelse(
            basin, pcr.scalar(pcr.subcatchment(ldd, height_river)),
            pcr.scalar(pcr.spreadzone(height_river, 0, friction)))
        # replace areas outside of basin by a spread zone calculation.
    hand = pcr.max(pcr.scalar(pcr.ordinal(dem * 100)) - up_elevation,
                   0) / 100  # convert back to float in DEM units
    # hand = (pcr.scalar(pcr.ordinal(dem*100))-up_elevation)/100  # convert back to float in DEM units
    dist = pcr.ldddist(ldd, stream, 1)  # compute horizontal distance estimate
    return hand, dist
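
A usage sketch with hypothetical "dem.map" and "ldd.map" files; accuThreshold=1000 means cells draining at least 1000 upstream cells are treated as river:

import pcraster as pcr

pcr.setclone("dem.map")
dem = pcr.readmap("dem.map")
ldd = pcr.ldd(pcr.readmap("ldd.map"))
hand, dist = derive_HAND(dem, ldd, accuThreshold=1000)
pcr.report(hand, "hand.map")          # height above nearest drain, DEM units
pcr.report(dist, "dist2stream.map")   # distance in cell lengths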
Example #18
def create_pcr_clone_and_nullmask(land_use_map_filename):
    land_use_map = pcr.readmap(land_use_map_filename)
    clone_map = pcr.boolean(1)
    pcr.report(
        clone_map,
        os.path.join(os_output_folder, "clone_" + CASE_STUDY_NAME + ".map"))
    nullmask_map = pcr.ifthen(pcr.boolean(land_use_map), pcr.boolean(0))
    pcr.report(
        nullmask_map,
        os.path.join(os_output_folder, "nullMask_" + CASE_STUDY_NAME + ".map"))
    return clone_map, nullmask_map
Example #19
def finalize_prot_area_maps(protected_areas_map, nullmask_map):
    # Finalizing protected areas
    protected_areas_map_country = pcr.cover(protected_areas_map, 0)
    protected_areas_map_country_cut = pcr.ifthen(nullmask_map == 0,
                                                 protected_areas_map_country)
    protected_areas_map_country_bool = pcr.ifthenelse(
        pcr.scalar(protected_areas_map_country_cut) > 0, pcr.boolean(1),
        pcr.boolean(0))
    pcr.report(
        protected_areas_map_country_bool,
        os.path.join(os_output_folder,
                     "protected_areas_map_" + CASE_STUDY_NAME + "_bool.map"))
Example #20
def selectCatchments(ldd, pitsMap, pitsDict, continentList, cloneMap):
    catchments = pcr.ifthen(pcr.boolean(pcr.readmap(cloneMap)) == 0, pcr.scalar(1))
    for continent in pitsDict:
        continentNr   = pitsDict[continent][0]
        pitsContinent = pitsDict[continent][1:]
        print('updating catchments for', continent)
        for pitNr in pitsContinent:
            pitMap = pcr.ifthen(pitsMap == pitNr, pcr.scalar(continentNr))
            catchment = pcr.catchment(ldd, pcr.nominal(pitMap))
            catchment = pcr.ifthen(pcr.boolean(catchment) == 1, catchment)
            catchments = pcr.cover(catchments, pcr.scalar(catchment))
    return catchments
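
The expected shape of pitsDict can be read off the body above: per continent, the first list entry is the continent number and the remaining entries are pit ids. A hypothetical example, given ldd and pitsMap PCRaster objects (note that continentList is accepted but not used by the body):

pitsDict = {
    "Europe": [1, 101, 102, 103],  # continent number 1, pits 101-103
    "Africa": [2, 201, 202],       # continent number 2, pits 201-202
}
catchments = selectCatchments(ldd, pitsMap, pitsDict,
                              ["Europe", "Africa"], "clone.map")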
Example #21
def derive_HAND(dem, ldd, accuThreshold, rivers=None, basin=None, up_area=None, neg_HAND=None):
    """
    Function derives Height-Above-Nearest-Drain.
    See http://www.sciencedirect.com/science/article/pii/S003442570800120X
    Input:
        dem -- pcraster object float32, elevation data
        ldd -- pcraster object direction, local drain directions
        accuThreshold -- upstream amount of cells as threshold for river
            delineation
        rivers=None -- you can provide a rivers layer here. Pixels that are
                        identified as river should have a value > 0, other
                        pixels a value of zero.
        basin=None -- set a boolean pcraster map where areas with True are estimated using the nearest drain in ldd distance
                        and areas with False by means of the nearest friction distance. Friction distance estimated using the
                        upstream area as weight (i.e. drains with a bigger upstream area have a lower friction)
                        the spreadzone operator is used in this case.
        up_area=None -- provide the upstream area (if not assigned a guesstimate is prepared, assuming the LDD covers a
                        full catchment area)
        neg_HAND=None -- if set to 1, HAND maps can have negative values when elevation outside of stream is lower than
                        stream (for example when there are natural embankments)
    Output:
        hand -- pcraster object float32, height, normalised to nearest stream
        dist -- distance to nearest stream measured in cell lengths
            according to D8 directions
    """
    if rivers is None:
        # prepare stream from a strahler threshold
        stream = pcr.ifthenelse(pcr.accuflux(ldd, 1) >= accuThreshold,
                                pcr.boolean(1), pcr.boolean(0))
    else:
        # convert stream network to boolean
        stream = pcr.boolean(pcr.cover(rivers, 0))
    # determine height in river (in DEM*100 unit as ordinal)
    height_river = pcr.ifthenelse(stream, pcr.ordinal(dem*100), 0)
    if basin is None:
        up_elevation = pcr.scalar(pcr.subcatchment(ldd, height_river))
    else:
        # use basin to allocate areas outside basin to the nearest stream. Nearest is weighted by upstream area
        if up_area is None:
            up_area = pcr.accuflux(ldd, 1)
        up_area = pcr.ifthen(stream, up_area)  # mask areas outside streams
        friction = 1./pcr.scalar(pcr.spreadzone(pcr.cover(pcr.ordinal(up_area), 0), 0, 0))
        # if basin, use nearest river within subcatchment, if outside basin, use weighted-nearest river
        up_elevation = pcr.ifthenelse(basin, pcr.scalar(pcr.subcatchment(ldd, height_river)), pcr.scalar(pcr.spreadzone(height_river, 0, friction)))
        # replace areas outside of basin by a spread zone calculation.
    # make negative HANDS also possible
    if neg_HAND == 1:
        hand = (pcr.scalar(pcr.ordinal(dem*100))-up_elevation)/100  # convert back to float in DEM units
    else:
        hand = pcr.max(pcr.scalar(pcr.ordinal(dem*100))-up_elevation, 0)/100  # convert back to float in DEM units
    dist = pcr.ldddist(ldd, stream, 1)  # compute horizontal distance estimate
    return hand, dist
Example #22
def pointPerClass(classMap):
    """ Select a single random point from each class in classMap"""
    rand1 = 100 * pcr.uniform(pcr.boolean(classMap))
    rand2 = 100 * pcr.uniform(pcr.boolean(classMap))
    rand3 = 100 * pcr.uniform(pcr.boolean(classMap))

    randomMap = pcr.scalar(classMap) * rand1 * rand2 * rand3
    pointMap = pcr.ifthen(randomMap == pcr.areaminimum(randomMap, classMap),
                          classMap)
    nrPointsPerClass = pcr.areatotal(pcr.scalar(pcr.boolean(pointMap)),
                                     classMap)
    assert pcr.cellvalue(pcr.mapmaximum(nrPointsPerClass), 1)[0] == 1  # cellvalue indices are 1-based
    return pointMap
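
A usage sketch with a hypothetical nominal class map "classes.map":

import pcraster as pcr

pcr.setclone("classes.map")
classes = pcr.nominal(pcr.readmap("classes.map"))
points = pointPerClass(classes)  # one random cell per class
pcr.report(points, "sample_points.map")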
Example #23
    def __init__(self, cloneMapFileName, netcdf_attribute_description):

        # cloneMap
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))
        
        # properties of the clone map
        # - numbers of rows and columns
        rows = pcr.clone().nrRows()
        cols = pcr.clone().nrCols()
        # - cell size in arc minutes, rounded to one decimal place
        cellSizeInArcMin = round(pcr.clone().cellSize() * 60.0, 1)
        # - cell sizes in arc degrees for the longitude and latitude directions
        deltaLon = cellSizeInArcMin / 60.
        deltaLat = deltaLon
        # - coordinates of the upper left corner, rounded to two decimal places to avoid rounding errors during (future) resampling
        x_min = round(pcr.clone().west(), 2)
        y_max = round(pcr.clone().north(), 2)
        # - coordinates of the lower right corner, rounded likewise
        x_max = round(x_min + cols*deltaLon, 2)
        y_min = round(y_max - rows*deltaLat, 2)
        
        # cell centres coordinates
        self.longitudes = np.arange(x_min + deltaLon/2., x_max, deltaLon)
        self.latitudes  = np.arange(y_max - deltaLat/2., y_min,-deltaLat)

        #~ # cell centres coordinates
        #~ self.longitudes = np.linspace(x_min + deltaLon/2., x_max - deltaLon/2., cols)
        #~ self.latitudes  = np.linspace(y_max - deltaLat/2., y_min + deltaLat/2., rows)
        
        #~ # cell centres coordinates (latitudes and longitudes, directly from the clone maps)
        #~ self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        #~ self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
        
        # netCDF format and attributes:
        important_information = "The dataset was resampled to "+str(cellSizeInArcMin)+" arc minute resolution. "
        self.format = 'NETCDF3_CLASSIC'
        self.attributeDictionary = {}
        self.attributeDictionary['institution']  = "European Commission - JRC and Department of Physical Geography, Utrecht University"
        self.attributeDictionary['title'      ]  = "EFAS-Meteo 5km for Rhine-Meuse - resampled to "+str(cellSizeInArcMin)+" arc minute resolution. "
        self.attributeDictionary['source'     ]  = "5km Gridded Meteo Database (C) European Commission - JRDC, 2014"
        self.attributeDictionary['history'    ]  = "The data were provided by Ad de Roo ([email protected]) on 19 November 2014 and then converted by Edwin H. Sutanudjaja ([email protected]) to netcdf. "
        self.attributeDictionary['history'    ] += important_information 
        self.attributeDictionary['references' ]  = "Ntegeka et al., 2013. EFAS-Meteo: A European daily high-resolution gridded meteorological data set. JRC Technical Reports. doi: 10.2788/51262"
        self.attributeDictionary['comment'    ]  = "Please use this dataset only for Hyper-Hydro test bed experiments. " 
        self.attributeDictionary['comment'    ] += "For using it and publishing it, please acknowledge its source: 5km Gridded Meteo Database (C) European Commission - JRDC, 2014 and its reference: Ntegeka et al., 2013 (doi: 10.2788/51262). "
        self.attributeDictionary['comment'    ] += "The original data provided by JRC are in European ETRS projection, 5km grid; http://en.wikipedia.org/wiki/European_grid. "
        self.attributeDictionary['comment'    ] += important_information 

        self.attributeDictionary['description']  = netcdf_attribute_description
Example #24
def map_edges(clone):
    """Boolean map true map edges, false elsewhere"""

    pcr.setglobaloption('unittrue')
    xmin, xmax, ymin, ymax, nr_rows, nr_cols, cell_size = clone_attributes()
    clone = pcr.ifthenelse(pcr.defined(clone), pcr.boolean(1), pcr.boolean(1))  # both branches True: yields a spatial all-True map
    x_coor = pcr.xcoordinate(clone)
    y_coor = pcr.ycoordinate(clone)
    north = y_coor > (ymax - cell_size)
    south = y_coor < (ymin + cell_size)
    west = x_coor < (xmin + cell_size)
    east = x_coor > (xmax - cell_size)
    edges = north | south | west | east
    return edges
Example #25
    def relocation(self, reloc_area):
        """Sum up the cost and st.dev of dike relocation."""
        reloc_area = pcr.cover(pcr.boolean(reloc_area), pcr.boolean(0))
        buffer_m = 1.5 * pcr.clone().cellSize()  # buffer 1 cell in 8 directions
        reloc_buffered = pcr.spreadmaxzone(reloc_area, 0, 1, buffer_m)
        reloc_length = pcr.ifthen(reloc_buffered, self.dike_length)
        cost_spatial = 0.001 * reloc_length * self.dike_reloc_distr.mean
        std_spatial = 0.001 * reloc_length * self.dike_reloc_distr.stddev

        area = pcr.ifthen(reloc_area, pcr.boolean(1))
        cost = area_total_value(cost_spatial, area)
        std = area_total_value(std_spatial, area)

        return cost, std
Example #27
def mapEllipse(r_a, r_b, azimuth, latC, lonC, lat, lon, numberStepsArc= 360):
	'''Maps an ellipse for the given spatial attributes using a predefined number \
of steps along the arc:
 r_a: length of semi-major axis
 r_b: length of semi-minor axis
 azimuth: angle of semi_major axis with the centroid to north
 latC: latitude of centroid
 lonC: longitude of centroid
 lat: map of latitude
 lon: map of longitude
 numberStepsArc= 360 (default)

where r= r_a*r_b/(r_a**2*sin(theta)**2+r_b**2*cos(theta)**2)**0.5 \n'''
	#-compute theta as the azimuth and rotated towards the semi-major axis
	theta= np.linspace(0.,360.-360./numberStepsArc,numberStepsArc)
	radius= (r_a*r_b)/(r_a**2*(np.sin(theta*deg2Rad))**2+r_b**2*(np.cos(theta*deg2Rad))**2)**0.5
	theta= (theta+azimuth) % 360.
	lat1, lon1= getDestinationPoint(latC, lonC, radius, theta)
	ellipse= pcr.boolean(0)
	for iCnt in range(lat1.size):
		ellipse= ellipse | \
			(pcr.abs(lat-lat1[iCnt]) == pcr.mapminimum(pcr.abs(lat-lat1[iCnt]))) & \
			(pcr.abs(lon-lon1[iCnt]) == pcr.mapminimum(pcr.abs(lon-lon1[iCnt])))
	ellipse= pcr.ifthen(ellipse, ellipse)
	#-return ellipse
	return ellipse
Example #28
  def test_001(self):
      """ nonspatial and pcr2numpy """
      nrRows, nrCols, cellSize = 5, 8, 1.0
      west, north = 0.0, 0.0
      pcraster.setclone(nrRows, nrCols, cellSize, west, north)

      value = 1.23456
      nonspatial = pcraster.scalar(value)
      array = pcraster.pcr2numpy(nonspatial, numpy.nan)

      for row in range(0, nrRows):
          for col in range(0, nrCols):
              self.assertAlmostEqual(array[row][col], value)

      value = 3
      nonspatial = pcraster.nominal(value)
      array = pcraster.pcr2numpy(nonspatial, numpy.nan)

      for row in range(0, nrRows):
          for col in range(0, nrCols):
              self.assertAlmostEqual(array[row][col], value)

      value = True
      nonspatial = pcraster.boolean(value)
      array = pcraster.pcr2numpy(nonspatial, numpy.nan)

      for row in range(0, nrRows):
          for col in range(0, nrCols):
              self.assertAlmostEqual(array[row][col], value)
Example #29
File: test.py  Project: gaoshuai/pcraster
  def testNonSpatialConversions(self):
    nonSpatialValue = pcraster.mapmaximum(pcraster.readmap("map2asc_PCRmap.map"))
    # Ordinal.
    nonSpatial = pcraster.ordinal(nonSpatialValue)
    self.assertEqual(bool(nonSpatial), True)
    self.assertEqual(int(nonSpatial), 124)
    self.assertEqual(float(nonSpatial), 124.0)

    # Nominal.
    nonSpatial = pcraster.nominal(nonSpatialValue)
    self.assertEqual(bool(nonSpatial), True)
    self.assertEqual(int(nonSpatial), 124)
    self.assertEqual(float(nonSpatial), 124)

    # Boolean.
    nonSpatial = pcraster.boolean(nonSpatialValue)
    self.assertEqual(bool(nonSpatial), True)
    self.assertEqual(int(nonSpatial), 1)
    self.assertEqual(float(nonSpatial), 1.0)

    # Scalar.
    nonSpatial = pcraster.scalar(pcraster.mapmaximum("abs_Expr.map"))
    self.assertEqual(bool(nonSpatial), True)
    self.assertEqual(int(nonSpatial), 14)
    self.assertEqual(float(nonSpatial), 14.0)
Example #30
    def __init__(self, configuration, model, specificAttributeDictionary=None):

        # Set clone map
        pcr.setclone(configuration.cloneMap)
        cloneMap = pcr.boolean(1.0)  # map with all cell values equal to 1

        # Retrieve latitudes and longitudes from clone map
        self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
        self.crops  = np.arange(1, model.nCrop + 1)
        self.depths = np.arange(1, model.nComp + 1)
        
        # Let users decide their preference regarding latitude order
        self.netcdf_y_orientation_follow_cf_convention = False
        if 'netcdf_y_orientation_follow_cf_convention' in configuration.reportingOptions and \
            configuration.reportingOptions['netcdf_y_orientation_follow_cf_convention'] == "True":
            msg = "Latitude (y) orientation for output netcdf files starts from bottom to top."
            self.netcdf_y_orientation_follow_cf_convention = True
            self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))
        
        # Set general netcdf attributes (based on the information given in the ini/configuration file) 
        self.set_general_netcdf_attributes(configuration, specificAttributeDictionary)
        
        # netcdf format and zlib setup 
        self.format = 'NETCDF3_CLASSIC'
        self.zlib = False
        if "formatNetCDF" in configuration.reportingOptions.keys():
            self.format = str(configuration.reportingOptions['formatNetCDF'])
        if "zlib" in configuration.reportingOptions.keys():
            if configuration.reportingOptions['zlib'] == "True": self.zlib = True
Example #31
def snaptomap(points, mmap):
    """
    Snap the points in _points_ to nearest non missing
    values in _mmap_. Can be used to move gauge locations
    to the nearest rivers.

    Input:
        - points - map with points to move
        - mmap - map with points to move to

    Return:
        - map with shifted points
    """
    points = pcr.cover(points, 0)
    # Create unique id map of mmap cells
    unq = pcr.nominal(pcr.cover(pcr.uniqueid(pcr.defined(mmap)), pcr.scalar(0.0)))
    # Now fill holes in mmap with values indicating the closest mmap cell.
    dist_cellid = pcr.scalar(pcr.spreadzone(unq, 0, 1))
    # Get map with values at locations in points with the closest mmap cell
    dist_cellid = pcr.ifthenelse(points > 0, dist_cellid, 0)
    # Spread this out
    dist_fill = pcr.spreadzone(pcr.nominal(dist_cellid), 0, 1)
    # Find the new (moved) locations
    npt = pcr.uniqueid(pcr.boolean(pcr.ifthen(dist_fill == unq, unq)))
    # Now recreate the original value in the points maps
    ptcover = pcr.spreadzone(pcr.cover(points, 0), 0, 1)
    # Now get the org point value in the pt map
    nptorg = pcr.ifthen(npt > 0, ptcover)

    return nptorg
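
A usage sketch, assuming hypothetical "gauges.map" (non-zero at gauge cells) and "rivers.map" (non-missing on the river network) files:

import pcraster as pcr

pcr.setclone("rivers.map")
gauges = pcr.readmap("gauges.map")
rivers = pcr.readmap("rivers.map")
snapped = snaptomap(pcr.ordinal(gauges), rivers)
pcr.report(snapped, "gauges_snapped.map")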
Example #32
    def __init__(self, iniItems):

        # cloneMap
        pcr.setclone(iniItems.cloneMap)
        cloneMap = pcr.boolean(1.0)

        # latitudes and longitudes
        self.latitudes = np.unique(
            pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # TODO: Let users decide their preference regarding latitude order.
        #       Consult with Stefanie regarding CF convention.

        # netCDF format and attributes:
        self.attributeDictionary = {}
        self.attributeDictionary['institution'] = iniItems.globalOptions[
            'institution']
        self.attributeDictionary['title'] = iniItems.globalOptions['title']
        self.attributeDictionary['description'] = iniItems.globalOptions[
            'description']

        # netcdf format and zlib setup
        self.format = 'NETCDF3_CLASSIC'
        self.zlib = False
        if "formatNetCDF" in iniItems.reportingOptions.keys():
            self.format = str(iniItems.reportingOptions['formatNetCDF'])
        if "zlib" in iniItems.reportingOptions.keys():
            if iniItems.reportingOptions['zlib'] == "True": self.zlib = True
Example #36
def getCellValues(pointMap, mapList=[], columns=[]):
    """ Get the cell values of the maps in mapList at the locations of pointMap
    """
    #-determine where the indices are True
    arr = pcr.pcr2numpy(pcr.boolean(pointMap), 0).astype('bool')
    indices = np.where(arr)

    #-loop over the points in pointMap
    pcr.setglobaloption('unitcell')
    ll = []
    for rowIdx, colIdx in zip(indices[0], indices[1]):
        line = []
        line.append(rowIdx)
        line.append(colIdx)
        for pcrMap in mapList:
            line.append(
                pcr.cellvalue(pcrMap, int(rowIdx + 1), int(colIdx + 1))[0])
        ll.append(line)

    #-optionally add column names
    if len(columns) == len(mapList):
        columnNames = ['rowIdx', 'colIdx'] + columns
    else:
        columnNames = ['rowIdx', 'colIdx'] + \
                    ['map' + str(ii) for ii in range(1, 1 + len(mapList), 1)]

    #-return as Pandas DataFrame
    return pd.DataFrame(np.array(ll), columns=columnNames)
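
A usage sketch with hypothetical "gauges.map" (non-zero at the points of interest) and "dem.map" files:

import pcraster as pcr

pcr.setclone("dem.map")
points = pcr.readmap("gauges.map")
dem = pcr.readmap("dem.map")
df = getCellValues(points, mapList=[dem], columns=['elevation'])
print(df.head())  # columns: rowIdx, colIdx, elevation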
Example #37
def pcr2col(listOfMaps, MV, selection='ONE_TRUE'):
    """converts a set of maps to a column array: X, Y, map values
       selection can be set to ALL, ALL_TRUE, ONE_TRUE"""

    #-intersect all maps and get X and Y coordinates
    intersection = pcr.boolean(pcr.cover(listOfMaps[0], 0))
    for mapX in listOfMaps[1:]:
        intersection = intersection | pcr.boolean(pcr.cover(mapX, 0))
    pcr.setglobaloption("unittrue")
    xCoor = pcr.ifthen(intersection, pcr.xcoordinate(intersection))
    yCoor = pcr.ifthen(intersection, pcr.ycoordinate(intersection))
    pcr.setglobaloption("unitcell")

    #-initiate outArray with xCoor and yCoor
    xCoorArr = pcr.pcr2numpy(xCoor, MV)
    yCoorArr = pcr.pcr2numpy(yCoor, MV)
    nRows, nCols = xCoorArr.shape
    nrCells = nRows * nCols
    outArray = np.hstack((xCoorArr.reshape(nrCells, 1),
                          yCoorArr.reshape(nrCells, 1)))

    #-add subsequent maps
    for mapX in listOfMaps:
        arr = pcr.pcr2numpy(mapX, MV).reshape(nrCells, 1)
        outArray = np.hstack((outArray, arr))

    #-subset output based on selection criterion
    ll = []
    nrMaps = len(listOfMaps)
    if selection == 'ONE_TRUE':
        for line in outArray:
            nrMV = len(line[line == MV])
            if nrMV < nrMaps:
                ll.append(line)
            else:
                pass
        outArray = np.array(ll)
    elif selection == 'ALL_TRUE':
        for line in outArray:
            if MV not in line:
                ll.append(line)
            else:
                pass
        outArray = np.array(ll)
    elif selection == 'ALL':
        pass
    return outArray
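
A usage sketch with hypothetical "dem.map" and "mask.map" files, keeping only rows where every map has a value:

import pcraster as pcr

pcr.setclone("dem.map")
dem = pcr.readmap("dem.map")
mask = pcr.readmap("mask.map")
table = pcr2col([dem, pcr.scalar(mask)], MV=-9999, selection='ALL_TRUE')
print(table[:5])  # columns: X, Y, dem, mask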
Example #38
    def _parseLine(self, line, lineNumber, nrColumns, externalNames, keyDict):

        line = re.sub("\n", "", line)
        line = re.sub("\t", " ", line)
        result = None

        # read until first comment
        content = ""
        content, sep, comment = line.partition("#")
        if len(content) > 1:
            collectionVariableName, sep, tail = content.partition(" ")
            if collectionVariableName == self._varName:
                tail = tail.strip()
                key, sep, variableValue = tail.rpartition(" ")

                if len(key.split()) != nrColumns:
                    tmp = re.sub(r"\(|\)|,", "", str(key))
                    msg = "Error reading %s line %d, order of columns given (%s columns) does not match expected order of %s columns" % (
                        self._fileName, lineNumber, len(key.split()) + 2,
                        int(nrColumns) + 2)
                    raise ValueError(msg)

                variableValue = re.sub('\"', "", variableValue)

                tmp = None
                try:
                    tmp = int(variableValue)
                    if self._dataType == pcraster.Boolean:
                        tmp = pcraster.boolean(tmp)
                    elif self._dataType == pcraster.Nominal:
                        tmp = pcraster.nominal(tmp)
                    elif self._dataType == pcraster.Ordinal:
                        tmp = pcraster.ordinal(tmp)
                    elif self._dataType == pcraster.Ldd:
                        tmp = pcraster.ldd(tmp)
                    else:
                        msg = "Conversion to %s failed" % (self._dataType)
                        raise Exception(msg)
                except ValueError as e:
                    try:
                        tmp = float(variableValue)
                        if self._dataType == pcraster.Scalar:
                            tmp = pcraster.scalar(tmp)
                        elif self._dataType == pcraster.Directional:
                            tmp = pcraster.directional(tmp)
                        else:
                            msg = "Conversion to %s failed" % (self._dataType)
                            raise Exception(msg)

                    except ValueError as e:
                        variableValue = re.sub("\\\\", "/", variableValue)
                        variableValue = variableValue.strip()
                        path = os.path.normpath(variableValue)
                        try:
                            tmp = pcraster.readmap(path)
                        except RuntimeError as e:
                            msg = "Error reading %s line %d, %s" % (
                                self._fileName, lineNumber, e)
                            raise ValueError(msg)
Example #39
    def __init__(self, iniItems, specificAttributeDictionary=None):

        # cloneMap
        pcr.setclone(iniItems.cloneMap)
        cloneMap = pcr.boolean(1.0)

        # latitudes and longitudes
        self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[
            ::-1
        ]
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # Let users decide their preference regarding latitude order.
        self.netcdf_y_orientation_follow_cf_convention = False
        if (
            "netcdf_y_orientation_follow_cf_convention"
            in list(iniItems.reportingOptions.keys())
            and iniItems.reportingOptions["netcdf_y_orientation_follow_cf_convention"]
            == "True"
        ):
            msg = "Latitude (y) orientation for output netcdf files start from the bottom to top."
            self.netcdf_y_orientation_follow_cf_convention = True
            self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))

        # set the general netcdf attributes (based on the information given in the ini/configuration file)
        self.set_general_netcdf_attributes(iniItems, specificAttributeDictionary)

        # netcdf format and zlib setup
        self.format = "NETCDF3_CLASSIC"
        self.zlib = False
        if "formatNetCDF" in list(iniItems.reportingOptions.keys()):
            self.format = str(iniItems.reportingOptions["formatNetCDF"])
        if "zlib" in list(iniItems.reportingOptions.keys()):
            if iniItems.reportingOptions["zlib"] == "True":
                self.zlib = True

        # if given in the ini file, use the netcdf as given in the section 'specific_attributes_for_netcdf_output_files'
        if "specific_attributes_for_netcdf_output_files" in iniItems.allSections:
            for key in list(
                iniItems.specific_attributes_for_netcdf_output_files.keys()
            ):

                self.attributeDictionary[
                    key
                ] = iniItems.specific_attributes_for_netcdf_output_files[key]

                if self.attributeDictionary[key] == "None":
                    self.attributeDictionary[key] = ""

                if key == "history" and self.attributeDictionary[key] == "Default":
                    self.attributeDictionary[
                        key
                    ] = "created on " + datetime.datetime.today().isoformat(" ")
                if self.attributeDictionary[key] == "Default" and (
                    key == "date_created" or key == "date_issued"
                ):
                    self.attributeDictionary[key] = datetime.datetime.today().isoformat(
                        " "
                    )
Example #40
def derive_HAND(dem, ldd, accuThreshold, rivers=None, basin=None):
    """
    Function derives Height-Above-Nearest-Drain.
    See http://www.sciencedirect.com/science/article/pii/S003442570800120X
    Input:
        dem -- pcraster object float32, elevation data
        ldd -- pcraster object direction, local drain directions
        accuThreshold -- upstream amount of cells as threshold for river
            delineation
        rivers=None -- you can provide a rivers layer here. Pixels that are
                        identified as river should have a value > 0, other
                        pixels a value of zero.
        basin=None -- set a boolean pcraster map where areas with True are estimated using the nearest drain in ldd distance
                        and areas with False by means of the nearest friction distance. Friction distance estimated using the
                        upstream area as weight (i.e. drains with a bigger upstream area have a lower friction)
                        the spreadzone operator is used in this case.
    Output:
        hand -- pcraster object float32, height, normalised to nearest stream
        dist -- distance to nearest stream measured in cell lengths
            according to D8 directions
    """
    if rivers is None:
        stream = pcr.ifthenelse(
            pcr.accuflux(ldd, 1) >= accuThreshold, pcr.boolean(1), pcr.boolean(0)
        )
    else:
        stream = pcr.boolean(pcr.cover(rivers, 0))

    height_river = pcr.ifthenelse(stream, pcr.ordinal(dem * 100), 0)
    if basin is None:
        up_elevation = pcr.scalar(pcr.subcatchment(ldd, height_river))
    else:
        drainage_surf = pcr.ifthen(stream, pcr.accuflux(ldd, 1))  # mask accuflux to stream cells (rivers may be None here)
        weight = 1.0 / pcr.scalar(
            pcr.spreadzone(pcr.cover(pcr.ordinal(drainage_surf), 0), 0, 0)
        )
        up_elevation = pcr.ifthenelse(
            basin,
            pcr.scalar(pcr.subcatchment(ldd, height_river)),
            pcr.scalar(pcr.spreadzone(height_river, 0, weight)),
        )
        # replace areas outside of basin by a spread zone calculation.
    hand = pcr.max(pcr.scalar(pcr.ordinal(dem * 100)) - up_elevation, 0) / 100
    dist = pcr.ldddist(ldd, stream, 1)

    return hand, dist
Example #42
0
  def _parseLine(self, line, lineNumber, nrColumns, externalNames, keyDict):

    line = re.sub("\n","",line)
    line = re.sub("\t"," ",line)
    result = None

    # read until first comment
    content = ""
    content,sep,comment = line.partition("#")
    if len(content) > 1:
      collectionVariableName, sep, tail = content.partition(" ")
      if collectionVariableName == self._varName:
        tail = tail.strip()
        key, sep, variableValue = tail.rpartition(" ")

        if len(key.split()) != nrColumns:
          tmp = re.sub("\(|\)|,","",str(key))
          msg = "Error reading %s line %d, order of columns given (%s columns) does not match expected order of %s columns" %(self._fileName, lineNumber, len(key.split()) + 2, int(nrColumns) + 2)
          raise ValueError(msg)

        variableValue = re.sub('\"', "", variableValue)

        tmp = None
        try:
          tmp = int(variableValue)
          if self._dataType == pcraster.Boolean:
            tmp = pcraster.boolean(tmp)
          elif self._dataType == pcraster.Nominal:
            tmp = pcraster.nominal(tmp)
          elif self._dataType == pcraster.Ordinal:
            tmp = pcraster.ordinal(tmp)
          elif self._dataType == pcraster.Ldd:
            tmp = pcraster.ldd(tmp)
          else:
            msg = "Conversion to %s failed" % (self._dataType)
            raise Exception(msg)
        except ValueError as e:
          try:
            tmp = float(variableValue)
            if self._dataType == pcraster.Scalar:
              tmp = pcraster.scalar(tmp)
            elif self._dataType == pcraster.Directional:
              tmp = pcraster.directional(tmp)
            else:
              msg = "Conversion to %s failed" % (self._dataType)
              raise Exception(msg)

          except ValueError as e:
            variableValue = re.sub("\\\\","/",variableValue)
            variableValue = variableValue.strip()
            path = os.path.normpath(variableValue)
            try:
              tmp = pcraster.readmap(path)
            except RuntimeError as e:
              msg = "Error reading %s line %d, %s" %(self._fileName, lineNumber, e)
              raise ValueError(msg)
Example #43
0
    def __init__(self,cloneMapFileName,netcdf_attribute_description):
        # cloneMap
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))
        
        # latitudes and longitudes
        self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # netCDF format and attributes:
        self.format = 'NETCDF3_CLASSIC'
        self.attributeDictionary = {}
        self.attributeDictionary['institution']  = "European Commission - JRC"
        self.attributeDictionary['title'      ]  = "EFAS-Meteo 5km for the Rhine-Meuse basin"
        self.attributeDictionary['source'     ]  = "5km Gridded Meteo Database (C) European Commission - JRDC, 2014"
        self.attributeDictionary['history'    ]  = "The data were provided by Ad de Roo ([email protected]) on 19 November 2014 and then converted by Edwin H. Sutanudjaja ([email protected]) to netcdf files on 27 November 2014."
        self.attributeDictionary['references' ]  = "Ntegeka et al., 2013. EFAS-Meteo: A European daily high-resolution gridded meteorological data set. JRC Technical Reports. doi: 10.2788/51262"
        self.attributeDictionary['comment'    ]  = "Please use this dataset only for Hyper-Hydro test bed experiments. " 
        self.attributeDictionary['comment'    ] += "For using it and publishing it, please acknowledge its source: 5km Gridded Meteo Database (C) European Commission - JRDC, 2014 and its reference: Ntegeka et al., 2013 (doi: 10.2788/51262). "
        self.attributeDictionary['comment'    ] += "The data are in European ETRS projection, 5km grid; http://en.wikipedia.org/wiki/European_grid. "

        self.attributeDictionary['description']  = netcdf_attribute_description
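For context, a hedged sketch of how such an attribute dictionary is typically applied as global attributes with the netCDF4 API; the output file name is illustrative, and the original class presumably does this in a separate method:

import netCDF4

nc = netCDF4.Dataset("output.nc", "w", format="NETCDF3_CLASSIC")
for name, value in attributeDictionary.items():   # e.g. self.attributeDictionary
    nc.setncattr(name, value)                     # write each global attribute
nc.close()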
Example #44
0
File: test.py Project: gaoshuai/pcraster
  def testIfThenElse(self):
    pcraster.setclone("and_Expr1.map")
    exceptionThrown = False
    try:
      result = pcraster.ifthenelse(1.0 == 2.0, 3.0, 4.0)
    except RuntimeError as exception:
      message = str(exception)
      self.assertTrue(message.find("conversion function to pick a data type") != -1)
      exceptionThrown = True
    self.assertTrue(exceptionThrown)

    result = pcraster.ifthenelse(pcraster.boolean(1.0 == 2.0), \
         pcraster.scalar(3.0), pcraster.scalar(4.0))
    self.assertEqual(pcraster.cellvalue(result, 1)[0], 4.0)
Example #45
0
def upscale_riverlength(ldd, order, factor):
    """
    Upscales the river length using 'factor'.
    The resulting maps can be resampled (e.g. using resample.exe) by factor and
    should include the accurate length as determined with the original, higher
    resolution maps. This function is **deprecated**; use are_riverlength
    instead, as this version is very slow for large maps.

    Input:
        - ldd
        - order - minimum streamorder to include
        - factor - upscaling factor

    Output:
        - distance per factor cells
    """

    strorder = pcr.streamorder(ldd)
    strorder = pcr.ifthen(strorder >= order, strorder)
    dist = pcr.cover(
        pcr.max(
            pcr.celllength(), pcr.ifthen(pcr.boolean(strorder), pcr.downstreamdist(ldd))
        ),
        0,
    )
    totdist = pcr.max(
        pcr.ifthen(
            pcr.boolean(strorder),
            pcr.windowtotal(
                pcr.ifthen(pcr.boolean(strorder), dist), pcr.celllength() * factor
            ),
        ),
        dist,
    )

    return totdist
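A short usage sketch, with illustrative file names and parameter values:

pcr.setclone("ldd.map")                                  # illustrative
ldd = pcr.lddrepair(pcr.ldd(pcr.readmap("ldd.map")))
# total stream length (order >= 4) within windows of 10 cells
dist_per_block = upscale_riverlength(ldd, order=4, factor=10)
pcr.report(dist_per_block, "riverlength_10x.map")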
Example #46
0
def interpolategauges(inputmap, method):
    """
    Interpolate time series gauge data onto a grid using different methods.

    inputmap: map with point data for a single timestep
    method: string indicating the method:
        inv - inverse distance interpolation
        pol - polygon (nearest-gauge) interpolation via spreadzone; this is
              also the fallback for any other method string

    input: inputmap, method
    returns: interpolated map
    """

    if method == "inv":
        result = pcr.inversedistance(1, inputmap, 3, 0, 0)
    elif method == "pol":
        Unq = pcr.uniqueid(pcr.boolean(inputmap + 1))
        result = pcr.spreadzone(pcr.ordinal(pcr.cover(Unq, 0)), 0, 1)
        result = pcr.areaaverage(inputmap, result)
    else:
        Unq = pcr.uniqueid(pcr.boolean(inputmap + 1))
        result = pcr.spreadzone(pcr.ordinal(pcr.cover(Unq, 0)), 0, 1)
        result = pcr.areaaverage(inputmap, result)

    return result
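Usage sketch; gauges.map is an illustrative file in which non-gauge cells are missing values:

gauges = pcr.readmap("gauges.map")             # scalar point data, one timestep
precip_inv = interpolategauges(gauges, "inv")  # inverse distance
precip_pol = interpolategauges(gauges, "pol")  # nearest-gauge polygons
pcr.report(precip_inv, "precip.map")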
Example #47
0
  def test_003(self):
      """ pcr2numpy should not run out of memory """

      nrRows, nrCols, cellSize = 200, 200, 1.0
      west, north = 0.0, 0.0
      pcraster.setclone(nrRows, nrCols, cellSize, west, north)

      raster = pcraster.uniform(1)

      process = psutil.Process(os.getpid())
      mem = process.memory_info()
      init_mem = mem.rss / 2**10  # resident set size in KiB

      nr_iterations = 50
      mem_increase = False

      # small memory increase can occur at runtime
      # allow for, but less than iterations * size(raster)
      max_diff = 400

      for it in range(0, nr_iterations):
        pcraster.pcr2numpy(raster, numpy.nan)
        mem = process.memory_info()
        curr_mem = mem.rss / 2**10
        if curr_mem - init_mem > max_diff:
          mem_increase = True

      raster = pcraster.spatial(pcraster.boolean(1))

      for it in range(0, nr_iterations):
        pcraster.pcr2numpy(raster, numpy.nan)
        mem = process.memory_info()
        curr_mem = mem.rss / 2**10
        if curr_mem - init_mem > max_diff:
          mem_increase = True

      raster = pcraster.nominal(pcraster.uniform(1) * 10)

      for it in range(0, nr_iterations):
        pcraster.pcr2numpy(raster, numpy.nan)
        mem = process.memory_info()
        curr_mem = mem.rss / 2**10
        if curr_mem - init_mem > max_diff:
          mem_increase = True

      self.assertEqual(mem_increase, False)
Example #48
0
def riverlength(ldd, order):
    """
    Determines the length of a river using the ldd;
    only determined for streams of order *order* and higher.

    Input:
        - ldd, order (minimum streamorder)

    Returns:
        - totallength, lengthpercell, streamorder
    """
    strorder = pcr.streamorder(ldd)
    strorder = pcr.ifthen(strorder >= pcr.ordinal(order), strorder)
    dist = pcr.max(
        pcr.celllength(), pcr.ifthen(pcr.boolean(strorder), pcr.downstreamdist(ldd))
    )

    return pcr.catchmenttotal(pcr.cover(dist, 0), ldd), dist, strorder
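Usage sketch, assuming an ldd prepared as in the earlier examples:

totlen, dist, strorder = riverlength(ldd, order=3)
pcr.report(totlen, "riverlength_total.map")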
Example #49
0
    def __init__(self, cloneMapFile, attribute=None, cellSizeInArcMinutes=None):
        # cloneMap
        # - the cloneMap must be at 5 arc min resolution
        cloneMap = pcr.readmap(cloneMapFile)
        cloneMap = pcr.boolean(1.0)
        
        # latitudes and longitudes
        self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        #~ # properties of the clone map
        #~ # - number of rows and columns
        #~ self.nrRows       = np.round(pcr.clone().nrRows())    
        #~ self.nrCols       = np.round(pcr.clone().nrCols())  
        #~ # - upper right coordinate, unit: arc degree ; must be integer (without decimals)
        #~ self.minLongitude = np.round(pcr.clone().west() , 0)         
        #~ self.maxLatitude  = np.round(pcr.clone().north(), 0)
        #~ # - cell resolution, unit: arc degree
        #~ self.cellSize     = pcr.clone().cellSize()
        #~ if cellSizeInArcMinutes != None: self.cellSize = cellSizeInArcMinutes / 60.0 
        #~ # - lower right coordinate, unit: arc degree ; must be integer (without decimals)
        #~ self.maxLongitude = np.round(self.minLongitude + self.cellSize*self.nrCols, 0)         
        #~ self.minLatitude  = np.round(self.maxLatitude  - self.cellSize*self.nrRows, 0)
        #~ 
        #~ # latitudes and longitudes for netcdf files
        #~ latMin = self.minLatitude  + self.cellSize / 2
        #~ latMax = self.maxLatitude  - self.cellSize / 2
        #~ lonMin = self.minLongitude + self.cellSize / 2
        #~ lonMax = self.maxLongitude - self.cellSize / 2
        #~ self.longitudes = np.arange(lonMin,lonMax+self.cellSize, self.cellSize)
        #~ self.latitudes=   np.arange(latMax,latMin-self.cellSize,-self.cellSize)
        
        # netCDF format and attributes:
        self.format = 'NETCDF4'
        self.attributeDictionary = {}
        if attribute == None:
            self.attributeDictionary['institution'] = "None"
            self.attributeDictionary['title'      ] = "None"
            self.attributeDictionary['description'] = "None"
        else:
            self.attributeDictionary = attribute
Example #50
0
def area_river_burnin(ldd, dem, order, Area):
    """
    Calculates the lowest value in a DEM for each area in an area map, for
    rivers of order *order*.

    Input:
        - ldd
        - dem
        - order
        - Area map

    Output:
        - dem
    """
    strorder = pcr.streamorder(ldd)
    strordermax = pcr.areamaximum(strorder, Area)
    maxordcell = pcr.ifthen(strordermax > order, strordermax)
    riverdem = pcr.areaminimum(dem, Area)

    return pcr.ifthen(pcr.boolean(maxordcell), riverdem)
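Usage sketch with illustrative inputs; Area would typically be a nominal map of subcatchments or resampling blocks:

areas = pcr.readmap("areas.map")               # illustrative area/class map
dem_burned = area_river_burnin(ldd, dem, order=5, Area=areas)
pcr.report(dem_burned, "dem_river.map")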
Example #51
0
File: test.py Project: gaoshuai/pcraster
  def testDeepCopyRasterNonSpatial(self):
    pcraster.setclone("validated/boolean_Result.map")

    raster = pcraster.boolean(1)
    tmp = copy.deepcopy(raster)
    self.assertEqual(True, self.arbitraryMapEquals(raster, tmp))
    raster1 = pcraster.nominal(1)
    tmp1 = copy.deepcopy(raster1)
    self.assertEqual(True, self.arbitraryMapEquals(raster1, tmp1))
    raster2 = pcraster.ordinal(1)
    tmp2 = copy.deepcopy(raster2)
    self.assertEqual(True, self.arbitraryMapEquals(raster2, tmp2))
    raster3 = pcraster.scalar(1)
    tmp3 = copy.deepcopy(raster3)
    self.assertEqual(True, self.arbitraryMapEquals(raster3, tmp3))
    raster4 = pcraster.directional(1)
    tmp4 = copy.deepcopy(raster4)
    self.assertEqual(True, self.arbitraryMapEquals(raster4, tmp4))
    raster5 = pcraster.ldd(1)
    tmp5 = copy.deepcopy(raster5)
    self.assertEqual(True, self.arbitraryMapEquals(raster5, tmp5))
Example #52
0
def detRealCellLength(ZeroMap, sizeinmetres):
    """
    Determine cell length. Always returns the length
    in meters.
    """

    if sizeinmetres:
        reallength = pcr.celllength()
        xl = pcr.celllength()
        yl = pcr.celllength()
    else:
        aa = pcr.ycoordinate(pcr.boolean(pcr.cover(ZeroMap + 1, 1)))
        yl, xl = lattometres(aa)

        xl = xl * pcr.celllength()
        yl = yl * pcr.celllength()
        # Average length for surface area calculations.

        reallength = (xl + yl) * 0.5

    return xl, yl, reallength
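Usage sketch; lattometres is a companion helper assumed to be available from the same module (it converts latitude to metres per degree):

zero_map = pcr.spatial(pcr.scalar(0.0))        # spatial map of zeros on the clone
xl, yl, reallength = detRealCellLength(zero_map, sizeinmetres=False)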
Example #53
0
    def identifyModelPixel(self,tmpDir,\
                                catchmentAreaAll,\
                                landMaskClass,\
                                xCoordinate,yCoordinate,id):     

        # TODO: Include an option to consider average discharge. 
        
        logger.info("Identify model pixel for the grdc station "+str(id)+".")
        
        # make a temporary directory:
        randomDir = self.makeRandomDir(tmpDir) 

        # coordinate of grdc station
        xCoord  = float(self.attributeGRDC["grdc_longitude_in_arc_degree"][str(id)])
        yCoord  = float(self.attributeGRDC["grdc_latitude_in_arc_degree"][str(id)])
        
        # identify the point at pcraster model
        point = pcr.ifthen((pcr.abs(xCoordinate - xCoord) == pcr.mapminimum(pcr.abs(xCoordinate - xCoord))) &\
                           (pcr.abs(yCoordinate - yCoord) == pcr.mapminimum(pcr.abs(yCoordinate - yCoord))), \
                            pcr.boolean(1))
        
        # expanding the point
        point = pcr.windowmajority(point, self.cell_size_in_arc_degree * 5.0)
        point = pcr.ifthen(catchmentAreaAll > 0, point)
        point = pcr.boolean(point)

        # values based on the model;
        modelCatchmentArea = pcr.ifthen(point, catchmentAreaAll)        # unit: km2
        model_x_coordinate = pcr.ifthen(point, xCoordinate)            # unit: arc degree
        model_y_coordinate = pcr.ifthen(point, yCoordinate)            # unit: arc degree
        
        # calculate (absolute) difference with GRDC data
        # - initiating all of them with the values of MV
        diffCatchArea = pcr.abs(pcr.scalar(vos.MV))        # difference between the model and grdc catchment area (unit: km2) 
        diffDistance  = pcr.abs(pcr.scalar(vos.MV))        # distance between the model pixel and grdc catchment station (unit: arc degree)
        diffLongitude = pcr.abs(pcr.scalar(vos.MV))        # longitude difference (unit: arc degree)
        diffLatitude  = pcr.abs(pcr.scalar(vos.MV))        # latitude difference (unit: arc degree)
        #
        # - calculate (absolute) difference with GRDC data
        try:
            diffCatchArea = pcr.abs(modelCatchmentArea-\
                            float(self.attributeGRDC["grdc_catchment_area_in_km2"][str(id)]))
        except:
            logger.info("The difference in the model and grdc catchment area cannot be calculated.")
        try:
            diffLongitude = pcr.abs(model_x_coordinate - xCoord)
        except:
            logger.info("The difference in longitude cannot be calculated.")
        try:
            diffLatitude  = pcr.abs(model_y_coordinate - yCoord)
        except:
            logger.info("The difference in latitude cannot be calculated.")
        try:
            diffDistance  = (diffLongitude**(2) + \
                              diffLatitude**(2))**(0.5)                 # TODO: calculate distance in meter
        except:
            logger.info("Distance cannot be calculated.")
        
        # identify  masks
        masks = pcr.ifthen(pcr.boolean(point), landMaskClass)                                          

        # export the difference to temporary files: maps and txt
        catchmentAreaMap = randomDir+"/"+vos.get_random_word()+".area.map"
        diffCatchAreaMap = randomDir+"/"+vos.get_random_word()+".dare.map"
        diffDistanceMap  = randomDir+"/"+vos.get_random_word()+".dist.map"
        diffLatitudeMap  = randomDir+"/"+vos.get_random_word()+".dlat.map"
        diffLongitudeMap = randomDir+"/"+vos.get_random_word()+".dlon.map"
        #
        maskMap          = randomDir+"/"+vos.get_random_word()+".mask.map"
        diffColumnFile   = randomDir+"/"+vos.get_random_word()+".cols.txt" # output
        #
        pcr.report(pcr.ifthen(point,modelCatchmentArea), catchmentAreaMap)
        pcr.report(pcr.ifthen(point,diffCatchArea     ), diffCatchAreaMap)
        pcr.report(pcr.ifthen(point,diffDistance      ), diffDistanceMap )
        pcr.report(pcr.ifthen(point,diffLatitude      ), diffLatitudeMap )
        pcr.report(pcr.ifthen(point,diffLongitude     ), diffLongitudeMap)
        pcr.report(pcr.ifthen(point,masks             ), maskMap)
        #
        cmd = 'map2col '+catchmentAreaMap +' '+\
                         diffCatchAreaMap +' '+\
                         diffDistanceMap  +' '+\
                         diffLongitudeMap +' '+\
                         diffLatitudeMap  +' '+\
                         maskMap+' '+diffColumnFile
        print(cmd); os.system(cmd) 
        
        # use R to sort the file
        cmd = 'R -f saveIdentifiedPixels.R '+diffColumnFile
        print(cmd); os.system(cmd) 
        
        try:
            # read the output file (from R)
            f = open(diffColumnFile+".sel") ; allLines = f.read() ; f.close()
        
            # split the content of the file into several lines
            allLines = allLines.replace("\r",""); allLines = allLines.split("\n")
        
            selectedPixel = allLines[0].split(";")

            model_longitude_in_arc_degree = float(selectedPixel[0])
            model_latitude_in_arc_degree  = float(selectedPixel[1])
            model_catchment_area_in_km2   = float(selectedPixel[2])
            model_landmask                = str(selectedPixel[7])
            
            log_message  = "Model pixel for grdc station "+str(id)+" is identified (lat/lon in arc degree): "
            log_message += str(model_latitude_in_arc_degree) + " ; " +  str(model_longitude_in_arc_degree)
            logger.info(log_message)
            
            self.attributeGRDC["model_longitude_in_arc_degree"][str(id)] = model_longitude_in_arc_degree 
            self.attributeGRDC["model_latitude_in_arc_degree"][str(id)]  = model_latitude_in_arc_degree  
            self.attributeGRDC["model_catchment_area_in_km2"][str(id)]   = model_catchment_area_in_km2   
            self.attributeGRDC["model_landmask"][str(id)]                = model_landmask                

        except:
        
            logger.info("Model pixel for grdc station "+str(id)+" can NOT be identified.")
        
        self.cleanRandomDir(randomDir)
Example #54
0
    def evaluateAllModelResults(self,globalCloneMapFileName,\
                                catchmentClassFileName,\
                                lddMapFileName,\
                                cellAreaMapFileName,\
                                pcrglobwb_output,\
                                analysisOutputDir="",\
                                tmpDir = None):     

        # temporary directory
        if tmpDir == None: tmpDir = self.tmpDir+"/edwin_grdc_"
        
        # output directory for all analyses for all stations
        analysisOutputDir   = str(analysisOutputDir)
        self.chartOutputDir = analysisOutputDir+"/chart/"
        self.tableOutputDir = analysisOutputDir+"/table/"
        #
        if analysisOutputDir == "": self.chartOutputDir = "chart/"
        if analysisOutputDir == "": self.tableOutputDir = "table/"
        #
        # make the chart and table directories:
        os.system('rm -r '+self.chartOutputDir+"*")
        os.system('rm -r '+self.tableOutputDir+"*")
        os.makedirs(self.chartOutputDir)
        os.makedirs(self.tableOutputDir)
        
        # cloneMap for all pcraster operations
        pcr.setclone(globalCloneMapFileName)
        cloneMap = pcr.boolean(1)
        self.cell_size_in_arc_degree = vos.getMapAttributesALL(globalCloneMapFileName)['cellsize']
        
        lddMap = pcr.lddrepair(pcr.readmap(lddMapFileName))
        cellArea = pcr.scalar(pcr.readmap(cellAreaMapFileName))
        
        # The landMaskClass map contains the nominal classes for all landmask regions. 
        landMaskClass = pcr.nominal(cloneMap)  # default: if catchmentClassFileName is not given
        if catchmentClassFileName != None:
            landMaskClass = pcr.nominal(pcr.readmap(catchmentClassFileName))

        # model catchment areas and cordinates
        catchmentAreaAll = pcr.catchmenttotal(cellArea, lddMap) / (1000*1000)  # unit: km2
        xCoordinate = pcr.xcoordinate(cloneMap)
        yCoordinate = pcr.ycoordinate(cloneMap)
        
        for id in self.list_of_grdc_ids: 

            logger.info("Evaluating simulated discharge to the grdc observation at "+str(self.attributeGRDC["id_from_grdc"][str(id)])+".")
            
            # identify model pixel
            self.identifyModelPixel(tmpDir,catchmentAreaAll,landMaskClass,xCoordinate,yCoordinate,str(id))

            # evaluate model results to GRDC data
            self.evaluateModelResultsToGRDC(str(id),pcrglobwb_output,catchmentClassFileName,tmpDir)
            
        # write the summary to a table 
        summary_file = analysisOutputDir+"summary.txt"
        #
        logger.info("Writing the summary for all stations to the file: "+str(summary_file)+".")
        #
        # prepare the file:
        summary_file_handle = open(summary_file,"w")
        #
        # write the header
        summary_file_handle.write( ";".join(self.grdc_dict_keys)+"\n")
        #
        # write the content
        for id in self.list_of_grdc_ids:
            rowLine  = ""
            for key in self.grdc_dict_keys: rowLine += str(self.attributeGRDC[key][str(id)]) + ";"   
            rowLine = rowLine[0:-1] + "\n"
            summary_file_handle.write(rowLine)
        summary_file_handle.close()           
Example #55
0
# set the pcraster clone, ldd, landmask, and cell area maps
msg = "Setting the clone, ldd, landmask, and cell area maps" + ":"
logger.info(msg)
# - clone 
clone_map_file = input_files['clone_map_05min']
pcr.setclone(clone_map_file)
# - ldd
ldd = vos.readPCRmapClone(input_files['ldd_map_05min'],
                          clone_map_file,
                          output_files['tmp_folder'],
                          None,
                          True)
ldd = pcr.lddrepair(pcr.ldd(ldd))
ldd = pcr.lddrepair(ldd)
# - landmask
landmask  = pcr.ifthen(pcr.defined(ldd), pcr.boolean(1.0))
# - cell area
cell_area = vos.readPCRmapClone(input_files['cell_area_05min'],
                          clone_map_file,
                          output_files['tmp_folder'])


# read the hydrological year 
msg = "Reading the hydrological year types" + ":"
logger.info(msg)
hydro_year_type = pcr.nominal(\
                  vos.readPCRmapClone(input_files['hydro_year_05min'],
                                      input_files['clone_map_05min'],
                                      output_files['tmp_folder'],
                                      None, False, None, True))
hydro_year_type = pcr.cover(hydro_year_type, pcr.nominal(1.0))
Example #56
0
  def _parseLine(self, line, lineNumber, nrColumns, externalNames, keyDict):

    line = re.sub("\n","",line)
    line = re.sub("\t"," ",line)
    result = None

    # read until first comment
    content = ""
    content,sep,comment = line.partition("#")
    if len(content) > 1:
      collectionVariableName, sep, tail = content.partition(" ")
      if collectionVariableName == self._varName:
        tail = tail.strip()
        key, sep, variableValue = tail.rpartition(" ")

        if len(key.split()) != nrColumns:
          tmp = re.sub("\(|\)|,","",str(key))
          msg = "Error reading %s line %d, order of columns given (%s columns) does not match expected order of %s columns" %(self._fileName, lineNumber, len(key.split()) + 2, int(nrColumns) + 2)
          raise ValueError(msg)

        variableValue = re.sub('\"', "", variableValue)

        tmp = None
        try:
          tmp = int(variableValue)
          if self._dataType == pcraster.Boolean:
            tmp = pcraster.boolean(tmp)
          elif self._dataType == pcraster.Nominal:
            tmp = pcraster.nominal(tmp)
          elif self._dataType == pcraster.Ordinal:
            tmp = pcraster.ordinal(tmp)
          elif self._dataType == pcraster.Ldd:
            tmp = pcraster.ldd(tmp)
          else:
            msg = "Conversion to %s failed" % (self._dataType)
            raise Exception(msg)
        except ValueError as e:
          try:
            tmp = float(variableValue)
            if self._dataType == pcraster.Scalar:
              tmp = pcraster.scalar(tmp)
            elif self._dataType == pcraster.Directional:
              tmp = pcraster.directional(tmp)
            else:
              msg = "Conversion to %s failed" % (self._dataType)
              raise Exception(msg)

          except ValueError as e:
            variableValue = re.sub("\\\\","/",variableValue)
            variableValue = variableValue.strip()
            path = os.path.normpath(variableValue)
            try:
              tmp = pcraster.readmap(path)
            except RuntimeError as e:
              msg = "Error reading %s line %d, %s" %(self._fileName, lineNumber, e)
              raise ValueError(msg)

        # test if key is an external name
        transformedKeys = []
        counter = 0

        for k in key.split():
          k = k.strip()
          if externalNames[counter].get(k):
            transformedKeys.append(externalNames[counter].get(k))
          else:
            transformedKeys.append(k)
          counter += 1

        key = tuple(transformedKeys)

        if not key in keyDict:
          tmp = re.sub("\(|\)|,","",str(key))
          msg = "Error reading %s line %d, %s unknown collection index" %(self._fileName, lineNumber, tmp)
          raise ValueError(msg)


        if not keyDict[key] is None:
          tmp = re.sub("\(|\)|,","",str(key))
          msg = "Error reading %s line %d, %s %s already initialised" %(self._fileName, lineNumber, self._varName, tmp)
          raise ValueError(msg)

        keyDict[key] = tmp
Example #57
0
    max_step = 5
    for i in range(1, max_step+1, 1):
        cmd = "Extending class: step "+str(i)+" from " + str(max_step)
        print(cmd)
        uniqueIDs = pcr.cover(uniqueIDs, pcr.windowmajority(uniqueIDs, 0.5))
    # - use only cells within the landmask
    uniqueIDs = pcr.ifthen(landmask, uniqueIDs)
    pcr.report(uniqueIDs, "class_ids.map")                                
    
    # cell area at 5 arc min resolution
    cellArea = vos.readPCRmapClone(cellArea05minFile,
                                   cloneMapFileName, tmp_directory)
    cellArea = pcr.ifthen(landmask, cellArea)
    
    # get a sample cell for every id
    x_min_for_each_id = pcr.areaminimum(pcr.xcoordinate(pcr.boolean(1.0)), uniqueIDs)
    sample_cells      = pcr.xcoordinate(pcr.boolean(1.0)) == x_min_for_each_id
    y_min_for_each_id = pcr.areaminimum(pcr.ycoordinate(sample_cells), uniqueIDs)
    sample_cells      = pcr.ycoordinate(sample_cells) == y_min_for_each_id
    uniqueIDs_sample  = pcr.ifthen(sample_cells, uniqueIDs)
    # - save it to a pcraster map file
    pcr.report(uniqueIDs_sample, "sample.ids")                                

    # calculate the country values 
    index = 0 # for posCnt
    for iYear in range(staYear,endYear+1):
        
        # time stamp and index for netcdf files:
        index = index + 1
        timeStamp = datetime.datetime(int(iYear), int(12), int(31), int(0))
        fulldate = '%4i-%02i-%02i'  %(int(iYear), int(12), int(31))
Example #58
0
# set the pcraster clone, ldd, landmask, and cell area maps
msg = "Setting the clone, ldd, landmask, and cell area maps" + ":"
logger.info(msg)
# - clone 
clone_map_file = input_files['clone_map_05min']
pcr.setclone(clone_map_file)
# - ldd
ldd = vos.readPCRmapClone(input_files['ldd_map_05min'],
                          clone_map_file,
                          output_files['tmp_folder'],
                          None,
                          True)
ldd = pcr.lddrepair(pcr.ldd(ldd))
ldd = pcr.lddrepair(ldd)
# - landmask
landmask  = pcr.ifthen(pcr.defined(ldd), pcr.boolean(1.0))
# - cell area
cell_area = vos.readPCRmapClone(input_files['cell_area_05min'],
                          clone_map_file,
                          output_files['tmp_folder'])


# set the basin map
msg = "Setting the basin map" + ":"
logger.info(msg)
basin_map = pcr.nominal(\
            vos.readPCRmapClone(input_files['basin_map_05min'],
                                input_files['clone_map_05min'],
                                output_files['tmp_folder'],
                                None, False, None, True))
#~ pcr.aguila(basin_map)
Example #59
0
def subcatch_stream(ldd, threshold, stream=None, min_strahler=-999,
                    max_strahler=999, assign_edge=False, assign_existing=False,
                    up_area=None, basin=None):
    """
    Derive catchments based upon strahler threshold
    Input:
        ldd -- pcraster object direction, local drain directions
        threshold -- integer, strahler threshold, subcatchments ge threshold
            are derived
        stream=None -- pcraster object ordinal, stream order map (made with pcr.streamorder), if provided, stream order
            map is not generated on the fly but used from this map. Useful when a subdomain within a catchment is
            provided, which would cause edge effects in the stream order map
        min_strahler=-999 -- integer, minimum strahler threshold of river catchments
            to return
        max_strahler=999 -- integer, maximum strahler threshold of river catchments
            to return
        assign_edge=False -- if set to True, unassigned connected areas at
            the edges of the domain are assigned a unique id as well. If set
            to False, edges are not assigned
        assign_existing=False -- if set to True, unassigned edges are assigned
            to existing basins with an upstream weighting. If set to False,
            edges are assigned to unique IDs, or not assigned
        up_area=None -- optional upstream area map, used to weight the
            assignment of unassigned edges to existing basins when
            assign_existing=True
        basin=None -- optional boolean map; subcatchments outside the basin
            are masked out
    output:
        stream_ge -- pcraster object, streams of strahler order ge threshold
        subcatch -- pcraster object, subcatchments of strahler order ge threshold

    """
    # derive stream order

    if stream is None:
        stream = pcr.streamorder(ldd)

    stream_ge = pcr.ifthen(stream >= threshold, stream)
    stream_up_sum = pcr.ordinal(pcr.upstream(ldd, pcr.cover(pcr.scalar(stream_ge), 0)))
    # detect any transfer of strahler order, to a higher strahler order.
    transition_strahler = pcr.ifthenelse(
        pcr.downstream(ldd, stream_ge) != stream_ge, pcr.boolean(1),
        pcr.ifthenelse(
            pcr.nominal(ldd) == 5, pcr.boolean(1),
            pcr.ifthenelse(
                pcr.downstream(ldd, pcr.scalar(stream_up_sum)) > pcr.scalar(stream_ge),
                pcr.boolean(1), pcr.boolean(0))))
    # make unique ids (write to file)
    transition_unique = pcr.ordinal(pcr.uniqueid(transition_strahler))

    # derive upstream catchment areas (write to file)
    subcatch = pcr.nominal(pcr.subcatchment(ldd, transition_unique))
    # mask out areas outside basin
    if basin is not None:
        subcatch = pcr.ifthen(basin, subcatch)

    if assign_edge:
        # fill unclassified areas (in pcraster equal to zero) with a unique id, above the maximum id assigned so far
        unique_edge = pcr.clump(pcr.ifthen(subcatch==0, pcr.ordinal(0)))
        subcatch = pcr.ifthenelse(subcatch==0, pcr.nominal(pcr.mapmaximum(pcr.scalar(subcatch)) + pcr.scalar(unique_edge)), pcr.nominal(subcatch))
    elif assign_existing:
        # unaccounted areas are added to largest nearest draining basin
        if up_area is None:
            up_area = pcr.ifthen(pcr.boolean(pcr.cover(stream_ge, 0)), pcr.accuflux(ldd, 1))
        riverid = pcr.ifthen(pcr.boolean(pcr.cover(stream_ge, 0)), subcatch)

        friction = 1./pcr.scalar(pcr.spreadzone(pcr.cover(pcr.ordinal(up_area), 0), 0, 0)) # *(pcr.scalar(ldd)*0+1)
        delta = pcr.ifthen(pcr.scalar(ldd)>=0, pcr.ifthen(pcr.cover(subcatch, 0)==0, pcr.spreadzone(pcr.cover(riverid, 0), 0, friction)))
        subcatch = pcr.ifthenelse(pcr.boolean(pcr.cover(subcatch, 0)),
                                      subcatch,
                                      delta)

    # finally, only keep basins with minimum and maximum river order flowing through them
    strahler_subcatch = pcr.areamaximum(stream, subcatch)
    subcatch = pcr.ifthen(pcr.ordinal(strahler_subcatch) >= min_strahler, pcr.ifthen(pcr.ordinal(strahler_subcatch) <= max_strahler, subcatch))

    return stream_ge, pcr.ordinal(subcatch)
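Usage sketch, assuming an ldd as before; with assign_edge=True, connected edge areas receive their own IDs:

stream_ge, subcatch = subcatch_stream(ldd, threshold=6, assign_edge=True)
pcr.report(subcatch, "subcatchments.map")
pcr.report(stream_ge, "streams_ge6.map")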
Example #60
0
    def evaluateAllBaseflowResults(self,globalCloneMapFileName,\
                                   catchmentClassFileName,\
                                   lddMapFileName,\
                                   cellAreaMapFileName,\
                                   pcrglobwb_output,\
                                   analysisOutputDir="",\
                                   tmpDir = None):     

        # temporary directory
        if tmpDir == None: tmpDir = self.tmpDir+"/edwin_iwmi_"

        # output directory for all analyses for all stations
        analysisOutputDir   = str(analysisOutputDir)
        self.chartOutputDir = analysisOutputDir+"/chart/"
        self.tableOutputDir = analysisOutputDir+"/table/"
        #
        if analysisOutputDir == "": self.chartOutputDir = "chart/"
        if analysisOutputDir == "": self.tableOutputDir = "table/"
        #
        # make the chart and table directories:
        os.system('rm -r '+self.chartOutputDir+"*")
        os.system('rm -r '+self.tableOutputDir+"*")
        os.makedirs(self.chartOutputDir)
        os.makedirs(self.tableOutputDir)
        
        # cloneMap for all pcraster operations
        pcr.setclone(globalCloneMapFileName)
        cloneMap = pcr.boolean(1)
        
        lddMap = pcr.lddrepair(pcr.readmap(lddMapFileName))
        cellArea = pcr.scalar(pcr.readmap(cellAreaMapFileName))
        
        # The landMaskClass map contains the nominal classes for all landmask regions. 
        landMaskClass = pcr.nominal(cloneMap)  # default: if catchmentClassFileName is not given
        if catchmentClassFileName != None:
            landMaskClass = pcr.nominal(pcr.readmap(catchmentClassFileName))

        for id in self.list_of_grdc_ids: 

            logger.info("Evaluating simulated annual baseflow time series to IWMI baseflow time series at "+str(self.attributeGRDC["id_from_grdc"][str(id)])+".")
            
            # evaluate model results to GRDC data
            self.evaluateBaseflowResult(str(id),pcrglobwb_output,catchmentClassFileName,tmpDir)
            
        # write the summary to a table 
        summary_file = analysisOutputDir+"baseflow_summary.txt"
        #
        logger.info("Writing the summary for all stations to the file: "+str(summary_file)+".")
        #
        # prepare the file:
        summary_file_handle = open(summary_file,"w")
        #
        # write the header
        summary_file_handle.write( ";".join(self.grdc_dict_keys)+"\n")
        #
        # write the content
        for id in self.list_of_grdc_ids:
            rowLine  = ""
            for key in self.grdc_dict_keys: rowLine += str(self.attributeGRDC[key][str(id)]) + ";"   
            rowLine = rowLine[0:-1] + "\n"
            summary_file_handle.write(rowLine)
        summary_file_handle.close()