Example #1
def getgridparams():
    """ return grid parameters in a python friendly way

    Output:
        [ Xul, Yul, xsize, ysize, rows, cols]

        - xul - x upper left centre
        - yul - y upper left centre
        - xsize - size of a cell in x direction
        - ysize - size of a cell in y direction
        - cols - number of columns
        - rows - number of rows
        - xlr -  x lower right centre
        - ylr -  y lower right centre
    """
    # This is the default, but add for safety...
    pcr.setglobaloption("coorcentre")
    # cell size in x and y is assumed to be equal
    xy = pcr.pcr2numpy(pcr.celllength(), np.nan)[0, 0]
    xu = pcr.pcr2numpy(pcr.xcoordinate(1), np.nan)[0, 0]
    yu = pcr.pcr2numpy(pcr.ycoordinate(1), np.nan)[0, 0]
    ylr = pcr.pcr2numpy(pcr.ycoordinate(1), np.nan)[getrows() - 1, getcols() - 1]
    xlr = pcr.pcr2numpy(pcr.xcoordinate(1), np.nan)[getrows() - 1, getcols() - 1]

    return [xu, yu, xy, xy, getrows(), getcols(), xlr, ylr]
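A minimal usage sketch, assuming pcraster is importable as pcr and that getgridparams (together with the getrows/getcols helpers it relies on) is available in the same module; the clone is defined in memory, so no map file is needed:

import pcraster as pcr

# hypothetical clone: 10 rows x 10 cols, cell size 1.0, upper-left corner at (0, 10)
pcr.setclone(10, 10, 1.0, 0.0, 10.0)

xul, yul, xsize, ysize, rows, cols, xlr, ylr = getgridparams()
print("upper-left cell centre:", xul, yul)   # expected: 0.5 9.5
print("cell size:", xsize, ysize)            # expected: 1.0 1.0
print("grid shape:", rows, cols)             # expected: 10 and 10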
Example #3
def checkerboard(mapin, fcc):
    """
    checkerboard creates a checkerboard map with a unique id for each
    fcc*fcc cell block. The resulting map can be used
    to derive statistics for (later) upscaling of maps (using the fcc factor).

    .. warning:: use with unitcell to get the most reliable results!

    Input:
        - mapin (used to determine coordinates)
        - fcc (size of the areas in cells)

    Output:
        - checkerboard type map
    """
    msker = pcr.defined(mapin)
    ymin = pcr.mapminimum(pcr.ycoordinate(msker))
    yc = (pcr.ycoordinate((msker)) - ymin) / pcr.celllength()
    yc = pcr.rounddown(yc / fcc)
    # yc = yc/fcc
    xmin = pcr.mapminimum(pcr.xcoordinate((msker)))
    xc = (pcr.xcoordinate((msker)) - xmin) / pcr.celllength()
    xc = pcr.rounddown(xc / fcc)
    # xc = xc/fcc

    yc = yc * (pcr.mapmaximum(xc) + 1.0)

    xy = pcr.ordinal(xc + yc)

    return xy
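A short usage sketch, assuming pcraster as pcr and an in-memory clone; the elevation map and block size are hypothetical:

import pcraster as pcr

pcr.setclone(20, 20, 1.0, 0.0, 20.0)                 # hypothetical 20x20 clone
elevation = pcr.uniform(pcr.spatial(pcr.boolean(1)))  # synthetic scalar field in [0, 1]

blocks = checkerboard(elevation, 5)                  # unique id per 5x5 block
block_mean = pcr.areaaverage(elevation, blocks)      # per-block mean, usable for upscaling
pcr.report(block_mean, "block_mean.map")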
Example #5
def getValAtPoint(in_map, xcor, ycor):
    """
    Returns the value of the map at the given point.
    Works but is rather slow.

    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate
        - ycor - y coordinate

    Output:
        - value
    """
    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.defined(in_map)), np.nan)
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.defined(in_map)), np.nan)
    XX = pcr.pcr2numpy(pcr.celllength(), 0.0)
    themap = pcr.pcr2numpy(in_map, np.nan)
    tolerance = 0.5  # takes a single point

    diffx = x - xcor
    diffy = y - ycor
    col_ = np.absolute(diffx) <= (XX[0, 0] * tolerance)  # cellsize
    row_ = np.absolute(diffy) <= (XX[0, 0] * tolerance)  # cellsize
    point = col_ * row_
    pt = point.argmax()

    return themap.ravel()[pt]
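A usage sketch, assuming pcraster as pcr, an in-memory clone, and a synthetic map; the query point is hypothetical:

import pcraster as pcr

pcr.setclone(10, 10, 1.0, 0.0, 10.0)             # hypothetical clone
dem = pcr.ycoordinate(pcr.spatial(pcr.boolean(1)))  # synthetic map: each cell holds its y coordinate

print(getValAtPoint(dem, 3.2, 7.8))              # expected: 7.5, the centre of the containing cell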
Example #6
def getRowColPoint(in_map, xcor, ycor):
    """
    Returns the row and column of the cell at the given point.
    Works but is rather slow.

    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate
        - ycor - y coordinate

    Output:
        - row, column
    """
    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.scalar(in_map) + 1.0)), np.nan)
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.scalar(in_map) + 1.0)), np.nan)
    XX = pcr.pcr2numpy(pcr.celllength(), 0.0)
    tolerance = 0.5  # takes a single point

    diffx = x - xcor
    diffy = y - ycor
    col_ = np.absolute(diffx) <= (XX[0, 0] * tolerance)  # cellsize
    row_ = np.absolute(diffy) <= (XX[0, 0] * tolerance)  # cellsize
    point = col_ * row_

    return point.argmax(0).max(), point.argmax(1).max()
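A usage sketch along the same lines (hypothetical clone and point):

import pcraster as pcr

pcr.setclone(10, 10, 1.0, 0.0, 10.0)       # hypothetical clone
amap = pcr.spatial(pcr.scalar(1.0))

row, col = getRowColPoint(amap, 3.2, 7.8)
print(row, col)                            # expected: 2 3 (row counted from the top)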
Example #7
 def spatial(self):
     """Computes requruired biosafe output for a spatial domain"""
     
     #-determine a representative point for each floodplain section
     points = pcrr.representativePoint(self.sections)
     clone = pcr.defined(self.sections)
     pcr.setglobaloption('unittrue')
     xcoor = pcr.xcoordinate(clone)
     ycoor = pcr.ycoordinate(clone)
     geoDf = pcrr.getCellValues(points, \
                             mapList = [points, xcoor, ycoor],\
                             columns = ['ID', 'xcoor', 'ycoor'])        
     geoDf.set_index('ID', inplace=True, drop=False)
     geoDf.drop(['rowIdx', 'colIdx', 'ID'], axis=1, inplace=True)
     
     #-compute the required biosafe parameters for all sections
     sectionIDs = np.unique(pcr.pcr2numpy(self.sections,-9999))[1:]
     ll = []
     for sectionID in sectionIDs:
         ll.append(self.sectionScores(sectionID))
     paramLL = list(zip(*ll))
             
     dfParamLL = []
     for ii in range(len(self.params)):
         bsScores = pd.concat(paramLL[ii], axis=1).T
         bsScores = bsScores.join(geoDf)
         bsScores.index.name = 'ID'
         bsScores.columns.name = self.params[ii]
         dfParamLL.append(bsScores)
     
     return dfParamLL
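The list(zip(*ll)) step above transposes the per-section score lists into one sequence per biosafe parameter; a tiny standalone illustration with hypothetical placeholder values:

# per-section results: one inner list per floodplain section,
# each holding one entry per biosafe parameter (placeholder strings here)
ll = [["s1_p1", "s1_p2"],
      ["s2_p1", "s2_p2"]]

paramLL = list(zip(*ll))    # list() so the groups can be indexed (Python 3)
print(paramLL[0])           # ('s1_p1', 's2_p1') -> all sections for the first parameter
print(paramLL[1])           # ('s1_p2', 's2_p2') -> all sections for the second parameter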
Example #8
    def __init__(self, iniItems):

        # cloneMap
        pcr.setclone(iniItems.cloneMap)
        cloneMap = pcr.boolean(1.0)

        # latitudes and longitudes
        self.latitudes = np.unique(pcr2numpy(pcr.ycoordinate(cloneMap),
                                             vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # TODO: Let users decide what their preference regarding latitude order.
        #       Consult with Stefanie regarding CF convention.

        # netCDF format and attributes:
        self.attributeDictionary = {}
        self.attributeDictionary['institution'] = iniItems.globalOptions[
            'institution']
        self.attributeDictionary['title'] = iniItems.globalOptions['title']
        self.attributeDictionary['description'] = iniItems.globalOptions[
            'description']

        # netcdf format and zlib setup
        self.format = 'NETCDF3_CLASSIC'
        self.zlib = False
        if "formatNetCDF" in iniItems.reportingOptions.keys():
            self.format = str(iniItems.reportingOptions['formatNetCDF'])
        if "zlib" in iniItems.reportingOptions.keys():
            if iniItems.reportingOptions['zlib'] == "True": self.zlib = True
Example #9
    def __init__(self,configuration,model,specificAttributeDictionary=None):

        # Set clone map
        pcr.setclone(configuration.cloneMap)
        cloneMap = pcr.boolean(1.0)  # map with all cell values equal to 1

        # Retrieve latitudes and longitudes from clone map
        self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
        self.crops  = np.arange(1, model.nCrop + 1)
        self.depths = np.arange(1, model.nComp + 1)
        
        # Let users decide what their preference regarding latitude order
        self.netcdf_y_orientation_follow_cf_convention = False
        if 'netcdf_y_orientation_follow_cf_convention' in configuration.reportingOptions.keys() and\
            configuration.reportingOptions['netcdf_y_orientation_follow_cf_convention'] == "True":
            msg = "Latitude (y) orientation for output netcdf files start from the bottom to top."
            self.netcdf_y_orientation_follow_cf_convention = True
            self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))
        
        # Set general netcdf attributes (based on the information given in the ini/configuration file) 
        self.set_general_netcdf_attributes(configuration, specificAttributeDictionary)
        
        # netcdf format and zlib setup 
        self.format = 'NETCDF3_CLASSIC'
        self.zlib = False
        if "formatNetCDF" in configuration.reportingOptions.keys():
            self.format = str(configuration.reportingOptions['formatNetCDF'])
        if "zlib" in configuration.reportingOptions.keys():
            if configuration.reportingOptions['zlib'] == "True": self.zlib = True
Example #11
    def __init__(self,
                 cloneMapFileName,
                 resetClone=None,
                 attributeDictionary=None):

        # cloneMap
        if resetClone != None: pcr.setclone(cloneMapFileName)
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))

        # latitudes and longitudes
        self.latitudes = np.unique(
            pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # reset clone (if necessary)
        if resetClone != None: pcr.setclone(resetClone)

        # netcdf format:
        self.format = 'NETCDF3_CLASSIC'

        self.attributeDictionary = {}
        if attributeDictionary == None:
            self.attributeDictionary['institution'] = "None"
            self.attributeDictionary['title'] = "None"
            self.attributeDictionary['source'] = "None"
            self.attributeDictionary['history'] = "None"
            self.attributeDictionary['references'] = "None"
            self.attributeDictionary['description'] = "None"
            self.attributeDictionary['comment'] = "None"
        else:
            self.attributeDictionary = attributeDictionary
Example #12
    def __init__(self, netcdffile, logging):
        """
        First try to setup a class read netcdf files
        (converted with pcr2netcdf.py)

        netcdffile: file to read the forcing data from
        logging: python logging object
        vars: list of variables to get from file
        """

        if os.path.exists(netcdffile):
            self.dataset = netCDF4.Dataset(netcdffile, mode="r")
        else:
            msg = os.path.abspath(netcdffile) + " not found!"
            logging.error(msg)
            raise ValueError(msg)

        try:
            self.x = self.dataset.variables["x"][:]
        except:
            self.x = self.dataset.variables["lon"][:]
        # Now check Y values to see if we must flip the data
        try:
            self.y = self.dataset.variables["y"][:]
        except:
            self.y = self.dataset.variables["lat"][:]

        x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[0, :]
        y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[:, 0]

        (self.lonidx,) = np.logical_and(self.x >= x.min(), self.x < x.max()).nonzero()
        (self.latidx,) = np.logical_and(self.y >= y.min(), self.y < y.max()).nonzero()

        logging.info("Reading static input from netCDF file: " + netcdffile)
Example #13
def getCoordinates(cloneMap, MV=-9999):
    '''Returns cell centre coordinates for a clone map as numpy arrays:
       longitudes, latitudes.'''
    cln = pcr.cover(pcr.boolean(cloneMap), pcr.boolean(1))
    xMap = pcr.xcoordinate(cln)
    yMap = pcr.ycoordinate(cln)
    return pcr.pcr2numpy(xMap, MV)[1, :], pcr.pcr2numpy(yMap, MV)[:, 1]
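A usage sketch with an in-memory clone (the grid layout is hypothetical):

import pcraster as pcr

pcr.setclone(4, 6, 0.5, 5.0, 52.0)    # hypothetical clone: 4 rows, 6 cols, 0.5 degree cells
lons, lats = getCoordinates(pcr.spatial(pcr.boolean(1)))
print(lons)                           # expected: 5.25 ... 7.75
print(lats)                           # expected: 51.75 ... 50.25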
Example #15
    def __init__(self, cloneMapFileName, netcdf_attribute_description):

        # cloneMap
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))

        # latitudes and longitudes
        self.latitudes = np.unique(
            pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # netCDF format and attributes:
        self.format = 'NETCDF3_CLASSIC'
        self.attributeDictionary = {}
        self.attributeDictionary['institution'] = "European Commission - JRC"
        self.attributeDictionary[
            'title'] = "EFAS-Meteo 5km for the Rhine-Meuse basin"
        self.attributeDictionary[
            'source'] = "5km Gridded Meteo Database (C) European Commission - JRDC, 2014"
        self.attributeDictionary[
            'history'] = "The data were provided by Ad de Roo ([email protected]) on 19 November 2014 and then converted by Edwin H. Sutanudjaja ([email protected]) to netcdf files on 27 November 2014."
        self.attributeDictionary[
            'references'] = "Ntegeka et al., 2013. EFAS-Meteo: A European daily high-resolution gridded meteorological data set. JRC Technical Reports. doi: 10.2788/51262"
        self.attributeDictionary[
            'comment'] = "Please use this dataset only for Hyper-Hydro test bed experiments. "
        self.attributeDictionary[
            'comment'] += "For using it and publishing it, please acknowledge its source: 5km Gridded Meteo Database (C) European Commission - JRDC, 2014 and its reference: Ntegeka et al., 2013 (doi: 10.2788/51262). "
        self.attributeDictionary[
            'comment'] += "The data are in European ETRS projection, 5km grid; http://en.wikipedia.org/wiki/European_grid. "

        self.attributeDictionary['description'] = netcdf_attribute_description
Example #16
    def __init__(self, iniItems, specificAttributeDictionary=None):

        # cloneMap
        pcr.setclone(iniItems.cloneMap)
        cloneMap = pcr.boolean(1.0)

        # latitudes and longitudes
        self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[
            ::-1
        ]
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # Let users decide what their preference regarding latitude order.
        self.netcdf_y_orientation_follow_cf_convention = False
        if (
            "netcdf_y_orientation_follow_cf_convention"
            in list(iniItems.reportingOptions.keys())
            and iniItems.reportingOptions["netcdf_y_orientation_follow_cf_convention"]
            == "True"
        ):
            msg = "Latitude (y) orientation for output netcdf files start from the bottom to top."
            self.netcdf_y_orientation_follow_cf_convention = True
            self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))

        # set the general netcdf attributes (based on the information given in the ini/configuration file)
        self.set_general_netcdf_attributes(iniItems, specificAttributeDictionary)

        # netcdf format and zlib setup
        self.format = "NETCDF3_CLASSIC"
        self.zlib = False
        if "formatNetCDF" in list(iniItems.reportingOptions.keys()):
            self.format = str(iniItems.reportingOptions["formatNetCDF"])
        if "zlib" in list(iniItems.reportingOptions.keys()):
            if iniItems.reportingOptions["zlib"] == "True":
                self.zlib = True

        # if given in the ini file, use the netcdf as given in the section 'specific_attributes_for_netcdf_output_files'
        if "specific_attributes_for_netcdf_output_files" in iniItems.allSections:
            for key in list(
                iniItems.specific_attributes_for_netcdf_output_files.keys()
            ):

                self.attributeDictionary[
                    key
                ] = iniItems.specific_attributes_for_netcdf_output_files[key]

                if self.attributeDictionary[key] == "None":
                    self.attributeDictionary[key] = ""

                if key == "history" and self.attributeDictionary[key] == "Default":
                    self.attributeDictionary[
                        key
                    ] = "created on " + datetime.datetime.today().isoformat(" ")
                if self.attributeDictionary[key] == "Default" and (
                    key == "date_created" or key == "date_issued"
                ):
                    self.attributeDictionary[key] = datetime.datetime.today().isoformat(
                        " "
                    )
Example #17
    def __init__(self, iniItems, specificAttributeDictionary=None):

        # cloneMap
        pcr.setclone(iniItems.cloneMap)
        cloneMap = pcr.boolean(1.0)

        # latitudes and longitudes
        self.latitudes = np.unique(
            pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # Let users decide what their preference regarding latitude order.
        self.netcdf_y_orientation_follow_cf_convention = False
        if ("netcdf_y_orientation_follow_cf_convention" in list(
                iniItems.reportingOptions.keys()) and iniItems.
                reportingOptions["netcdf_y_orientation_follow_cf_convention"]
                == "True"):
            msg = "Latitude (y) orientation for output netcdf files start from the bottom to top."
            self.netcdf_y_orientation_follow_cf_convention = True
            self.latitudes = np.unique(
                pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))

        # set the general netcdf attributes (based on the information given in the ini/configuration file)
        self.set_general_netcdf_attributes(iniItems,
                                           specificAttributeDictionary)

        # netcdf format and zlib setup
        self.format = "NETCDF3_CLASSIC"
        self.zlib = False
        if "formatNetCDF" in list(iniItems.reportingOptions.keys()):
            self.format = str(iniItems.reportingOptions["formatNetCDF"])
        if "zlib" in list(iniItems.reportingOptions.keys()):
            if iniItems.reportingOptions["zlib"] == "True":
                self.zlib = True

        # if given in the ini file, use the netcdf as given in the section 'specific_attributes_for_netcdf_output_files'
        if "specific_attributes_for_netcdf_output_files" in iniItems.allSections:
            for key in list(
                    iniItems.specific_attributes_for_netcdf_output_files.keys(
                    )):

                self.attributeDictionary[
                    key] = iniItems.specific_attributes_for_netcdf_output_files[
                        key]

                if self.attributeDictionary[key] == "None":
                    self.attributeDictionary[key] = ""

                if key == "history" and self.attributeDictionary[
                        key] == "Default":
                    self.attributeDictionary[
                        key] = "created on " + datetime.datetime.today(
                        ).isoformat(" ")
                if self.attributeDictionary[key] == "Default" and (
                        key == "date_created" or key == "date_issued"):
                    self.attributeDictionary[key] = datetime.datetime.today(
                    ).isoformat(" ")
Example #18
def boundingBox(pcrmap):
    ''' derive the bounding box for a map, return xmin,ymin,xmax,ymax '''
    bb = []
    xcoor = pcr.xcoordinate(pcrmap)
    ycoor = pcr.ycoordinate(pcrmap)
    xmin  = pcr.cellvalue(pcr.mapminimum(xcoor), 1, 1)[0]
    xmax  = pcr.cellvalue(pcr.mapmaximum(xcoor), 1, 1)[0]
    ymin  = pcr.cellvalue(pcr.mapminimum(ycoor), 1, 1)[0]
    ymax  = pcr.cellvalue(pcr.mapmaximum(ycoor), 1, 1)[0]
    return [math.floor(xmin), math.floor(ymin), math.ceil(xmax), math.ceil(ymax)]
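A usage sketch with a hypothetical in-memory clone (math must be importable for floor/ceil):

import math
import pcraster as pcr

pcr.setclone(10, 10, 1.0, 3.0, 57.0)          # hypothetical clone: west=3, north=57
print(boundingBox(pcr.spatial(pcr.boolean(1))))  # expected: [3, 47, 13, 57]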
Example #19
 def get_model_dimensions(self):
     """Function to set model dimensions"""
     self.nLat = int(self.cloneMapAttributes['rows'])
     self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(self.cloneMap), vos.MV))[::-1]
     self.nLon = int(self.cloneMapAttributes['cols'])
     self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(self.cloneMap), vos.MV))
     self.nCell = int(np.sum(self.landmask))
     self.nLayer = 3         # FIXED        
     self.dimensions = {
         'time'     : None,
         'depth'    : np.arange(self.nLayer), # TODO - put nComp in config section [SOIL]
         'lat'      : self.latitudes,
         'lon'      : self.longitudes,
     }
Example #20
def map_edges(clone):
    """Boolean map true map edges, false elsewhere"""

    pcr.setglobaloption('unittrue')
    xmin, xmax, ymin, ymax, nr_rows, nr_cols, cell_size = clone_attributes()
    clone = pcr.ifthenelse(pcr.defined(clone), pcr.boolean(1), pcr.boolean(1))
    x_coor = pcr.xcoordinate(clone)
    y_coor = pcr.ycoordinate(clone)
    north = y_coor > (ymax - cell_size)
    south = y_coor < (ymin + cell_size)
    west = x_coor < (xmin + cell_size)
    east = x_coor > (xmax - cell_size)
    edges = north | south | west | east
    return edges
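A usage sketch, assuming the companion clone_attributes() helper from the same module is importable and that "clone.map" is a placeholder clone map on disk:

import pcraster as pcr

pcr.setclone("clone.map")                 # placeholder clone map
clone = pcr.readmap("clone.map")

edges = map_edges(clone)                  # boolean map: True on the outer ring of cells
pcr.report(edges, "edges.map")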
Example #21
def pcr2col(listOfMaps, MV, selection='ONE_TRUE'):
    """converts a set of maps to a column array: X, Y, map values
       selection can be set to ALL, ALL_TRUE, ONE_TRUE"""

    #-intersect all maps and get X and Y coordinates
    intersection = pcr.boolean(pcr.cover(listOfMaps[0], 0))
    for mapX in listOfMaps[1:]:
        intersection = intersection | pcr.boolean(pcr.cover(mapX, 0))
    pcr.setglobaloption("unittrue")
    xCoor = pcr.ifthen(intersection, pcr.xcoordinate(intersection))
    yCoor = pcr.ifthen(intersection, pcr.ycoordinate(intersection))
    pcr.setglobaloption("unitcell")

    #-initiate outArray with xCoor and yCoor
    xCoorArr = pcr.pcr2numpy(xCoor, MV)
    yCoorArr = pcr.pcr2numpy(yCoor, MV)
    nRows, nCols = xCoorArr.shape
    nrCells = nRows * nCols
    outArray = np.hstack((xCoorArr.reshape(nrCells,
                                           1), yCoorArr.reshape(nrCells, 1)))

    #-add subsequent maps
    for mapX in listOfMaps:
        arr = pcr.pcr2numpy(mapX, MV).reshape(nrCells, 1)
        outArray = np.hstack((outArray, arr))

    #-subset output based on selection criterion
    ll = []
    nrMaps = len(listOfMaps)
    if selection == 'ONE_TRUE':
        for line in outArray:
            nrMV = len(line[line == MV])
            if nrMV < nrMaps:
                ll.append(line)
            else:
                pass
        outArray = np.array(ll)
    elif selection == 'ALL_TRUE':
        for line in outArray:
            if MV not in line:
                ll.append(line)
            else:
                pass
        outArray = np.array(ll)
    elif selection == 'ALL':
        pass
    return outArray
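A usage sketch with an in-memory clone and two hypothetical scalar maps:

import pcraster as pcr

pcr.setclone(5, 5, 1.0, 0.0, 5.0)         # hypothetical clone
a = pcr.spatial(pcr.scalar(1.0))
b = pcr.spatial(pcr.scalar(2.0))

table = pcr2col([a, b], -9999.0, selection='ALL_TRUE')
print(table.shape)                        # expected: (25, 4) -> x, y, value of a, value of b
print(table[0])                           # first cell: [x, y, 1.0, 2.0]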
Example #22
def points_to_map(in_map, xcor, ycor, tolerance):
    """
    Returns a map with non-zero values at the points defined
    by the X, Y pairs. Its goal is to replace the pcraster col2map program.

    tolerance should be 0.5 to select single points.
    Performance is not very good and scales linearly with the number of points.


    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate (array or single value)
        - ycor - y coordinate (array or single value)
        - tolerance - tolerance in cell units. 0.5 selects a single cell\
        10 would select a 10x10 block of cells

    Output:
        - Map with values burned in. 1 for first point, 2 for second and so on
    """
    point = in_map * 0.0

    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.defined(in_map)), np.nan)
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.defined(in_map)), np.nan)
    cell_length = float(pcr.celllength())

    # simple check to use both floats and numpy arrays
    try:
        c = xcor.ndim
    except:
        xcor = np.array([xcor])
        ycor = np.array([ycor])

    # Loop over points and "burn in" map
    for n in range(0, xcor.size):
        if Verbose:
            print(n)
        diffx = x - xcor[n]
        diffy = y - ycor[n]
        col_ = np.absolute(diffx) <= (cell_length * tolerance)  # cellsize
        row_ = np.absolute(diffy) <= (cell_length * tolerance)  # cellsize
        point = point + pcr.numpy2pcr(pcr.Scalar,
                                      ((col_ * row_) * (n + 1)), np.nan)

    return pcr.ordinal(point)
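A usage sketch; Verbose is a module-level flag that points_to_map expects, so it is set explicitly here (clone, base map, and points are hypothetical):

import numpy as np
import pcraster as pcr

Verbose = 0                                  # points_to_map checks this module-level flag

pcr.setclone(10, 10, 1.0, 0.0, 10.0)         # hypothetical clone
base = pcr.spatial(pcr.scalar(0.0))

xs = np.array([2.3, 7.1])
ys = np.array([8.4, 1.9])
stations = points_to_map(base, xs, ys, 0.5)  # ordinal map: 1 at the first point, 2 at the second
pcr.report(stations, "stations.map")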
Example #23
def points_to_map(in_map, xcor, ycor, tolerance):
    """
    Returns a map with non-zero values at the points defined
    by the X, Y pairs. Its goal is to replace the pcraster col2map program.

    tolerance should be 0.5 to select single points.
    Performance is not very good and scales linearly with the number of points.


    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate (array or single value)
        - ycor - y coordinate (array or single value)
        - tolerance - tolerance in cell units. 0.5 selects a single cell\
        10 would select a 10x10 block of cells

    Output:
        - Map with values burned in. 1 for first point, 2 for second and so on
    """
    point = in_map * 0.0

    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.defined(in_map)), np.nan)
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.defined(in_map)), np.nan)
    cell_length = float(pcr.celllength())

    # simple check to use both floats and numpy arrays
    try:
        c = xcor.ndim
    except:
        xcor = np.array([xcor])
        ycor = np.array([ycor])

    # Loop over points and "burn in" map
    for n in range(0, xcor.size):
        if Verbose:
            print(n)
        diffx = x - xcor[n]
        diffy = y - ycor[n]
        col_ = np.absolute(diffx) <= (cell_length * tolerance)  # cellsize
        row_ = np.absolute(diffy) <= (cell_length * tolerance)  # cellsize
        point = point + pcr.numpy2pcr(pcr.Scalar, ((col_ * row_) * (n + 1)), np.nan)

    return pcr.ordinal(point)
Example #24
    def __init__(self, cloneMapFile, attribute=None, cellSizeInArcMinutes=None):
        		
        # cloneMap
        # - the cloneMap must be at 5 arc min resolution
        cloneMap = pcr.readmap(cloneMapFile)
        cloneMap = pcr.boolean(1.0)
        
        # latitudes and longitudes
        self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        #~ # properties of the clone map
        #~ # - number of rows and columns
        #~ self.nrRows       = np.round(pcr.clone().nrRows())    
        #~ self.nrCols       = np.round(pcr.clone().nrCols())  
        #~ # - upper right coordinate, unit: arc degree ; must be integer (without decimals)
        #~ self.minLongitude = np.round(pcr.clone().west() , 0)         
        #~ self.maxLatitude  = np.round(pcr.clone().north(), 0)
        #~ # - cell resolution, unit: arc degree
        #~ self.cellSize     = pcr.clone().cellSize()
        #~ if cellSizeInArcMinutes != None: self.cellSize = cellSizeInArcMinutes / 60.0 
        #~ # - lower right coordinate, unit: arc degree ; must be integer (without decimals)
        #~ self.maxLongitude = np.round(self.minLongitude + self.cellSize*self.nrCols, 0)         
        #~ self.minLatitude  = np.round(self.maxLatitude  - self.cellSize*self.nrRows, 0)
        #~ 
        #~ # latitudes and longitudes for netcdf files
        #~ latMin = self.minLatitude  + self.cellSize / 2
        #~ latMax = self.maxLatitude  - self.cellSize / 2
        #~ lonMin = self.minLongitude + self.cellSize / 2
        #~ lonMax = self.maxLongitude - self.cellSize / 2
        #~ self.longitudes = np.arange(lonMin,lonMax+self.cellSize, self.cellSize)
        #~ self.latitudes=   np.arange(latMax,latMin-self.cellSize,-self.cellSize)
        
        # netCDF format and attributes:
        self.format = 'NETCDF4'
        self.attributeDictionary = {}
        if attribute == None:
            self.attributeDictionary['institution'] = "None"
            self.attributeDictionary['title'      ] = "None"
            self.attributeDictionary['description'] = "None"
        else:
            self.attributeDictionary = attribute
Example #25
    def __init__(self,
                 cloneMapFileName_or_latlonDict,
                 attributeDictionary=None):

        # cloneMap
        if isinstance(cloneMapFileName_or_latlonDict, str):
            # define latitudes and longitudes based on cloneMap
            cloneMapFileName = cloneMapFileName_or_latlonDict
            pcr.setclone(cloneMapFileName)
            cloneMap = pcr.boolean(1.0)
            # latitudes and longitudes
            self.latitudes = np.unique(
                pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
            self.longitudes = np.unique(
                pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
        else:
            # define latitudes and longitudes based on latlonDict       # NOT TESTED YET
            latlonDict = cloneMapFileName_or_latlonDict
            self.latitudes = latlonDict['lat']
            self.longitudes = latlonDict['lon']

        # make sure that latitudes are from high to low
        if self.latitudes[-1] > self.latitudes[0]:
            self.latitudes = self.latitudes[::-1]
        if self.longitudes[-1] < self.longitudes[0]:
            self.longitudes = self.longitudes[::-1]

        # netcdf format:
        self.format = 'NETCDF3_CLASSIC'

        self.attributeDictionary = {}
        if attributeDictionary == None:
            self.attributeDictionary['institution'] = "None"
            self.attributeDictionary['title'] = "None"
            self.attributeDictionary['source'] = "None"
            self.attributeDictionary['history'] = "None"
            self.attributeDictionary['references'] = "None"
            self.attributeDictionary['description'] = "None"
            self.attributeDictionary['comment'] = "None"
        else:
            self.attributeDictionary = attributeDictionary
Example #26
    def __init__(self, clone_map_file_name):

        # cloneMap
        pcr.setclone(clone_map_file_name)
        cloneMap = pcr.boolean(1.0)

        # latitudes and longitudes
        self.latitudes = np.unique(
            pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # Let users decide what their preference regarding latitude order.
        self.netcdf_y_orientation_follow_cf_convention = False

        # set the general netcdf attributes
        self.set_general_netcdf_attributes()

        # netcdf format and zlib setup
        self.format = 'NETCDF4'  # 'NETCDF3_CLASSIC'
        self.zlib = True
Example #27
    def __init__(self, iniItems):

        # cloneMap
        pcr.setclone(iniItems.cloneMap)
        cloneMap = pcr.boolean(1.0)

        # latitudes and longitudes
        self.latitudes = np.unique(
            pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # TODO: Let users decide what their preference regarding latitude order.
        #       Consult with Stefanie regarding CF convention.

        # netCDF format and attributes:
        self.format = 'NETCDF3_CLASSIC'
        self.attributeDictionary = {}
        self.attributeDictionary['institution'] = iniItems.globalOptions['institution']
        self.attributeDictionary['title'] = iniItems.globalOptions['title']
        self.attributeDictionary['description'] = iniItems.globalOptions['description']
Example #28
    def __init__(self, netcdffile, logging):
        """
        First try to setup a class read netcdf files
        (converted with pcr2netcdf.py)

        netcdffile: file to read the forcing data from
        logging: python logging object
        vars: list of variables to get from file
        """

        if os.path.exists(netcdffile):
            self.dataset = netCDF4.Dataset(netcdffile, mode="r")
        else:
            msg = os.path.abspath(netcdffile) + " not found!"
            logging.error(msg)
            raise ValueError(msg)

        try:
            self.x = self.dataset.variables["x"][:]
        except:
            self.x = self.dataset.variables["lon"][:]
        # Now check Y values to see if we must flip the data
        try:
            self.y = self.dataset.variables["y"][:]
        except:
            self.y = self.dataset.variables["lat"][:]

        x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))),
                          np.nan)[0, :]
        y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))),
                          np.nan)[:, 0]

        (self.lonidx, ) = np.logical_and(self.x >= x.min(),
                                         self.x < x.max()).nonzero()
        (self.latidx, ) = np.logical_and(self.y >= y.min(),
                                         self.y < y.max()).nonzero()

        logging.info("Reading static input from netCDF file: " + netcdffile)
Example #29
    def __init__(self,cloneMapFileName,netcdf_attribute_description):
        		
        # cloneMap
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))
        
        # latitudes and longitudes
        self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # netCDF format and attributes:
        self.format = 'NETCDF3_CLASSIC'
        self.attributeDictionary = {}
        self.attributeDictionary['institution']  = "European Commission - JRC"
        self.attributeDictionary['title'      ]  = "EFAS-Meteo 5km for the Rhine-Meuse basin"
        self.attributeDictionary['source'     ]  = "5km Gridded Meteo Database (C) European Commission - JRDC, 2014"
        self.attributeDictionary['history'    ]  = "The data were provided by Ad de Roo ([email protected]) on 19 November 2014 and then converted by Edwin H. Sutanudjaja ([email protected]) to netcdf files on 27 November 2014."
        self.attributeDictionary['references' ]  = "Ntegeka et al., 2013. EFAS-Meteo: A European daily high-resolution gridded meteorological data set. JRC Technical Reports. doi: 10.2788/51262"
        self.attributeDictionary['comment'    ]  = "Please use this dataset only for Hyper-Hydro test bed experiments. " 
        self.attributeDictionary['comment'    ] += "For using it and publishing it, please acknowledge its source: 5km Gridded Meteo Database (C) European Commission - JRDC, 2014 and its reference: Ntegeka et al., 2013 (doi: 10.2788/51262). "
        self.attributeDictionary['comment'    ] += "The data are in European ETRS projection, 5km grid; http://en.wikipedia.org/wiki/European_grid. "

        self.attributeDictionary['description']  = netcdf_attribute_description
Example #30
    def __init__(self, netcdffile, logging, vars=[]):
        """
        First try to setup a class read netcdf files
        (converted with pcr2netcdf.py)

        netcdffile: file to read the forcing data from
        logging: python logging object
        vars: list of variables to get from file
        """

        self.fname = netcdffile
        if os.path.exists(netcdffile):
            self.dataset = netCDF4.Dataset(netcdffile, mode="r")
        else:
            msg = os.path.abspath(netcdffile) + " not found!"
            logging.error(msg)
            raise ValueError(msg)

        logging.info("Reading state input from netCDF file: " + netcdffile)
        self.alldat = {}
        a = pcr.pcr2numpy(pcr.cover(0.0), 0.0).flatten()
        # Determine steps to load in mem based on estimated memory usage
        floatspermb = 1048576 / 4
        maxmb = 40
        self.maxsteps = int(maxmb * len(a) / floatspermb) + 1
        self.fstep = 0
        self.lstep = self.fstep + self.maxsteps

        self.datetime = self.dataset.variables["time"][:]
        if hasattr(self.dataset.variables["time"], "units"):
            self.timeunits = self.dataset.variables["time"].units
        else:
            self.timeunits = "Seconds since 1970-01-01 00:00:00"
        if hasattr(self.dataset.variables["time"], "calendar"):
            self.calendar = self.dataset.variables["time"].calendar
        else:
            self.calendar = "gregorian"
        self.datetimelist = cftime.num2date(self.datetime,
                                            self.timeunits,
                                            calendar=self.calendar)

        try:
            self.x = self.dataset.variables["x"][:]
        except:
            self.x = self.dataset.variables["lon"][:]

        # Now check Y values to see if we must flip the data
        try:
            self.y = self.dataset.variables["y"][:]
        except:
            self.y = self.dataset.variables["lat"][:]

        # test if 1D or 2D array
        if len(self.y.shape) == 1:
            if self.y[0] > self.y[-1]:
                self.flip = False
            else:
                self.flip = True
        else:  # not sure if this works
            self.y = self.y[:][0]
            if self.y[0] > self.y[-1]:
                self.flip = False
            else:
                self.flip = True

        x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))),
                          np.nan)[0, :]
        y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))),
                          np.nan)[:, 0]

        # Get average cell size
        acc = (
            np.diff(x).mean() * 0.25
        )  # non-exact match needed because of possible rounding problems
        if self.flip:
            (self.latidx, ) = np.logical_and(
                self.y[::-1] + acc >= y.min(),
                self.y[::-1] <= y.max() + acc).nonzero()
            (self.lonidx, ) = np.logical_and(
                self.x + acc >= x.min(), self.x <= x.max() + acc).nonzero()
        else:
            (self.latidx, ) = np.logical_and(
                self.y + acc >= y.min(), self.y <= y.max() + acc).nonzero()
            (self.lonidx, ) = np.logical_and(
                self.x + acc >= x.min(), self.x <= x.max() + acc).nonzero()

        if len(self.lonidx) != len(x):
            logging.error("error in determining X coordinates in netcdf...")
            logging.error("model expects: " + str(x.min()) + " to " +
                          str(x.max()))
            logging.error("got coordinates  netcdf: " + str(self.x.min()) +
                          " to " + str(self.x.max()))
            logging.error("got len from  netcdf x: " + str(len(x)) +
                          " expected " + str(len(self.lonidx)))
            raise ValueError("X coordinates in netcdf do not match model")

        if len(self.latidx) != len(y):
            logging.error("error in determining Y coordinates in netcdf...")
            logging.error("model expects: " + str(y.min()) + " to " +
                          str(y.max()))
            logging.error("got from  netcdf: " + str(self.y.min()) + " to " +
                          str(self.y.max()))
            logging.error("got len from  netcdf y: " + str(len(y)) +
                          " expected " + str(len(self.latidx)))
            raise ValueError("Y coordinates in netcdf do not match model")

        for var in vars:
            try:
                self.alldat[var] = self.dataset.variables[var][self.fstep:self.
                                                               maxsteps]
            except:
                self.alldat.pop(var, None)
                logging.warning("Variable " + var +
                                " not found in netcdf file: " + netcdffile)
Example #31
    def initial(self):

        # make sure that you start from the script directory
        os.chdir(self.script_directory)

        # In this part (premcloop), we initiate parameters/variables/objects that are changing throughout all monte carlo samples.

        msg = "\n"
        msg += "Sample number: " + str(self.currentSampleNumber())
        msg += "\n"
        print(msg)

        # conductivities for the BCF package, see: http://pcraster.geo.uu.nl/pcraster/4.1.0/doc/modflow/bcf.html
        inp_soil_conductivity = self.model_setup['soil_conductivity'][
            int(str(self.currentSampleNumber())) - 1]
        self.soil_conductivity = pcr.spatial(pcr.scalar(inp_soil_conductivity))

        #
        # - horizontal and vertical conductivity
        self.hConductivity = self.soil_conductivity
        self.vConductivity = self.hConductivity
        # - for one layer model, vConductivity is just dummy and never used
        # - layer type, we use LAYTYPE = 0 (harmonic mean) and LAYCON = 0 (confined, constant transmissivities and storage coefficients)

        # storage coefficients for the BCF package, see: http://pcraster.geo.uu.nl/pcraster/4.1.0/doc/modflow/bcf.html
        # - sand porosity (m3.m-3)                                                                                         # TODO: Find the value from Sebastian paper.
        self.sand_porosity = pcr.spatial(pcr.scalar(0.25))
        #
        # - primary and secondary storage coefficients
        self.primary_storage_coefficient = self.sand_porosity
        self.secondary_storage_coefficient = self.primary_storage_coefficient
        # - for LAYCON = 0 (and 1), secondary_storage_coefficient is just dummy and never used

        # The following are CURRENTLY just the same for all samples.
        ############################################################################

        # defining the layer (one layer model), thickness (m), top and bottom elevations
        self.thickness = 15.0
        # - thickness value is suggested by Kim Cohen (put reference here)
        self.top_elevation = self.input_dem
        self.bottom_elevation = self.top_elevation - self.thickness

        # DIS parameters, see http://pcraster.geo.uu.nl/pcraster/4.1.0/doc/modflow/dis.html
        #  - time and spatial units
        self.ITMUNI = 4  # indicating that the time unit is "days"
        self.LENUNI = 2  # indicating that the spatial unit is "meters"
        # - PERLEN: duration of stress period (days)
        # -- 10 minute stress period = 600 seconds stress period
        self.length_of_stress_period = 600. / (24. * 60. * 60.)
        self.PERLEN = self.length_of_stress_period
        # - NSTP: number of sub time steps within the PERLEN
        self.NSTP = 1
        # - TSMULT # always 1 by default
        self.TSMULT = 1
        # - SSTR: transient (0) or steady state (1)
        self.SSTR = 0

        # values for the IBOUND of the BAS package, see: http://pcraster.geo.uu.nl/pcraster/4.1.0/doc/modflow/bas.html
        # - Alternative 1: all cells are active
        self.ibound = pcr.spatial(pcr.nominal(1.))
        #~ # - Alternative 2: in the ocean region (x < -75 m), assume the heads will follow the tides
        #~ self.ibound = pcr.ifthenelse(pcr.xcoordinate(clone_map) < -75., pcr.nominal(-1.), pcr.nominal(1.))
        #~ pcr.aguila(self.ibound)

        # ibound with regional groundwater head values
        if self.model_setup['regional_groundwater_head']['activation']:
            self.ibound = pcr.ifthenelse(
                pcr.xcoordinate(pcr.boolean(1.0)) >=
                self.model_setup['regional_groundwater_head']['starting_x'],
                pcr.nominal(-1.), self.ibound)

        # parameter values for the SOLVER package
        self.MXITER = 50  # maximum number of outer iterations           # Deltares use 50
        self.ITERI = 30  # number of inner iterations                   # Deltares use 30
        self.NPCOND = 1  # 1 - Modified Incomplete Cholesky, 2 - Polynomial matrix conditioning method
        self.HCLOSE = 0.001  # HCLOSE (unit: m)
        self.RCLOSE = 0.001  # RCLOSE (unit: m3)
        self.RELAX = 1.00  # relaxation parameter used with NPCOND = 1
        self.NBPOL = 2  # indicates whether the estimate of the upper bound on the maximum eigenvalue is 2.0 (but we do not use it, since NPCOND = 1)
        self.DAMP = 1  # no damping (DAMP introduced in MODFLOW 2000)

        # the initial head for the BAS package, see: http://pcraster.geo.uu.nl/pcraster/4.1.0/doc/modflow/bas.html
        # - Gerben recommends starting with 0.8 m
        self.initial_head = pcr.spatial(pcr.scalar(0.8))

        # regional groundwater head
        if self.model_setup['regional_groundwater_head']['activation']:
            self.initial_head = pcr.ifthenelse(
                pcr.xcoordinate(pcr.boolean(1.0)) >=
                self.model_setup['regional_groundwater_head']['starting_x'],
                self.model_setup['regional_groundwater_head']['value'],
                self.initial_head)

        # initialise timeoutput object for reporting time series in txt files
        # - groundwater head
        self.head_obs_point = pcr.readmap(
            "input_files/groundwater_well_coordinates.map")
        self.reportGwHeadTss = TimeoutputTimeseries("groundwater_head",
                                                    self,
                                                    self.head_obs_point,
                                                    noHeader=False)

        # Save model parameter values (and other information) to a txt file.
        # - output directory (that will contain result)
        output_directory = self.output_folder + "/" + str(
            self.currentSampleNumber())
        try:
            os.makedirs(output_directory)
        except:
            pass
        # - file name for this information
        information_file = output_directory + "/" + "info.txt"
        file_info = open(information_file, 'w')
        write_line = ""
        # - DEM
        write_line += "DEM (m): " + str(self.model_setup['dem_file_name'])
        write_line += "\n"
        # - tide
        write_line += "Tide input file name: " + str(
            self.model_setup['tide_file_name'])
        write_line += "\n"
        # - starting date
        write_line += "Starting date and time: " + str(
            self.model_setup['start_datetime'])
        write_line += "\n"
        # - soil conductivity
        write_line += "Soil conductivity (m.day-1): " + str(
            inp_soil_conductivity)
        write_line += "\n"
        # - regional groundwater head (optional):
        if self.model_setup['regional_groundwater_head']['activation']:
            write_line += "Regional groundwater head is fixed to : " + str(
                self.model_setup['regional_groundwater_head']['value'])
            write_line += "\n"
            write_line += "for all cells in  with x coordinate >=: " + str(
                self.model_setup['regional_groundwater_head']['starting_x'])
            write_line += "\n"
        else:
            write_line += "NO regional groundwater head. "
            write_line += "\n"
        # - sample number
        write_line += "PCRaster sample number: " + str(
            self.currentSampleNumber())
        write_line += "\n"
        file_info.write(write_line)
        # - close the file
        file_info.close()

        # initiate netcdf files for groundwater head
        netcdf_variable_name = "groundwater_head"
        self.netcdf_file_name = self.output_folder + "/" + str(
            self.currentSampleNumber()) + "/" + netcdf_variable_name + ".nc"
        self.model_setup['netcdf_attributes']['notes'] = write_line
        self.netcdf_writer.create_netcdf_file(
            self.netcdf_file_name, self.model_setup['netcdf_attributes'])
        # - create variable
        netcdf_variable_unit = "m"
        self.netcdf_writer.create_variable(self.netcdf_file_name,
                                           netcdf_variable_name,
                                           netcdf_variable_unit)
Example #32
    def __init__(
        self,
        netcdffile,
        logger,
        starttime,
        timesteps,
        EPSG="EPSG:4326",
        timestepsecs=86400,
        metadata={},
        zlib=True,
        Format="NETCDF4",
        maxbuf=25,
        least_significant_digit=None,
    ):
        """
        Under construction
        """

        self.EPSG = EPSG
        self.zlib = zlib
        self.Format = Format
        self.least_significant_digit = least_significant_digit

        def date_range(start, end, timestepsecs):
            r = int(
                (end + dt.timedelta(seconds=timestepsecs) - start).total_seconds()
                / timestepsecs
            )
            return [start + dt.timedelta(seconds=(timestepsecs * i)) for i in range(r)]

        self.logger = logger
        # Do not allow a max buffer larger than the number of timesteps
        self.maxbuf = maxbuf if timesteps >= maxbuf else timesteps
        self.ncfile = netcdffile
        self.timesteps = timesteps
        rows = pcr.clone().nrRows()
        cols = pcr.clone().nrCols()
        cellsize = pcr.clone().cellSize()
        yupper = pcr.clone().north()
        xupper = pcr.clone().west()
        x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[0, :]
        y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[:, 0]

        # Shift one timestep as we output at the end
        # starttime = starttime + dt.timedelta(seconds=timestepsecs)
        end = starttime + dt.timedelta(seconds=timestepsecs * (self.timesteps - 1))

        timeList = date_range(starttime, end, timestepsecs)
        self.timestepbuffer = np.zeros((self.maxbuf, len(y), len(x)))
        self.bufflst = {}
        self.buffdirty = False

        globmetadata.update(metadata)

        prepare_nc(
            self.ncfile,
            timeList,
            x,
            y,
            globmetadata,
            logger,
            Format=self.Format,
            EPSG=EPSG,
            zlib=self.zlib,
            least_significant_digit=self.least_significant_digit,
        )
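The nested date_range helper defines the output time axis; a standalone check of its behaviour using only the standard library (dates are hypothetical):

import datetime as dt

def date_range(start, end, timestepsecs):
    r = int((end + dt.timedelta(seconds=timestepsecs) - start).total_seconds() / timestepsecs)
    return [start + dt.timedelta(seconds=(timestepsecs * i)) for i in range(r)]

start = dt.datetime(2000, 1, 1)
end = start + dt.timedelta(days=2)
print(date_range(start, end, 86400))   # three daily steps: Jan 1, Jan 2 and Jan 3, 2000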
Example #33
    def __init__(self, netcdffile, logging, vars=[]):
        """
        First try to setup a class read netcdf files
        (converted with pcr2netcdf.py)

        netcdffile: file to read the forcing data from
        logging: python logging object
        vars: list of variables to get from file
        """

        self.fname = netcdffile
        if os.path.exists(netcdffile):
            self.dataset = netCDF4.Dataset(netcdffile, mode="r")
        else:
            msg = os.path.abspath(netcdffile) + " not found!"
            logging.error(msg)
            raise ValueError(msg)

        logging.info("Reading state input from netCDF file: " + netcdffile)
        self.alldat = {}
        a = pcr.pcr2numpy(pcr.cover(0.0), 0.0).flatten()
        # Determine steps to load in mem based on estimated memory usage
        floatspermb = 1048576 / 4
        maxmb = 40
        self.maxsteps = int(maxmb * len(a) / floatspermb) + 1
        self.fstep = 0
        self.lstep = self.fstep + self.maxsteps

        self.datetime = self.dataset.variables["time"][:]
        if hasattr(self.dataset.variables["time"], "units"):
            self.timeunits = self.dataset.variables["time"].units
        else:
            self.timeunits = "Seconds since 1970-01-01 00:00:00"
        if hasattr(self.dataset.variables["time"], "calendar"):
            self.calendar = self.dataset.variables["time"].calendar
        else:
            self.calendar = "gregorian"
        self.datetimelist = cftime.num2date(
            self.datetime, self.timeunits, calendar=self.calendar
        )

        try:
            self.x = self.dataset.variables["x"][:]
        except:
            self.x = self.dataset.variables["lon"][:]

        # Now check Y values to see if we must flip the data
        try:
            self.y = self.dataset.variables["y"][:]
        except:
            self.y = self.dataset.variables["lat"][:]

        # test if 1D or 2D array
        if len(self.y.shape) == 1:
            if self.y[0] > self.y[-1]:
                self.flip = False
            else:
                self.flip = True
        else:  # not sure if this works
            self.y = self.y[:][0]
            if self.y[0] > self.y[-1]:
                self.flip = False
            else:
                self.flip = True

        x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[0, :]
        y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[:, 0]

        # Get average cell size
        acc = (
            np.diff(x).mean() * 0.25
        )  # non-exact match needed because of possible rounding problems
        if self.flip:
            (self.latidx,) = np.logical_and(
                self.y[::-1] + acc >= y.min(), self.y[::-1] <= y.max() + acc
            ).nonzero()
            (self.lonidx,) = np.logical_and(
                self.x + acc >= x.min(), self.x <= x.max() + acc
            ).nonzero()
        else:
            (self.latidx,) = np.logical_and(
                self.y + acc >= y.min(), self.y <= y.max() + acc
            ).nonzero()
            (self.lonidx,) = np.logical_and(
                self.x + acc >= x.min(), self.x <= x.max() + acc
            ).nonzero()

        if len(self.lonidx) != len(x):
            logging.error("error in determining X coordinates in netcdf...")
            logging.error("model expects: " + str(x.min()) + " to " + str(x.max()))
            logging.error(
                "got coordinates  netcdf: "
                + str(self.x.min())
                + " to "
                + str(self.x.max())
            )
            logging.error(
                "got len from  netcdf x: "
                + str(len(x))
                + " expected "
                + str(len(self.lonidx))
            )
            raise ValueError("X coordinates in netcdf do not match model")

        if len(self.latidx) != len(y):
            logging.error("error in determining Y coordinates in netcdf...")
            logging.error("model expects: " + str(y.min()) + " to " + str(y.max()))
            logging.error(
                "got from  netcdf: " + str(self.y.min()) + " to " + str(self.y.max())
            )
            logging.error(
                "got len from  netcdf y: "
                + str(len(y))
                + " expected "
                + str(len(self.latidx))
            )
            raise ValueError("Y coordinates in netcdf do not match model")

        for var in vars:
            try:
                self.alldat[var] = self.dataset.variables[var][
                    self.fstep : self.maxsteps
                ]
            except KeyError:
                self.alldat.pop(var, None)
                logging.warning(
                    "Variable " + var + " not found in netcdf file: " + netcdffile
                )
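
The constructor above only records latidx, lonidx and a flip flag; a minimal sketch of how they might be applied when a timestep is read back (the helper name and the assumption that alldat is ordered time, lat, lon are mine, not the original author's):

    def _get_timestep_grid(self, var, step):
        """Hypothetical helper (not in the original class): return one timestep
        of `var` as a 2D array oriented north-up, using the indices from __init__."""
        # slice the buffered block (time, lat, lon) down to the model window
        data = self.alldat[var][
            step,
            self.latidx.min() : self.latidx.max() + 1,
            self.lonidx.min() : self.lonidx.max() + 1,
        ]
        # reverse the rows when the netCDF latitude axis runs south to north
        if self.flip:
            data = data[::-1, :]
        return data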
Example #34
0
    def evaluateAllModelResults(self,globalCloneMapFileName,\
                                catchmentClassFileName,\
                                lddMapFileName,\
                                cellAreaMapFileName,\
                                pcrglobwb_output,\
                                analysisOutputDir="",\
                                tmpDir = "/dev/shm/edwin_grdc_"):     

        # output directory for all analyses for all stations
        analysisOutputDir   = str(analysisOutputDir)
        self.chartOutputDir = analysisOutputDir+"/chart/"
        self.tableOutputDir = analysisOutputDir+"/table/"
        #
        if analysisOutputDir == "": self.chartOutputDir = "chart/"
        if analysisOutputDir == "": self.tableOutputDir = "table/"
        #
        # make the chart and table directories:
        os.system('rm -r '+self.chartOutputDir+"*")
        os.system('rm -r '+self.tableOutputDir+"*")
        os.makedirs(self.chartOutputDir)
        os.makedirs(self.tableOutputDir)
        
        # cloneMap for all pcraster operations
        pcr.setclone(globalCloneMapFileName)
        cloneMap = pcr.boolean(1)
        self.cell_size_in_arc_degree = vos.getMapAttributesALL(globalCloneMapFileName)['cellsize']
        
        lddMap = pcr.lddrepair(pcr.readmap(lddMapFileName))
        cellArea = pcr.scalar(pcr.readmap(cellAreaMapFileName))
        
        # The landMaskClass map contains the nominal classes for all landmask regions. 
        landMaskClass = pcr.nominal(cloneMap)  # default: if catchmentClassFileName is not given
        if catchmentClassFileName != None:
            landMaskClass = pcr.nominal(pcr.readmap(catchmentClassFileName))

        # model catchment areas and coordinates
        catchmentAreaAll = pcr.catchmenttotal(cellArea, lddMap) / (1000*1000)  # unit: km2
        xCoordinate = pcr.xcoordinate(cloneMap)
        yCoordinate = pcr.ycoordinate(cloneMap)

	print "Jaaaaaa"
        
        for id in self.list_of_grdc_ids: 

            logger.info("Evaluating simulated discharge to the grdc observation at "+str(self.attributeGRDC["id_from_grdc"][str(id)])+".")
            
            # identify model pixel
            self.identifyModelPixel(tmpDir,catchmentAreaAll,landMaskClass,xCoordinate,yCoordinate,str(id))

            # evaluate model results to GRDC data
            self.evaluateModelResultsToGRDC(str(id),pcrglobwb_output,catchmentClassFileName,tmpDir)
            
        # write the summary to a table 
        summary_file = analysisOutputDir+"summary.txt"
        #
        logger.info("Writing the summary for all stations to the file: "+str(summary_file)+".")
        #
        # prepare the file:
        summary_file_handle = open(summary_file,"w")
        #
        # write the header
        summary_file_handle.write( ";".join(self.grdc_dict_keys)+"\n")
        #
        # write the content
        for id in self.list_of_grdc_ids:
            rowLine  = ""
            for key in self.grdc_dict_keys: rowLine += str(self.attributeGRDC[key][str(id)]) + ";"   
            rowLine = rowLine[0:-1] + "\n"
            summary_file_handle.write(rowLine)
        summary_file_handle.close()           
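
A minimal call sketch for the method above; the `evaluation` object and every path below are placeholders, not values from the original project:

# assumed: `evaluation` is an instance of the GRDC evaluation class this method belongs to
evaluation.evaluateAllModelResults(
    globalCloneMapFileName="clone_30min.map",   # placeholder clone map
    catchmentClassFileName=None,                # fall back to a single landmask class
    lddMapFileName="lddsound_30min.map",        # placeholder drainage direction map
    cellAreaMapFileName="cellarea30min.map",    # placeholder cell area map
    pcrglobwb_output={"folder": "netcdf/"},     # placeholder model output descriptor
    analysisOutputDir="analysis/",
)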
def main():
	#-initialization
	# MVs
	MV= -999.
	# minimum catchment size to process
	catchmentSizeLimit= 0.0
	# period of interest, start and end year
	startYear= 1961
	endYear= 2010
	# maps
	cloneMapFileName= '/data/hydroworld/PCRGLOBWB20/input30min/global/Global_CloneMap_30min.map'
	lddFileName= '/data/hydroworld/PCRGLOBWB20/input30min/routing/lddsound_30min.map'
	cellAreaFileName= '/data/hydroworld/PCRGLOBWB20/input30min/routing/cellarea30min.map'
	# set clone 
	pcr.setclone(cloneMapFileName)
	# output
	outputPath= '/scratch/rens/reservedrecharge'
	percentileMapFileName= os.path.join(outputPath,'q%03d_cumsec.map')
	textFileName= os.path.join(outputPath,'groundwater_environmentalflow_%d.txt')
	fractionReservedRechargeMapFileName= os.path.join(outputPath,'fraction_reserved_recharge%d.map')
	fractionMinimumReservedRechargeMapFileName= os.path.join(outputPath,'minimum_fraction_reserved_recharge%d.map')
	# input
	inputPath= '/nfsarchive/edwin-emergency-backup-DO-NOT-DELETE/rapid/edwin/05min_runs_results/2015_04_27/non_natural_2015_04_27/global/netcdf/'
	# define data to be read from netCDF files
	ncData= {}
	variableName= 'totalRunoff'
	ncData[variableName]= {}
	ncData[variableName]['fileName']= os.path.join(inputPath,'totalRunoff_monthTot_output.nc')
	ncData[variableName]['fileRoot']= os.path.join(outputPath,'qloc')
	ncData[variableName]['annualAverage']= pcr.scalar(0)	
	variableName= 'gwRecharge'
	ncData[variableName]= {}
	ncData[variableName]['fileName']= os.path.join(inputPath,'gwRecharge_monthTot_output.nc')
	ncData[variableName]['fileRoot']= os.path.join(outputPath,'gwrec')
	ncData[variableName]['annualAverage']= pcr.scalar(0)
	variableName= 'discharge'
	ncData[variableName]= {}
	ncData[variableName]['fileName']= os.path.join(inputPath,'totalRunoff_monthTot_output.nc')
	ncData[variableName]['fileRoot']= os.path.join(outputPath,'qc')
	ncData[variableName]['annualAverage']= pcr.scalar(0)
	ncData[variableName]['mapStack']= np.array([])
	# percents and environmental flow condition set as percentile
	percents= list(range(10,110,10))
	environmentalFlowPercent= 10
	if environmentalFlowPercent not in percents:
		percents.append(environmentalFlowPercent)
		percents.sort()

	#-start
	# obtain attributes
	pcr.setclone(cloneMapFileName)
	cloneSpatialAttributes= spatialAttributes(cloneMapFileName)
	years= range(startYear,endYear+1)
	# output path
	if not os.path.isdir(outputPath):
		os.makedirs(outputPath)
	os.chdir(outputPath)
	# compute catchments
	ldd= pcr.readmap(lddFileName)
	cellArea= pcr.readmap(cellAreaFileName)
	catchments= pcr.catchment(ldd,pcr.pit(ldd))
	fractionWater= pcr.scalar(0.0) # temporary!
	lakeMask= pcr.boolean(0) # temporary!
	pcr.report(catchments,os.path.join(outputPath,'catchments.map'))
	maximumCatchmentID= int(pcr.cellvalue(pcr.mapmaximum(pcr.scalar(catchments)),1)[0])
	# iterate over years
	weight= float(len(years))**-1
	for year in years:
		#-echo year
		print(' - processing year %d' % year)
		#-process data
		startDate= datetime.datetime(year,1,1)
		endDate= datetime.datetime(year,12,31)
		timeSteps= endDate.toordinal()-startDate.toordinal()+1
		dynamicIncrement= 1
		for variableName in ncData.keys():
			print('   extracting %s' % variableName, end=' ')
			ncFileIn= ncData[variableName]['fileName']
			#-process data
			pcrDataSet= pcrObject(variableName, ncData[variableName]['fileRoot'],\
				ncFileIn,cloneSpatialAttributes, pcrVALUESCALE= pcr.Scalar, resamplingAllowed= True,\
				dynamic= True, dynamicStart= startDate, dynamicEnd= endDate, dynamicIncrement= dynamicIncrement, ncDynamicDimension= 'time')
			pcrDataSet.initializeFileInfo()
			pcrDataSet.processFileInfo()
			for fileInfo in list(pcrDataSet.fileProcessInfo.values())[0]:
				tempFileName= fileInfo[1]
				variableField= pcr.readmap(tempFileName)
				variableField= pcr.ifthen(pcr.defined(ldd),pcr.cover(variableField,0))
				if variableName == 'discharge':
					dayNumber= int(os.path.splitext(tempFileName)[1].strip('.'))
					date= datetime.date(year,1,1)+datetime.timedelta(dayNumber-1)
					numberDays= calendar.monthrange(year,date.month)[1]
					variableField= pcr.max(0,pcr.catchmenttotal(variableField*cellArea,ldd)/(numberDays*24*3600))
				ncData[variableName]['annualAverage']+= weight*variableField
				if 'mapStack' in ncData[variableName].keys():
					tempArray= pcr2numpy(variableField,MV)
					mask= tempArray != MV
					if ncData[variableName]['mapStack'].size != 0:
						ncData[variableName]['mapStack']= np.vstack((ncData[variableName]['mapStack'],tempArray[mask]))
					else:
						ncData[variableName]['mapStack']= tempArray[mask]
						coordinates= np.zeros((ncData[variableName]['mapStack'].size,2))
						pcr.setglobaloption('unitcell')
						tempArray= pcr2numpy(pcr.ycoordinate(pcr.boolean(1))+0.5,MV)
						coordinates[:,0]= tempArray[mask]
						tempArray= pcr2numpy(pcr.xcoordinate(pcr.boolean(1))+0.5,MV)
						coordinates[:,1]= tempArray[mask]      
				os.remove(tempFileName)				
			# delete object
			pcrDataSet= None
			del pcrDataSet
			# close line on screen
			print()
	# report annual averages
	key= 'annualAverage'
	ncData['discharge'][key]/= 12
	for variableName in ncData.keys():
		ncData[variableName][key]= pcr.max(0,ncData[variableName][key])
		pcr.report(ncData[variableName][key],\
			os.path.join(outputPath,'%s_%s.map' % (variableName,key)))
	# remove aux.xml
	for tempFileName in os.listdir(outputPath):
		if 'aux.xml' in tempFileName:
			os.remove(tempFileName)
	# sort data
	print('sorting discharge data')
	variableName= 'discharge'
	key= 'mapStack'
	indices= np.zeros((ncData[variableName][key].shape),np.uint)
	for iCnt in range(ncData[variableName][key].shape[1]):
		indices[:,iCnt]= ncData[variableName][key][:,iCnt].argsort(kind= 'mergesort')
		ncData[variableName][key][:,iCnt]= ncData[variableName][key][:,iCnt][indices[:,iCnt]]
	# extract values for percentiles
	print('returning maps')
	for percent in percents:
		percentile= 0.01*percent
		index0= min(ncData[variableName][key].shape[0]-1,int(percentile*ncData[variableName][key].shape[0]))
		index1= min(ncData[variableName][key].shape[0]-1,int(percentile*ncData[variableName][key].shape[0])+1)
		x0= float(index0)/ncData[variableName][key].shape[0]
		x1= float(index1)/ncData[variableName][key].shape[0]
		if x0 != x1:
			y= ncData[variableName][key][index0,:]+(percentile-x0)*\
				 (ncData[variableName][key][index1,:]-ncData[variableName][key][index0,:])/(x1-x0)
		else:
			y= ncData[variableName][key][index0,:]
		# convert a slice of the stack into an array
		tempArray= np.ones((cloneSpatialAttributes.numberRows,cloneSpatialAttributes.numberCols))*MV
		for iCnt in range(coordinates.shape[0]):
			row= int(coordinates[iCnt,0])-1
			col= int(coordinates[iCnt,1])-1
			tempArray[row,col]= y[iCnt]
		variableField= numpy2pcr(pcr.Scalar,tempArray,MV)
		pcr.report(variableField,percentileMapFileName % percent)
		if percent == environmentalFlowPercent:
			ncData[variableName]['environmentalFlow']= variableField
		tempArray= None; variableField= None
		del tempArray, variableField
	# process environmental flow
	# initialize map of reserved recharge fraction
	fractionReservedRechargeMap= pcr.ifthen(ncData[variableName]['environmentalFlow'] < 0,pcr.scalar(0))
	fractionMinimumReservedRechargeMap= pcr.ifthen(ncData[variableName]['environmentalFlow'] < 0,pcr.scalar(0))
	textFile= open(textFileName % environmentalFlowPercent,'w')
	hStr= 'Environmental flow analysis per basin, resulting in a map of renewable, exploitable recharge, for the %d%s quantile of discharge\n' % (environmentalFlowPercent,'%')
	hStr+= 'Returns Q_%d/R, the fraction of reserved recharge needed to fully sustain the environmental flow requirement defined as the %d percentile,\n' % (environmentalFlowPercent, environmentalFlowPercent)
	hStr+= 'and Q*_%d/R, a reduced fraction that takes the availability of surface water into account\n' % environmentalFlowPercent
	textFile.write(hStr)
	print(hStr)
	# create header to display on screen and write to file
	# reported are: 1: ID, 2: Area, 3: average discharge, 4: environmental flow, 5: average recharge,
	# 6: Q_%d/Q, 7: Q_%d/R_Avg, 8: R_Avg/Q_Avg, 9: Q*_%d/R_Avg
	hStr= '%6s,%15s,%15s,%15s,%15s,%15s,%15s,%15s,%15s\n' % \
		('ID','Area [km2]','Q_Avg [m3]','Q_%d [m3]' % environmentalFlowPercent,'R_Avg [m3]','Q_%d/Q_Avg [-]' % environmentalFlowPercent,\
			'Q_%d/R_Avg [-]' % environmentalFlowPercent,'R_Avg/Q_Avg [-]','Q*_%d/R_Avg [-]' % environmentalFlowPercent)
	textFile.write(hStr)
	print(hStr)
	for catchment in range(1,maximumCatchmentID+1):
		# create catchment mask and check whether it does not coincide with a lake
		catchmentMask= catchments == catchment
		catchmentSize= pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,cellArea*1.e-6)),1)[0]
		#~ ##~ if pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,pcr.scalar(lakeMask))),1) <> \
				#~ ##~ pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,pcr.scalar(catchmentMask))),1)[0] and \
				#~ ##~ catchmentSize > catchmentSizeLimit:
		key= 'annualAverage'
		variableName= 'discharge'			
		if bool(pcr.cellvalue(pcr.maptotal(pcr.ifthen((ldd == 5) & catchmentMask,\
				pcr.scalar(ncData[variableName][key] > 0))),1)[0]) and catchmentSize >= catchmentSizeLimit:
			# valid catchment, process
			# all volumes are in m3 per year
			key= 'annualAverage'
			catchmentAverageDischarge= pcr.cellvalue(pcr.mapmaximum(pcr.ifthen(catchmentMask & (ldd == 5),\
				ncData[variableName][key])),1)[0]*365.25*3600*24
			variableName= 'gwRecharge'
			catchmentRecharge= pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,ncData[variableName][key]*\
				(1.-fractionWater)*cellArea)),1)[0]
			variableName= 'totalRunoff'
			catchmentRunoff= pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,ncData[variableName][key]*\
				cellArea)),1)[0]
			key= 'environmentalFlow'
			variableName= 'discharge'			
			catchmentEnvironmentalFlow= pcr.cellvalue(pcr.mapmaximum(pcr.ifthen(catchmentMask & (ldd == 5),\
				ncData[variableName][key])),1)[0]*365.25*3600*24
			catchmentRunoff= max(catchmentRunoff,catchmentEnvironmentalFlow)
			if catchmentAverageDischarge > 0.:
				fractionEnvironmentalFlow= catchmentEnvironmentalFlow/catchmentAverageDischarge
				fractionGroundWaterContribution= catchmentRecharge/catchmentAverageDischarge
			else:
				fractionEnvironmentalFlow= 0.
				fractionGroundWaterContribution= 0.
			if catchmentRecharge > 0:
				fractionReservedRecharge= min(1,catchmentEnvironmentalFlow/catchmentRecharge)
			else:
				fractionReservedRecharge= 1.0
			fractionMinimumReservedRecharge= (fractionReservedRecharge+fractionGroundWaterContribution-\
				fractionReservedRecharge*fractionGroundWaterContribution)*fractionReservedRecharge
			#~ # echo to screen, and write to file and map
			wStr= '%6s,%15.1f,%15.6g,%15.6g,%15.6g,%15.6f,%15.6f,%15.6f,%15.6f\n' % \
				(catchment,catchmentSize,catchmentAverageDischarge,catchmentEnvironmentalFlow,catchmentRecharge,\
					fractionEnvironmentalFlow,fractionReservedRecharge,fractionGroundWaterContribution,fractionMinimumReservedRecharge)
			print(wStr)
			textFile.write(wStr)
			# update maps
			fractionReservedRechargeMap= pcr.ifthenelse(catchmentMask,\
				pcr.scalar(fractionReservedRecharge),fractionReservedRechargeMap)
			fractionMinimumReservedRechargeMap= pcr.ifthenelse(catchmentMask,\
				pcr.scalar(fractionMinimumReservedRecharge),fractionMinimumReservedRechargeMap)
	#-report map and close text file
	pcr.report(fractionReservedRechargeMap,fractionReservedRechargeMapFileName % environmentalFlowPercent)
	pcr.report(fractionMinimumReservedRechargeMap,fractionMinimumReservedRechargeMapFileName % environmentalFlowPercent)
	# close text file
	textFile.close()
	# finished
	print('all done!')
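
The percentile block above (index0, index1 and a linear weight over the sorted stack) is easier to follow in isolation. A minimal NumPy sketch of the same column-wise interpolation, using made-up data rather than the PCR-GLOBWB output:

import numpy as np

# toy stack: 120 monthly values for 3 cells (rows = time, columns = cells), sorted per column
rng = np.random.default_rng(0)
stack = np.sort(rng.random((120, 3)), axis=0)

def column_percentile(stack, percent):
    """Interpolate linearly between the two bracketing rows of a sorted stack."""
    p = 0.01 * percent
    n = stack.shape[0]
    index0 = min(n - 1, int(p * n))
    index1 = min(n - 1, index0 + 1)
    x0, x1 = float(index0) / n, float(index1) / n
    if x0 != x1:
        return stack[index0, :] + (p - x0) * (stack[index1, :] - stack[index0, :]) / (x1 - x0)
    return stack[index0, :]

q10 = column_percentile(stack, 10)  # one 10th-percentile value per cell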
Example #36
0
    def __init__(
        self,
        netcdffile,
        logger,
        starttime,
        timesteps,
        EPSG="EPSG:4326",
        timestepsecs=86400,
        metadata={},
        zlib=True,
        Format="NETCDF4",
        maxbuf=25,
        least_significant_digit=None,
    ):
        """
        Under construction
        """

        self.EPSG = EPSG
        self.zlib = zlib
        self.Format = Format
        self.least_significant_digit = least_significant_digit

        def date_range(start, end, timestepsecs):
            r = int((end + dt.timedelta(seconds=timestepsecs) -
                     start).total_seconds() / timestepsecs)
            return [
                start + dt.timedelta(seconds=(timestepsecs * i))
                for i in range(r)
            ]

        self.logger = logger
        # Do not allow a max buffer larger than the number of timesteps
        self.maxbuf = maxbuf if timesteps >= maxbuf else timesteps
        self.ncfile = netcdffile
        self.timesteps = timesteps
        rows = pcr.clone().nrRows()
        cols = pcr.clone().nrCols()
        cellsize = pcr.clone().cellSize()
        yupper = pcr.clone().north()
        xupper = pcr.clone().west()
        x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))),
                          np.nan)[0, :]
        y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))),
                          np.nan)[:, 0]

        # Shift one timestep as we output at the end
        # starttime = starttime + dt.timedelta(seconds=timestepsecs)
        end = starttime + dt.timedelta(seconds=timestepsecs *
                                       (self.timesteps - 1))

        timeList = date_range(starttime, end, timestepsecs)
        self.timestepbuffer = np.zeros((self.maxbuf, len(y), len(x)))
        self.bufflst = {}
        self.buffdirty = False

        globmetadata.update(metadata)

        prepare_nc(
            self.ncfile,
            timeList,
            x,
            y,
            globmetadata,
            logger,
            Format=self.Format,
            EPSG=EPSG,
            zlib=self.zlib,
            least_significant_digit=self.least_significant_digit,
        )
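
A minimal construction sketch for the writer above. It assumes the PCRaster clone is set before instantiation (the constructor reads the grid from pcr.clone()); the class name netcdfoutput, the clone map and the metadata are placeholders:

import datetime as dt
import logging

import pcraster as pcr

pcr.setclone("mask.map")                 # placeholder clone map that defines the grid
logger = logging.getLogger("netcdf_writer")

writer = netcdfoutput(                   # assumed name of the class this __init__ belongs to
    "outmaps.nc",                        # placeholder output file
    logger,
    starttime=dt.datetime(2000, 1, 1),
    timesteps=365,
    timestepsecs=86400,                  # daily output
    metadata={"title": "example run"},   # merged into globmetadata before prepare_nc
    maxbuf=25,
)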
Example #37
0
def getPCRcoords(PCRmap, missing_value_pcr=-999):
    """
    Get all vertices coordinates of a PCRaster map.

    Input:
	-----
	pcraster map (preferrably landmask)
	value for MV (optional, default at -999)

    Output:
	------
	list of (x,y) coordinates of each polygon

    """
    # Get coordinates as numpy array:
    # upper left coordinates
    pcr.setglobaloption("coorul")

    xcoord_pcr_ul_map = pcr.xcoordinate(PCRmap)
    xcoord_pcr_ul_np = pcr.pcr2numpy(xcoord_pcr_ul_map, missing_value_pcr)

    ycoord_pcr_ul_map = pcr.ycoordinate(PCRmap)
    ycoord_pcr_ul_np = pcr.pcr2numpy(ycoord_pcr_ul_map, missing_value_pcr)

    # lower right coordinates
    pcr.setglobaloption("coorlr")

    xcoord_pcr_lr_map = pcr.xcoordinate(PCRmap)
    xcoord_pcr_lr_np = pcr.pcr2numpy(xcoord_pcr_lr_map, missing_value_pcr)

    ycoord_pcr_lr_map = pcr.ycoordinate(PCRmap)
    ycoord_pcr_lr_np = pcr.pcr2numpy(ycoord_pcr_lr_map, missing_value_pcr)

    # centroid coordinates
    pcr.setglobaloption("coorcentre")

    xcoord_pcr_centr_map = pcr.xcoordinate(PCRmap)
    xcoord_pcr_centr_np = pcr.pcr2numpy(xcoord_pcr_centr_map,
                                        missing_value_pcr)

    ycoord_pcr_centr_map = pcr.ycoordinate(PCRmap)
    ycoord_pcr_centr_np = pcr.pcr2numpy(ycoord_pcr_centr_map,
                                        missing_value_pcr)

    # Construct collection of polygon vertices:
    # number of arrays/elements to loop over and/or construct new arrays
    array_count_pcr = len(ycoord_pcr_lr_np)
    elements_per_array_pcr = np.size(ycoord_pcr_lr_np) / array_count_pcr
    nonmiss_val_per_array_pcr = np.sum(ycoord_pcr_lr_np != missing_value_pcr)

    # filling empty arrays while looping over data
    i, j = np.where(xcoord_pcr_lr_np != missing_value_pcr)
    xcoord_pcr_lr_np_nonmiss = xcoord_pcr_lr_np[i, j]
    xcoord_pcr_ul_np_nonmiss = xcoord_pcr_ul_np[i, j]
    xcoord_pcr_ll_np_nonmiss = xcoord_pcr_ul_np[i, j]
    xcoord_pcr_ur_np_nonmiss = xcoord_pcr_lr_np[i, j]

    ycoord_pcr_lr_np_nonmiss = ycoord_pcr_lr_np[i, j]
    ycoord_pcr_ul_np_nonmiss = ycoord_pcr_ul_np[i, j]
    ycoord_pcr_ll_np_nonmiss = ycoord_pcr_lr_np[i, j]
    ycoord_pcr_ur_np_nonmiss = ycoord_pcr_ul_np[i, j]

    xcoord_pcr_centr_np_nonmiss = xcoord_pcr_centr_np[i, j]
    ycoord_pcr_centr_np_nonmiss = ycoord_pcr_centr_np[i, j]

    # corner coordinate pairs (x, y) for each cell
    ll = list(zip(xcoord_pcr_ll_np_nonmiss, ycoord_pcr_ll_np_nonmiss))
    lr = list(zip(xcoord_pcr_lr_np_nonmiss, ycoord_pcr_lr_np_nonmiss))
    ur = list(zip(xcoord_pcr_ur_np_nonmiss, ycoord_pcr_ur_np_nonmiss))
    ul = list(zip(xcoord_pcr_ul_np_nonmiss, ycoord_pcr_ul_np_nonmiss))
    # wrap all cell coordinates into a list of lists (one list per cell, with multiple tuples per cell corner)
    all_cell_coords_pcr = [[ll[i], lr[i], ur[i], ul[i]]
                           for i in range(len(ll))]

    return all_cell_coords_pcr
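
A hedged sketch of one way to consume the returned vertex list, drawing every cell as a matplotlib polygon; the landmask file name is a placeholder and the plotting code is an illustration, not part of the original function:

import matplotlib.pyplot as plt
import pcraster as pcr
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon

pcr.setclone("landmask.map")                              # placeholder landmask map
cell_coords = getPCRcoords(pcr.readmap("landmask.map"))

# one closed polygon per cell, drawn as a single collection for speed
patches = [Polygon(corners, closed=True) for corners in cell_coords]
fig, ax = plt.subplots()
ax.add_collection(PatchCollection(patches, edgecolor="grey", facecolor="none"))
ax.autoscale_view()
plt.show()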
    max_step = 5
    for i in range(1, max_step+1, 1):
        cmd = "Extending class: step "+str(i)+" from " + str(max_step)
        print(cmd)
        uniqueIDs = pcr.cover(uniqueIDs, pcr.windowmajority(uniqueIDs, 0.5))
    # - use only cells within the landmask
    uniqueIDs = pcr.ifthen(landmask, uniqueIDs)
    pcr.report(uniqueIDs, "class_ids.map")                                
    
    # cell area at 5 arc min resolution
    cellArea = vos.readPCRmapClone(cellArea05minFile,
                                   cloneMapFileName, tmp_directory)
    cellArea = pcr.ifthen(landmask, cellArea)
    
    # get a sample cell for every id
    x_min_for_each_id = pcr.areaminimum(pcr.xcoordinate(pcr.boolean(1.0)), uniqueIDs)
    sample_cells      = pcr.xcoordinate(pcr.boolean(1.0)) == x_min_for_each_id
    y_min_for_each_id = pcr.areaminimum(pcr.ycoordinate(sample_cells), uniqueIDs)
    sample_cells      = pcr.ycoordinate(sample_cells) == y_min_for_each_id
    uniqueIDs_sample  = pcr.ifthen(sample_cells, uniqueIDs)
    # - save it to a pcraster map file
    pcr.report(uniqueIDs_sample, "sample.ids")                                

    # calculate the country values 
    index = 0 # for posCnt
    for iYear in range(staYear,endYear+1):
        
        # time stamp and index for netcdf files:
        index = index + 1
        timeStamp = datetime.datetime(int(iYear), int(12), int(31), int(0))
        fulldate = '%4i-%02i-%02i'  %(int(iYear), int(12), int(31))
    def evaluateAllModelResults(self,globalCloneMapFileName,\
                                catchmentClassFileName,\
                                lddMapFileName,\
                                cellAreaMapFileName,\
                                pcrglobwb_output,\
                                analysisOutputDir="",\
                                tmpDir = None):     

        # temporary directory
        if tmpDir == None: tmpDir = self.tmpDir+"/edwin_grdc_"
        
        # output directory for all analyses for all stations
        analysisOutputDir   = str(analysisOutputDir)
        self.chartOutputDir = analysisOutputDir+"/chart/"
        self.tableOutputDir = analysisOutputDir+"/table/"
        #
        if analysisOutputDir == "": self.chartOutputDir = "chart/"
        if analysisOutputDir == "": self.tableOutputDir = "table/"
        #
        # make the chart and table directories:
        os.system('rm -r '+self.chartOutputDir+"*")
        os.system('rm -r '+self.tableOutputDir+"*")
        os.makedirs(self.chartOutputDir)
        os.makedirs(self.tableOutputDir)
        
        # cloneMap for all pcraster operations
        pcr.setclone(globalCloneMapFileName)
        cloneMap = pcr.boolean(1)
        self.cell_size_in_arc_degree = vos.getMapAttributesALL(globalCloneMapFileName)['cellsize']
        
        lddMap = pcr.lddrepair(pcr.readmap(lddMapFileName))
        cellArea = pcr.scalar(pcr.readmap(cellAreaMapFileName))
        
        # The landMaskClass map contains the nominal classes for all landmask regions. 
        landMaskClass = pcr.nominal(cloneMap)  # default: if catchmentClassFileName is not given
        if catchmentClassFileName != None:
            landMaskClass = pcr.nominal(pcr.readmap(catchmentClassFileName))

        # model catchment areas and coordinates
        catchmentAreaAll = pcr.catchmenttotal(cellArea, lddMap) / (1000*1000)  # unit: km2
        xCoordinate = pcr.xcoordinate(cloneMap)
        yCoordinate = pcr.ycoordinate(cloneMap)
        
        for id in self.list_of_grdc_ids: 

            logger.info("Evaluating simulated discharge to the grdc observation at "+str(self.attributeGRDC["id_from_grdc"][str(id)])+".")
            
            # identify model pixel
            self.identifyModelPixel(tmpDir,catchmentAreaAll,landMaskClass,xCoordinate,yCoordinate,str(id))

            # evaluate model results to GRDC data
            self.evaluateModelResultsToGRDC(str(id),pcrglobwb_output,catchmentClassFileName,tmpDir)
            
        # write the summary to a table 
        summary_file = analysisOutputDir+"summary.txt"
        #
        logger.info("Writing the summary for all stations to the file: "+str(summary_file)+".")
        #
        # prepare the file:
        summary_file_handle = open(summary_file,"w")
        #
        # write the header
        summary_file_handle.write( ";".join(self.grdc_dict_keys)+"\n")
        #
        # write the content
        for id in self.list_of_grdc_ids:
            rowLine  = ""
            for key in self.grdc_dict_keys: rowLine += str(self.attributeGRDC[key][str(id)]) + ";"   
            rowLine = rowLine[0:-1] + "\n"
            summary_file_handle.write(rowLine)
        summary_file_handle.close()           
# 30 min cell ids
cell_ids_30min_file = "/data/hydroworld/others/irrigationZones/half_arc_degree/uniqueIds30min.nom.map"
cell_ids_30min = vos.readPCRmapClone(cell_ids_30min_file , clone_map_05min_file, \
                                     tmp_directory, \
                                     None, False, None, True)
cell_ids_30min = pcr.nominal(cell_ids_30min)

# reporting objects
# - for 5 arcmin resolution
latlonDict05min = {}
cloneMap = pcr.boolean(1.0)
latlonDict05min['lat'] = np.unique(
    pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
latlonDict05min['lon'] = np.unique(
    pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
report_netcdf_05min = outputNetCDF.OutputNetCDF(latlonDict05min)
# - for 30 arcmin resolution
latlonDict30min = {}
latlonDict30min['lat'] = np.arange(
    np.round(latlonDict05min['lat'][0] + 2.5 / 60 - 0.25, 2),
    latlonDict05min['lat'][-1] - 2.5 / 60, -0.5)
latlonDict30min['lon'] = np.arange(
    np.round(latlonDict05min['lon'][0] - 2.5 / 60 + 0.25, 2),
    latlonDict05min['lon'][-1] + 2.5 / 60, 0.5)
report_netcdf_30min = outputNetCDF.OutputNetCDF(latlonDict30min)
# TODO: Make this module write output following the CF convention (see Hessel's document)

# preparing the file at  5 arcmin resolution:
output_file_05min = output_directory + "/" + file_name_front + "maximum_05min_" + str(
    start_year) + "_to_" + str(end_year) + ".nc"