Example #1
def getValAtPoint(in_map, xcor, ycor):
    """
    Returns the value in a map at the given point.
    Works but is rather slow.

    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate
        - ycor - y coordinate

    Output:
        - value
    """
    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.defined(in_map)), np.nan)
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.defined(in_map)), np.nan)
    XX = pcr.pcr2numpy(pcr.celllength(), 0.0)
    themap = pcr.pcr2numpy(in_map, np.nan)
    tolerance = 0.5  # takes a single point

    diffx = x - xcor
    diffy = y - ycor
    col_ = np.absolute(diffx) <= (XX[0, 0] * tolerance)  # cellsize
    row_ = np.absolute(diffy) <= (XX[0, 0] * tolerance)  # cellsize
    point = col_ * row_
    pt = point.argmax()

    return themap.ravel()[pt]
Example #2
def getRowColPoint(in_map, xcor, ycor):
    """
    Returns the row and col in a map at the given point.
    Works but is rather slow.

    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate
        - ycor - y coordinate

    Output:
        - row, column
    """
    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.scalar(in_map) + 1.0)), np.nan)
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.scalar(in_map) + 1.0)), np.nan)
    XX = pcr.pcr2numpy(pcr.celllength(), 0.0)
    tolerance = 0.5  # takes a single point

    diffx = x - xcor
    diffy = y - ycor
    col_ = np.absolute(diffx) <= (XX[0, 0] * tolerance)  # cellsize
    row_ = np.absolute(diffy) <= (XX[0, 0] * tolerance)  # cellsize
    point = col_ * row_

    return point.argmax(0).max(), point.argmax(1).max()
Example #4
    def __init__(self, iniItems):

        # cloneMap
        pcr.setclone(iniItems.cloneMap)
        cloneMap = pcr.boolean(1.0)

        # latitudes and longitudes
        self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap),
                                                 vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # TODO: Let users decide their preference regarding latitude order.
        #       Consult with Stefanie regarding CF convention.

        # netCDF format and attributes:
        self.attributeDictionary = {}
        self.attributeDictionary['institution'] = iniItems.globalOptions[
            'institution']
        self.attributeDictionary['title'] = iniItems.globalOptions['title']
        self.attributeDictionary['description'] = iniItems.globalOptions[
            'description']

        # netcdf format and zlib setup
        self.format = 'NETCDF3_CLASSIC'
        self.zlib = False
        if "formatNetCDF" in iniItems.reportingOptions.keys():
            self.format = str(iniItems.reportingOptions['formatNetCDF'])
        if "zlib" in iniItems.reportingOptions.keys():
            if iniItems.reportingOptions['zlib'] == "True": self.zlib = True
Example #5
def getgridparams():
    """ return grid parameters in a python friendly way

    Output:
        [xul, yul, xsize, ysize, rows, cols, xlr, ylr]

        - xul - x upper left centre
        - yul - y upper left centre
        - xsize - size of a cell in x direction
        - ysize - size of a cell in y direction
        - rows - number of rows
        - cols - number of columns
        - xlr -  x lower right centre
        - ylr -  y lower right centre
    """
    # This is the default, but add for safety...
    pcr.setglobaloption("coorcentre")
    # x and Y are the same for now
    xy = pcr.pcr2numpy(pcr.celllength(), np.nan)[0, 0]
    xu = pcr.pcr2numpy(pcr.xcoordinate(1), np.nan)[0, 0]
    yu = pcr.pcr2numpy(pcr.ycoordinate(1), np.nan)[0, 0]
    ylr = pcr.pcr2numpy(pcr.ycoordinate(1), np.nan)[getrows() - 1, getcols() - 1]
    xlr = pcr.pcr2numpy(pcr.xcoordinate(1), np.nan)[getrows() - 1, getcols() - 1]

    return [xu, yu, xy, xy, getrows(), getcols(), xlr, ylr]
Example #6
    def __init__(self, cloneMapFileName, netcdf_attribute_description):

        # cloneMap
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))

        # latitudes and longitudes
        self.latitudes = np.unique(
            pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # netCDF format and attributes:
        self.format = 'NETCDF3_CLASSIC'
        self.attributeDictionary = {}
        self.attributeDictionary['institution'] = "European Commission - JRC"
        self.attributeDictionary[
            'title'] = "EFAS-Meteo 5km for the Rhine-Meuse basin"
        self.attributeDictionary[
            'source'] = "5km Gridded Meteo Database (C) European Commission - JRDC, 2014"
        self.attributeDictionary[
            'history'] = "The data were provided by Ad de Roo ([email protected]) on 19 November 2014 and then converted by Edwin H. Sutanudjaja ([email protected]) to netcdf files on 27 November 2014."
        self.attributeDictionary[
            'references'] = "Ntegeka et al., 2013. EFAS-Meteo: A European daily high-resolution gridded meteorological data set. JRC Technical Reports. doi: 10.2788/51262"
        self.attributeDictionary[
            'comment'] = "Please use this dataset only for Hyper-Hydro test bed experiments. "
        self.attributeDictionary[
            'comment'] += "For using it and publishing it, please acknowledge its source: 5km Gridded Meteo Database (C) European Commission - JRDC, 2014 and its reference: Ntegeka et al., 2013 (doi: 10.2788/51262). "
        self.attributeDictionary[
            'comment'] += "The data are in European ETRS projection, 5km grid; http://en.wikipedia.org/wiki/European_grid. "

        self.attributeDictionary['description'] = netcdf_attribute_description
Example #7
    def test_001(self):
        """ nonspatial and pcr2numpy """
        nrRows, nrCols, cellSize = 5, 8, 1.0
        west, north = 0.0, 0.0
        pcraster.setclone(nrRows, nrCols, cellSize, west, north)

        value = 1.23456
        nonspatial = pcraster.scalar(value)
        array = pcraster.pcr2numpy(nonspatial, numpy.nan)

        for row in range(0, nrRows):
            for col in range(0, nrCols):
                self.assertAlmostEqual(array[row][col], value)

        value = 3
        nonspatial = pcraster.nominal(value)
        array = pcraster.pcr2numpy(nonspatial, numpy.nan)

        for row in range(0, nrRows):
            for col in range(0, nrCols):
                self.assertAlmostEqual(array[row][col], value)

        value = True
        nonspatial = pcraster.boolean(value)
        array = pcraster.pcr2numpy(nonspatial, numpy.nan)

        for row in range(0, nrRows):
            for col in range(0, nrCols):
                self.assertAlmostEqual(array[row][col], value)
Example #8
    def __init__(self,configuration,model,specificAttributeDictionary=None):

        # Set clone map
        pcr.setclone(configuration.cloneMap)
        cloneMap = pcr.boolean(1.0)  # map with all cell values equal to 1

        # Retrieve latitudes and longitudes from clone map
        self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
        self.crops  = np.arange(1, model.nCrop + 1)
        self.depths = np.arange(1, model.nComp + 1)
        
        # Let users decide their preference regarding latitude order
        self.netcdf_y_orientation_follow_cf_convention = False
        if 'netcdf_y_orientation_follow_cf_convention' in configuration.reportingOptions.keys() and\
            configuration.reportingOptions['netcdf_y_orientation_follow_cf_convention'] == "True":
            msg = "Latitude (y) orientation for output netcdf files start from the bottom to top."
            self.netcdf_y_orientation_follow_cf_convention = True
            self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))
        
        # Set general netcdf attributes (based on the information given in the ini/configuration file) 
        self.set_general_netcdf_attributes(configuration, specificAttributeDictionary)
        
        # netcdf format and zlib setup 
        self.format = 'NETCDF3_CLASSIC'
        self.zlib = False
        if "formatNetCDF" in configuration.reportingOptions.keys():
            self.format = str(configuration.reportingOptions['formatNetCDF'])
        if "zlib" in configuration.reportingOptions.keys():
            if configuration.reportingOptions['zlib'] == "True": self.zlib = True
Example #10
    def __init__(self,
                 cloneMapFileName,
                 resetClone=None,
                 attributeDictionary=None):

        # cloneMap
        if resetClone is not None: pcr.setclone(cloneMapFileName)
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))

        # latitudes and longitudes
        self.latitudes = np.unique(
            pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # reset clone (if necessary)
        if resetClone is not None: pcr.setclone(resetClone)

        # netcdf format:
        self.format = 'NETCDF3_CLASSIC'

        self.attributeDictionary = {}
        if attributeDictionary is None:
            self.attributeDictionary['institution'] = "None"
            self.attributeDictionary['title'] = "None"
            self.attributeDictionary['source'] = "None"
            self.attributeDictionary['history'] = "None"
            self.attributeDictionary['references'] = "None"
            self.attributeDictionary['description'] = "None"
            self.attributeDictionary['comment'] = "None"
        else:
            self.attributeDictionary = attributeDictionary
Example #12
def _get_raster(raster):
    # Return masked array and colour scheme based on the raster type
    data = None
    colour = None

    if raster.dataType() == pcraster.Scalar:
        data = pcraster.pcr2numpy(raster, numpy.nan)
        mask = numpy.isnan(data)  # NaN never compares equal, so use isnan
        data = numpy.ma.masked_array(data, mask)
    elif raster.dataType() == pcraster.Nominal:
        nan_val = -2147483648
        data = pcraster.pcr2numpy(raster, nan_val)
        mask = numpy.where(data == nan_val, True, False)
        data = numpy.ma.masked_array(data, mask)
    elif raster.dataType() == pcraster.Boolean:
        nan_val = 255
        data = pcraster.pcr2numpy(raster, nan_val)
        mask = numpy.where(data == nan_val, True, False)
        data = numpy.ma.masked_array(data, mask)
    else:
        msg = 'Plotting of rasters with data type "{}" is not supported'.format(
            str(raster.dataType()).split('.')[1])
        raise NotImplementedError(msg)

    return data
Example #14
    def __init__(self, netcdffile, logging):
        """
        First attempt at a class to read netCDF files
        (converted with pcr2netcdf.py)

        netcdffile: file to read the forcing data from
        logging: python logging object
        """

        if os.path.exists(netcdffile):
            self.dataset = netCDF4.Dataset(netcdffile, mode="r")
        else:
            msg = os.path.abspath(netcdffile) + " not found!"
            logging.error(msg)
            raise ValueError(msg)

        try:
            self.x = self.dataset.variables["x"][:]
        except KeyError:
            self.x = self.dataset.variables["lon"][:]
        # Now check Y values to see if we must flip the data
        try:
            self.y = self.dataset.variables["y"][:]
        except KeyError:
            self.y = self.dataset.variables["lat"][:]

        x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[0, :]
        y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[:, 0]

        (self.latidx,) = np.logical_and(self.y >= y.min(), self.y < y.max()).nonzero()
        (self.lonidx,) = np.logical_and(self.x >= x.min(), self.x < x.max()).nonzero()

        logging.info("Reading static input from netCDF file: " + netcdffile)
Example #15
    def initialize_config(self, filename, loglevel=logging.DEBUG):
        """
        *Extended functionality*, see https://github.com/eWaterCycle/bmi/blob/master/src/main/python/bmi.py

        Read the ini file for the combined bmi model and initializes all the bmi models
        listed in the config file.

        :param filename:
        :return: nothing
        """

        self.currenttimestep = 1

        fullpathname = os.path.abspath(filename)
        self.config = iniFileSetUp(fullpathname)
        self.datadir = os.path.dirname(fullpathname)
        inifile = os.path.basename(filename)

        # mappingdir = self.datadir + '\\bmi_mapping\\'
        mappingdir = os.path.join(
            self.datadir,
            wfbmi.configget(self.config, "IdMapping", "folder",
                            "bmi_mapping")[0],
        ) + os.sep

        self.models = configsection(self.config, "models")
        self.exchanges = configsection(self.config, "exchanges")

        for item in self.exchanges:
            exchange_from = item.split(self.comp_sep)

            if len(exchange_from) == 3 and exchange_from[2].endswith("map"):
                map_temp = pcr.readmap(mappingdir + exchange_from[2])
                map_flip = np.flipud(pcr.pcr2numpy(map_temp, 0))
                ind_temp = np.where(map_flip == 1)
                ind = [list(ind_temp[0]), list(ind_temp[1])]
                self.indices_from.append(ind)
            else:
                self.indices_from.append([])

            exchange_to = self.config.get("exchanges",
                                          item).split(self.comp_sep)

            if len(exchange_to) == 3 and exchange_to[2].endswith("map"):
                map_temp = pcr.readmap(mappingdir + exchange_to[2])
                map_flip = np.flipud(pcr.pcr2numpy(map_temp, 0))
                ind_temp = np.where(map_flip == 1)
                ind = [list(ind_temp[0]), list(ind_temp[1])]
                self.indices_to.append(ind)
            else:
                self.indices_to.append([])

        for mod in self.models:
            self.bmimodels[mod] = wfbmi.wflowbmi_csdms()

        # Initialize all bmi model objects
        for key, value in self.bmimodels.items():
            modconf = os.path.join(self.datadir,
                                   self.config.get("models", key))
            self.bmimodels[key].initialize_config(modconf, loglevel=loglevel)
Example #16
def getCoordinates(cloneMap, MV=-9999):
    '''returns cell centre coordinates for a clone map as numpy array
       return longitudes, latitudes '''
    cln = pcr.cover(pcr.boolean(cloneMap), pcr.boolean(1))
    xMap = pcr.xcoordinate(cln)
    yMap = pcr.ycoordinate(cln)
    return pcr.pcr2numpy(xMap, MV)[1, :], pcr.pcr2numpy(yMap, MV)[:, 1]
Example #17
def _get_raster(raster):
    # Return masked array and colour scheme based on the raster type
    data = None
    colour = None

    if raster.dataType() == pcraster.Scalar:
        data = pcraster.pcr2numpy(raster, numpy.nan)
        mask = numpy.isnan(data)  # NaN never compares equal, so use isnan
        data = numpy.ma.masked_array(data, mask)
        colour = 'gist_rainbow_r'
    elif raster.dataType() == pcraster.Nominal:
        nan_val = -2147483648
        data = pcraster.pcr2numpy(raster, nan_val)
        mask = numpy.where(data == nan_val, True, False)
        data = numpy.ma.masked_array(data, mask)
        colour = 'tab20'
    elif raster.dataType() == pcraster.Boolean:
        nan_val = 255
        data = pcraster.pcr2numpy(raster, nan_val)
        mask = numpy.where(data == nan_val, True, False)
        data = numpy.ma.masked_array(data, mask)
        colours = ['#FF6666', '#66FF66']
        colour = ListedColormap(colours)
    elif raster.dataType() == pcraster.Ldd:
        nan_val = 9
        data = pcraster.pcr2numpy(raster, nan_val)
        # Do not paint raster values, thus white
        colour = ListedColormap(['#FFFFFF'])
    else:
        msg = 'Plotting of rasters with data type "{}" is not supported'.format(
            str(raster.dataType()).split('.')[1])
        raise NotImplementedError(msg)

    return data, colour
Example #18
    def __init__(self, iniItems, specificAttributeDictionary=None):

        # cloneMap
        pcr.setclone(iniItems.cloneMap)
        cloneMap = pcr.boolean(1.0)

        # latitudes and longitudes
        self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[
            ::-1
        ]
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # Let users decide their preference regarding latitude order.
        self.netcdf_y_orientation_follow_cf_convention = False
        if (
            "netcdf_y_orientation_follow_cf_convention"
            in list(iniItems.reportingOptions.keys())
            and iniItems.reportingOptions["netcdf_y_orientation_follow_cf_convention"]
            == "True"
        ):
            msg = "Latitude (y) orientation for output netcdf files start from the bottom to top."
            self.netcdf_y_orientation_follow_cf_convention = True
            self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))

        # set the general netcdf attributes (based on the information given in the ini/configuration file)
        self.set_general_netcdf_attributes(iniItems, specificAttributeDictionary)

        # netcdf format and zlib setup
        self.format = "NETCDF3_CLASSIC"
        self.zlib = False
        if "formatNetCDF" in list(iniItems.reportingOptions.keys()):
            self.format = str(iniItems.reportingOptions["formatNetCDF"])
        if "zlib" in list(iniItems.reportingOptions.keys()):
            if iniItems.reportingOptions["zlib"] == "True":
                self.zlib = True

        # if given in the ini file, use the netcdf attributes as given in the section 'specific_attributes_for_netcdf_output_files'
        if "specific_attributes_for_netcdf_output_files" in iniItems.allSections:
            for key in list(
                iniItems.specific_attributes_for_netcdf_output_files.keys()
            ):

                self.attributeDictionary[
                    key
                ] = iniItems.specific_attributes_for_netcdf_output_files[key]

                if self.attributeDictionary[key] == "None":
                    self.attributeDictionary[key] = ""

                if key == "history" and self.attributeDictionary[key] == "Default":
                    self.attributeDictionary[
                        key
                    ] = "created on " + datetime.datetime.today().isoformat(" ")
                if self.attributeDictionary[key] == "Default" and (
                    key == "date_created" or key == "date_issued"
                ):
                    self.attributeDictionary[key] = datetime.datetime.today().isoformat(
                        " "
                    )
Example #19
    def __init__(self, iniItems, specificAttributeDictionary=None):

        # cloneMap
        pcr.setclone(iniItems.cloneMap)
        cloneMap = pcr.boolean(1.0)

        # latitudes and longitudes
        self.latitudes = np.unique(
            pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # Let users decide their preference regarding latitude order.
        self.netcdf_y_orientation_follow_cf_convention = False
        if ("netcdf_y_orientation_follow_cf_convention" in list(
                iniItems.reportingOptions.keys()) and iniItems.
                reportingOptions["netcdf_y_orientation_follow_cf_convention"]
                == "True"):
            msg = "Latitude (y) orientation for output netcdf files start from the bottom to top."
            self.netcdf_y_orientation_follow_cf_convention = True
            self.latitudes = np.unique(
                pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))

        # set the general netcdf attributes (based on the information given in the ini/configuration file)
        self.set_general_netcdf_attributes(iniItems,
                                           specificAttributeDictionary)

        # netcdf format and zlib setup
        self.format = "NETCDF3_CLASSIC"
        self.zlib = False
        if "formatNetCDF" in list(iniItems.reportingOptions.keys()):
            self.format = str(iniItems.reportingOptions["formatNetCDF"])
        if "zlib" in list(iniItems.reportingOptions.keys()):
            if iniItems.reportingOptions["zlib"] == "True":
                self.zlib = True

        # if given in the ini file, use the netcdf attributes as given in the section 'specific_attributes_for_netcdf_output_files'
        if "specific_attributes_for_netcdf_output_files" in iniItems.allSections:
            for key in list(iniItems.specific_attributes_for_netcdf_output_files.keys()):

                self.attributeDictionary[key] = \
                    iniItems.specific_attributes_for_netcdf_output_files[key]

                if self.attributeDictionary[key] == "None":
                    self.attributeDictionary[key] = ""

                if key == "history" and self.attributeDictionary[key] == "Default":
                    self.attributeDictionary[key] = \
                        "created on " + datetime.datetime.today().isoformat(" ")
                if self.attributeDictionary[key] == "Default" and \
                        (key == "date_created" or key == "date_issued"):
                    self.attributeDictionary[key] = \
                        datetime.datetime.today().isoformat(" ")
Example #20
    def dynamic(self):
        
        # re-calculate model time using current pcraster timestep value
        self.modelTime.update(self.currentTimeStep())

        # at the end of every month:
        # - aggregate/average the value at basin scale:
        # - then report it to the netcdf file:
        if self.modelTime.endMonth:

            # values from grace:
            grace_value = pcr.cover(vos.netcdf2PCRobjClone(\
                          self.output_files['one_degree_tws_month_anomaly']['grace'],\
                          "lwe_thickness",\
                          str(self.modelTime.fulldate), "mid-month",\
                          self.input_files["basin30minmap"]), 0.0)
            #
            basin_grace = pcr.areatotal(self.cell_area * grace_value, self.catchment)/\
                          pcr.areatotal(self.cell_area, self.catchment)

            # values from pcr-globwb simulation:
            model_value = pcr.cover(vos.netcdf2PCRobjClone(\
                          self.output_files['one_degree_tws_month_anomaly']['model'],\
                          "pcrglobwb_tws",\
                          str(self.modelTime.fulldate), "end-month",\
                          self.input_files["basin30minmap"]), 0.0)
            #
            basin_model = pcr.areatotal(self.cell_area * model_value, self.catchment)/\
                          pcr.areatotal(self.cell_area, self.catchment)

            # reporting
            timeStamp = datetime.datetime(self.modelTime.year,\
                                          self.modelTime.month,\
                                          self.modelTime.day,0)
            # write grace 
            self.output.data2NetCDF(self.output_files["basinscale_tws_month_anomaly"]['grace'],\
                                    "lwe_thickness",\
                                    pcr.pcr2numpy(basin_grace,vos.MV),\
                                    timeStamp)
            # write model
            self.output.data2NetCDF(self.output_files["basinscale_tws_month_anomaly"]['model'],\
                                    "pcrglobwb_tws",\
                                    pcr.pcr2numpy(basin_model,vos.MV),\
                                    timeStamp)

        # at the last dynamic time step 
        # - prepare annual anomaly time series
        # - evaluate the pcr-globwb model results to grace time series (monthly and annual)
        if self.modelTime.currTime == self.modelTime.endTime:

            # prepare annual anomaly time series
            self.prepare_annual_anomaly()

            # evaluate the pcr-globwb model results to grace time series 
            # (monthly & annual resolution - basin & one degree scale)
            self.evaluate_to_grace_data()
Example #21
    def dynamic(self):
        
        # re-calculate current model time using current pcraster timestep value
        self.modelTime.update(self.currentTimeStep())

        # open input data 
        referencePotET = vos.netcdf2PCRobjClone(\
                             self.input_files['referencePotET']['file_name'], \
                             self.input_files['referencePotET']['variable_name'], \
                             str(self.modelTime.fulldate), \
                             useDoy = None, \
                             cloneMapFileName = self.cloneMapFileName)
        cropKC = {}
        for lc_type in ["forest", "grassland", "irrPaddy", "irrNonPaddy"]:
            cropKC[lc_type] = vos.netcdf2PCRobjClone(\
                                  self.input_files['cropKC'][lc_type], \
                                  self.input_files['cropKC']['variable_name'], \
                                  str(self.modelTime.fulldate), 
                                  useDoy = None,
                                  cloneMapFileName = self.cloneMapFileName)
               
        # calculate
        potential_evaporation = {}
        for lc_type in ["forest", "grassland", "irrPaddy", "irrNonPaddy"]:
            potential_evaporation[lc_type] = referencePotET * cropKC[lc_type]
        
        # reporting for daily values
        timeStamp = datetime.datetime(self.modelTime.year,\
                                      self.modelTime.month,\
                                      self.modelTime.day,0)
        for lc_type in ["forest", "grassland", "irrPaddy", "irrNonPaddy"]:
            file_name = self.output['folder'] + "/daily_potential_evaporation_" + self.variable_unit + "_" + lc_type + ".nc"
            self.netcdf_report.data2NetCDF(file_name,\
                                           self.variable_name,\
                                           pcr.pcr2numpy(potential_evaporation[lc_type], vos.MV),\
                                           timeStamp)

        # reporting for monthly values
        # - reset at the beginning of the month:
        if self.modelTime.isFirstDayOfMonth:
            for lc_type in ["forest", "grassland", "irrPaddy", "irrNonPaddy"]:
                self.monthly_accumulator[lc_type] = pcr.scalar(0.0)
        # - accumulate until the last day of the month:
        for lc_type in ["forest", "grassland", "irrPaddy", "irrNonPaddy"]:
            self.monthly_accumulator[lc_type] = self.monthly_accumulator[lc_type] + potential_evaporation[lc_type]
        if self.modelTime.endMonth:
            for lc_type in ["forest", "grassland", "irrPaddy", "irrNonPaddy"]:
                file_name = self.output['folder'] + "/monthly_potential_evaporation_" + self.variable_unit + "_" + lc_type + ".nc"
                
                print(file_name)
                
                self.netcdf_report.data2NetCDF(file_name,\
                                               self.variable_name,\
                                               pcr.pcr2numpy(self.monthly_accumulator[lc_type]/calendar.monthrange(self.modelTime.year, self.modelTime.month)[1], vos.MV),\
                                               timeStamp)
Example #22
def estimate_iterations_kin_wave(Q, Beta, alpha, timestepsecs, dx, mv):
    if (pcr.pcr2numpy(Q, mv)).max() > 0:
        celerity = pcr.ifthen(Q > 0.0, 1.0 / (alpha * Beta * Q**(Beta - 1)))
        courant = (timestepsecs / dx) * celerity
        np_courant = pcr.pcr2numpy(courant, mv)
    else:
        np_courant = np.zeros(pcr.pcr2numpy(Q, mv).shape) + mv
    np_courant[np_courant == mv] = np.nan
    try:
        it_kin = int(np.ceil(1.25 * (np.nanpercentile(np_courant, 95))))
    except ValueError:
        # all-NaN courant field: fall back to a single iteration
        it_kin = 1

    return it_kin
Example #23
    def get_model_dimensions(self):
        """Function to set model dimensions"""
        self.nLat = int(self.cloneMapAttributes['rows'])
        self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(self.cloneMap), vos.MV))[::-1]
        self.nLon = int(self.cloneMapAttributes['cols'])
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(self.cloneMap), vos.MV))
        self.nCell = int(np.sum(self.landmask))
        self.nLayer = 3         # FIXED
        self.dimensions = {
            'time'     : None,
            'depth'    : np.arange(self.nLayer), # TODO - put nComp in config section [SOIL]
            'lat'      : self.latitudes,
            'lon'      : self.longitudes,
        }
Example #24
def lookupResRegMatr(ReserVoirLocs, values, hq, JDOY):

    np_res_ids = pcr.pcr2numpy(ReserVoirLocs, 0)
    npvalues = pcr.pcr2numpy(values, 0)
    out = np.copy(npvalues) * 0.0

    if len(hq) > 0:
        for key in hq:
            value = npvalues[np.where(np_res_ids == key)]

            val = np.interp(value, hq[key][:, 0], hq[key][:, JDOY])

            out[np.where(np_res_ids == key)] = val

    return pcr.numpy2pcr(pcr.Scalar, out, 0)
Example #26
    def __init__(self, cfgfile):

        # Read the configuration file
        self.config = configparser.RawConfigParser()
        self.config.read(cfgfile)

        #-Define missing values (demMV should be a scalar value)
        self.mv = False
        self.demMV = -9999.

        #-Input path and output path
        self.input_path = self.config.get('FOLDERS', 'input_path')
        self.results_path = self.config.get('FOLDERS', 'results_path')

        #-Read the RGI dbf as a pandas dataframe and add columns for dH, dH_dem, hmin, and Tau
        rgi = Dbf5(
            os.path.join(self.input_path,
                         self.config.get('INPUT_FILES', 'rgiDBF')))
        rgi = rgi.to_dataframe()
        #-unique glacier ids
        self.uIDs = pd.unique(rgi['GLACID'])
        rgi.set_index('GLACID', inplace=True)
        rgi = rgi[['Area', 'Zmin', 'Zmax', 'Slope', 'Lmax']]
        rgi['dH'] = rgi['Zmax'] - rgi['Zmin']
        rgi['dH_dem'] = np.nan
        rgi['hmin'] = np.nan
        rgi['Tau'] = np.nan
        self.rgi = rgi

        #-Read dem
        self.dem = pcr.readmap(
            os.path.join(self.input_path,
                         self.config.get('INPUT_FILES', 'demMap')))
        self.dem = pcr.pcr2numpy(self.dem, self.demMV)
        #-Read GlacIDs
        self.glacID = pcr.readmap(
            os.path.join(self.input_path,
                         self.config.get('INPUT_FILES', 'glacidMap')))
        self.glacID = pcr.pcr2numpy(self.glacID, self.mv)

        #-Parameters
        self.eIntervals = self.config.getint('PARAMETERS', 'eIntervals')
        self.rho = self.config.getfloat('PARAMETERS', 'rho')
        self.g = self.config.getfloat('PARAMETERS', 'g')
        self.n = self.config.getint('PARAMETERS', 'n')
        self.r = self.config.getfloat('PARAMETERS', 'r')
        self.f = self.config.getfloat('PARAMETERS', 'f')
        self.hga = self.config.getfloat('PARAMETERS', 'hga')
Example #27
def zonalSumArea(nominalMap, areaClass):
    """Memory efficient method to sum up the surface area of the different 
        classes in the nominal map. Separate by the regions in areaClass
        
        input:
            nominalMap: nominal map, e.g. ecotope map
            areaClass: regions to compute surface areas over
    """ 
    #-create a pointMap of the output locations, one for each areaClass
    outputPointMap = pcrr.pointPerClass(areaClass)
    
    #-initiate output DataFrame
    dfInit = pcrr.getCellValues(outputPointMap, mapList = [areaClass], columns = ['areaClass'])
 
    #-loop over the classes in nominalMap and compute the summed area per areaClass
    IDs = np.unique(pcr.pcr2numpy(nominalMap, -9999))[1:]
    dfList = []
    for ID in IDs[:]:
        pcrID = pcr.nominal(ID)
        pcr.setglobaloption('unittrue')
        IDArea = pcr.ifthen(nominalMap == pcrID, pcr.cellarea())
        sectionSum = pcr.areatotal(IDArea, areaClass)
        df = pcrr.getCellValues(outputPointMap, [sectionSum], [ID])
        # df columns = rowIdx, colIdx, ID
        df = df.drop(['rowIdx', 'colIdx'],  axis=1)
        dfList.append(df)
        
    dfOut = dfInit.join(dfList)
    #return dfInit, df, dfOut, dfList
    return dfOut
Example #28
def fillMVBoundingBox(inMap, fillValue, xmin, xmax, ymin, ymax):
    ''' fill missing values in BB with fillValue '''
    #- create dictionary with map attributes and extract 
    pcr.report(inMap, 'temp.map')
    mapAttr = getMapAttr('temp.map')
    nrRows  = mapAttr['rows']
    nrCols  = mapAttr['columns']
    cellSize= mapAttr['cell_length']
    mapXmin = mapAttr['xUL']
    mapXmax = mapXmin + nrCols*cellSize - cellSize
    mapYmax = mapAttr['yUL']
    mapYmin = mapYmax - nrRows*cellSize + cellSize
    
    #- determine array indices of bounding box
    X = np.linspace(mapXmin,mapXmax,num=nrCols,endpoint=True)
    Y = np.linspace(mapYmin,mapYmax,num=nrRows,endpoint=True)
    indexXmin = np.searchsorted(X, xmin)
    indexXmax = np.searchsorted(X, xmax)
    indexYmin = 1+ len(Y) - np.searchsorted(Y, ymin, side='right')
    indexYmax = 1+ len(Y) - np.searchsorted(Y, ymax, side='right')
    #- fill bounding box with new value
    mapArray = pcr.pcr2numpy(inMap, -9999)
    box = mapArray[indexYmax:indexYmin, indexXmin:indexXmax]
    box[box == -9999] = fillValue
    mapArray[indexYmax:indexYmin, indexXmin:indexXmax] = box
    return pcr.numpy2pcr(pcr.Scalar, mapArray, -9999)
Example #29
    def testNominalRaster2Array(self):
        raster = pcraster.readmap("areaarea_Class.map")
        mv = 99
        array = pcraster.pcr2numpy(raster, mv)
        self.assertTrue(isinstance(array[0][0], numpy.int32))
        self.assertEqual(array[0][0], 2)
        self.assertEqual(array[0][1], 6)
        self.assertEqual(array[0][2], 2)
        self.assertEqual(array[0][3], 2)
        self.assertEqual(array[0][4], mv)
        self.assertEqual(array[1][0], 6)
        self.assertEqual(array[1][1], 6)
        self.assertEqual(array[1][2], 2)
        self.assertEqual(array[1][3], 2)
        self.assertEqual(array[1][4], 2)
        self.assertEqual(array[2][0], 6)
        self.assertEqual(array[2][1], 6)
        self.assertEqual(array[2][2], 0)
        self.assertEqual(array[2][3], 0)
        self.assertEqual(array[2][4], 0)
        self.assertEqual(array[3][0], 6)
        self.assertEqual(array[3][1], 6)
        self.assertEqual(array[3][2], 0)
        self.assertEqual(array[3][3], 0)
        self.assertEqual(array[3][4], 0)
        self.assertEqual(array[4][0], 6)
        self.assertEqual(array[4][1], 3)
        self.assertEqual(array[4][2], 3)
        self.assertEqual(array[4][3], 4)
        self.assertEqual(array[4][4], 4)
Example #30
    def testLddRaster2Array(self):
        raster = pcraster.readmap("accu_Ldd.map")
        mv = 99
        array = pcraster.pcr2numpy(raster, mv)
        self.assertTrue(isinstance(array[0][0], numpy.uint8))
        self.assertEqual(array[0][0], 2)
        self.assertEqual(array[0][1], 2)
        self.assertEqual(array[0][2], 2)
        self.assertEqual(array[0][3], 1)
        self.assertEqual(array[0][4], 1)
        self.assertEqual(array[1][0], 2)
        self.assertEqual(array[1][1], 2)
        self.assertEqual(array[1][2], 1)
        self.assertEqual(array[1][3], 1)
        self.assertEqual(array[1][4], 1)
        self.assertEqual(array[2][0], 3)
        self.assertEqual(array[2][1], 2)
        self.assertEqual(array[2][2], 1)
        self.assertEqual(array[2][3], 4)
        self.assertEqual(array[2][4], 1)
        self.assertEqual(array[3][0], 3)
        self.assertEqual(array[3][1], 2)
        self.assertEqual(array[3][2], 1)
        self.assertEqual(array[3][3], 4)
        self.assertEqual(array[3][4], 4)
        self.assertEqual(array[4][0], 6)
        self.assertEqual(array[4][1], 5)
        self.assertEqual(array[4][2], 4)
        self.assertEqual(array[4][3], 4)
        self.assertEqual(array[4][4], 4)
Example #31
    def spatial(self):
        """Computes required biosafe output for a spatial domain"""

        #-determine a representative point for each floodplain section
        points = pcrr.representativePoint(self.sections)
        clone = pcr.defined(self.sections)
        pcr.setglobaloption('unittrue')
        xcoor = pcr.xcoordinate(clone)
        ycoor = pcr.ycoordinate(clone)
        geoDf = pcrr.getCellValues(points, \
                                mapList = [points, xcoor, ycoor],\
                                columns = ['ID', 'xcoor', 'ycoor'])
        geoDf.set_index('ID', inplace=True, drop=False)
        geoDf.drop(['rowIdx', 'colIdx', 'ID'], axis=1, inplace=True)

        #-compute the required biosafe parameters for all sections
        sectionIDs = np.unique(pcr.pcr2numpy(self.sections,-9999))[1:]
        ll = []
        for sectionID in sectionIDs:
            ll.append(self.sectionScores(sectionID))
        paramLL = list(zip(*ll))  # list() so it can be indexed (zip is lazy in py3)

        dfParamLL = []
        for ii in range(len(self.params)):
            bsScores = pd.concat(paramLL[ii], axis=1).T
            bsScores = bsScores.join(geoDf)
            bsScores.index.name = 'ID'
            bsScores.columns.name = self.params[ii]
            dfParamLL.append(bsScores)

        return dfParamLL
Example #32
    def dynamic(self):

        # re-calculate current model time using current pcraster timestep value
        self.modelTime.update(self.currentTimeStep())

        # processing done only at the last day of the year
        if self.modelTime.isLastDayOfYear():

            logger.info("Reading runoff for time %s", self.modelTime.currTime)
            annual_input_file = self.input_file %(str(self.modelTime.currTime.year), \
                                                  str(self.modelTime.currTime.year))
            self.cell_value = vos.netcdf2PCRobjClone(annual_input_file, "automatic", \
                                                     str(self.modelTime.fulldate), \
                                                     useDoy = None, \
                                                     cloneMapFileName = self.clonemap_file_name, \
                                                     LatitudeLongitude = True)
            self.cell_value = pcr.cover(self.cell_value, 0.0)

            logger.info("Calculating basin value for time %s",
                        self.modelTime.currTime)
            self.basin_value = pcr.catchmenttotal(
                self.cell_value * self.cell_area,
                self.ldd_network) / self.basin_area

            # reporting
            # - time stamp for reporting
            timeStamp = datetime.datetime(self.modelTime.year,\
                                          self.modelTime.month,\
                                          self.modelTime.day,\
                                          0)
            logger.info("Reporting for time %s", self.modelTime.currTime)
            self.netcdf_report.data2NetCDF(self.output_file, \
                                           "total_flow", \
                                           pcr.pcr2numpy(self.basin_value, vos.MV), \
                                           timeStamp)
Example #33
def getCellValues(pointMap, mapList=[], columns=[]):
    """ Get the cell values of the maps in mapList at the locations of pointMap
    """
    #-determine where the indices are True
    arr = pcr.pcr2numpy(pcr.boolean(pointMap), 0).astype('bool')
    indices = np.where(arr)

    #-loop over the points in pointMap
    pcr.setglobaloption('unitcell')
    ll = []
    for rowIdx, colIdx in zip(indices[0], indices[1]):
        line = []
        line.append(rowIdx)
        line.append(colIdx)
        for pcrMap in mapList:
            line.append(
                pcr.cellvalue(pcrMap, int(rowIdx + 1),
                              int(colIdx + 1))[0])  # np.int was removed in numpy >= 1.24
        ll.append(line)

    #-optionally add column names
    if len(columns) == len(mapList):
        columnNames = ['rowIdx', 'colIdx'] + columns
    else:
        columnNames = ['rowIdx', 'colIdx'] + \
                    ['map' + str(ii) for ii in range(1, 1 + len(mapList), 1)]

    #-return as Pandas DataFrame
    return pd.DataFrame(np.array(ll), columns=columnNames)
Example #35
    def _report(self, data, identifier, timestamp):
        """
        Report function to store an attribute map. In traditional PCRaster
        the report function writes a map straight to a location on disk, with
        a directory structure and filename which is representative of the attribute
        and timestep at which the reporting takes place. This can cause some
        problems though. For example, a model reports 8 attributes, and the model
        has 24 timesteps. This will result in 192 map files, each of which (due to the
        PCRaster file format) is uncompressed, does not have detailed georeference
        information, does not contain overviews for quick zooming out, and is in 
        the wrong projection (that of the model, rather than web mercator that we
        need).

        In GEMS we choose a slightly different approach and use GeoTiff as the 
        storage mechanism. The variable self._report_layers contains the model 
        outputs and has the following structure:

        {
            '<attribute_name>' : [ (<data_array>, <utc_timestamp>), (...), (...) ]
        }

        This way, accessing self._report_layers['snow_depth'] will return an array
        of tuples, of which each tuple contains a numpy array and a timestamp.

        To get a list of the reported attributes we can simply use list(self._report_layers)

        Every time an attribute is reported it is added to this variable, and 
        at the end of the model run the _report_postprocess() is called, which turns
        each reported attribute into a separate geotiff file where the bands in
        the geotiff file correspond to one of the timesteps. Performing operations
        on these stacked geotiffs (such as creating overviews, requesting subsections,
        slices, or reprojecting) is much more efficient than on separate files.
        """
        try:
            if identifier in self.reporting:
                logger.debug("Reporting map '%s' (datatype:%s timestep:%d timestamp:%s)"%(identifier, self.reporting[identifier]["datatype"], self.timestep, self.timestamp))
                # Crop by the mask. Set all data outside mask to nodata value
                data = ifthenelse(self._mask, data, -9999)            
                # Convert to numpy array. The numpy array will be added to stack
                # of maps, one for each timestep.
                data = pcr2numpy(data, -9999)
                #
                #Todo: implement some kind of clamp functionality here. if 'clamp' is set to true 
                #      on the symbolizer for this output attribute, set all values
                #      above the max to the max value, and all below the min to the 
                #      min value. Allow overriding this in the model's report function like:
                #      report(data,'map', clamp=True) and then use pcraster/numpy to clamp the data.
                #
                (rows,cols) = data.shape
                if identifier not in list(self._report_layers):
                    self._report_layers.update({identifier:[]})
                self._report_layers[identifier].append((data, timestamp))
            else:
                logger.error("Don't know how to report '%s', please specify in the 'reporting' section of your model configuration."%(identifier))
        except Exception:
            logger.error("An exception occurred while trying to report '%s'"%(identifier))
            return False
        else:
            logger.debug(" - Reporting map '%s' completed."%(identifier))
            return True
Example #37
def select_area(raster):

    # Get the list of IDs from the raster, use it for the dropdown field
    raster_np = pcraster.pcr2numpy(raster, -9999)

    raster_values = numpy.unique(raster_np)

    sections = numpy.delete(raster_values, numpy.where(raster_values == -9999))

    hover = HoverTool(tooltips=[
        ("Floodplain section", "@image"),
    ])
    p = plot(raster, hover=hover)

    display(p)

    style = {'description_width': 'initial'}
    w = ipywidgets.SelectMultiple(options=sections,
                                  rows=8,
                                  description='Sections:',
                                  layout=Layout(width="50%"),
                                  style=style,
                                  disabled=False)

    display(w)
    return w
Example #38
    def dynamic(self):

        # re-calculate current model time using current pcraster timestep value
        self.modelTime.update(self.currentTimeStep())

        # processing done only at the last day of the month
        if self.modelTime.isLastDayOfMonth():
            
            logger.info("Reading runoff for time %s", self.modelTime.currTime)
            self.total_runoff = vos.netcdf2PCRobjClone(self.totat_runoff_input_file, "total_runoff",\
                                                       str(self.modelTime.fulldate), 
                                                       useDoy = None,
                                                       cloneMapFileName = self.clonemap_file_name,\
                                                       LatitudeLongitude = True)
            self.total_runoff = pcr.cover(self.total_runoff, 0.0)
            
            logger.info("Calculating total inflow and internal inflow for time %s", self.modelTime.currTime)
            self.total_flow    = pcr.catchmenttotal(self.total_runoff * self.cell_area, self.ldd_network)
            self.internal_flow = pcr.areatotal(self.total_runoff  * self.cell_area, self.sub_catchment)
            # - convert values to m3/s
            number_of_days_in_a_month = self.modelTime.day
            self.total_flow         = self.total_flow    / (number_of_days_in_a_month * 24. * 3600.)
            self.internal_flow      = self.internal_flow / (number_of_days_in_a_month * 24. * 3600.)
            # - limit the values to the landmask only
            self.total_flow         = pcr.ifthen(self.landmask, self.total_flow)
            self.internal_flow      = pcr.ifthen(self.landmask, self.internal_flow)
            
            logger.info("Extrapolating or time %s", self.modelTime.currTime)
            # Purpose: To avoid missing value data while being extracted by cdo command
            self.total_flow         = pcr.cover(self.total_flow,    pcr.windowmaximum(self.total_flow,    0.125)) 
            self.internal_flow      = pcr.cover(self.internal_flow, pcr.windowaverage(self.internal_flow, 0.125)) 

            # reporting 
            # - time stamp for reporting
            timeStamp = datetime.datetime(self.modelTime.year,\
                                          self.modelTime.month,\
                                          self.modelTime.day,\
                                          0)
            logger.info("Reporting for time %s", self.modelTime.currTime)
            self.netcdf_report.data2NetCDF(self.total_flow_output_file, \
                                           "total_flow", \
                                           pcr.pcr2numpy(self.total_flow, vos.MV), \
                                           timeStamp)
            self.netcdf_report.data2NetCDF(self.internal_flow_output_file, \
                                           "internal_flow", \
                                           pcr.pcr2numpy(self.internal_flow, vos.MV), \
                                           timeStamp)
Example #39
def pcr2col(listOfMaps, MV, selection='ONE_TRUE'):
    """converts a set of maps to a column array: X, Y, map values
       selection can be set to ALL, ALL_TRUE, ONE_TRUE"""

    #-intersect all maps and get X and Y coordinates
    intersection = pcr.boolean(pcr.cover(listOfMaps[0], 0))
    for mapX in listOfMaps[1:]:
        intersection = intersection | pcr.boolean(pcr.cover(mapX, 0))
    pcr.setglobaloption("unittrue")
    xCoor = pcr.ifthen(intersection, pcr.xcoordinate(intersection))
    yCoor = pcr.ifthen(intersection, pcr.ycoordinate(intersection))
    pcr.setglobaloption("unitcell")

    #-initiate outArray with xCoor and yCoor
    xCoorArr = pcr.pcr2numpy(xCoor, MV)
    yCoorArr = pcr.pcr2numpy(yCoor, MV)
    nRows, nCols = xCoorArr.shape
    nrCells = nRows * nCols
    outArray = np.hstack((xCoorArr.reshape(nrCells, 1),
                          yCoorArr.reshape(nrCells, 1)))

    #-add subsequent maps
    for mapX in listOfMaps:
        arr = pcr.pcr2numpy(mapX, MV).reshape(nrCells, 1)
        outArray = np.hstack((outArray, arr))

    #-subset output based on selection criterion
    ll = []
    nrMaps = len(listOfMaps)
    if selection == 'ONE_TRUE':
        for line in outArray:
            nrMV = len(line[line == MV])
            if nrMV < nrMaps:
                ll.append(line)
            else:
                pass
        outArray = np.array(ll)
    elif selection == 'ALL_TRUE':
        for line in outArray:
            if MV not in line:
                ll.append(line)
            else:
                pass
        outArray = np.array(ll)
    elif selection == 'ALL':
        pass
    return outArray
Example #40
def readPCRmapClone(v,
                    cloneMapFileName,
                    tmpDir,
                    absolutePath=None,
                    isLddMap=False,
                    cover=None,
                    isNomMap=False):
    # v: inputMapFileName or floating values
    # cloneMapFileName: If the inputMap and cloneMap have different clones,
    #                   resampling will be done.
    logger.debug('read file/values: ' + str(v))
    if v == "None":
        #~ PCRmap = str("None")
        PCRmap = None  # 29 July: I made an experiment by changing the type of this object.
    elif not re.match(r"[0-9.-]*$", v):
        if absolutePath is not None: v = getFullPath(v, absolutePath)
        # print v
        # print cloneMapFileName
        sameClone = isSameClone(v, cloneMapFileName)
        if sameClone == True:
            PCRmap = pcr.readmap(v)
        else:
            # resample using GDAL:
            output = tmpDir + 'temp.map'
            warp = gdalwarpPCR(v, output, cloneMapFileName, tmpDir, isLddMap,
                               isNomMap)
            # read from temporary file and delete the temporary file:
            PCRmap = pcr.readmap(output)
            if isLddMap == True:
                PCRmap = pcr.ifthen(pcr.scalar(PCRmap) < 10., PCRmap)
            if isLddMap == True: PCRmap = pcr.ldd(PCRmap)
            if isNomMap == True:
                PCRmap = pcr.ifthen(pcr.scalar(PCRmap) > 0., PCRmap)
            if isNomMap == True: PCRmap = pcr.nominal(PCRmap)
            if os.path.isdir(tmpDir):
                shutil.rmtree(tmpDir)
            os.makedirs(tmpDir)
    else:
        PCRmap = pcr.spatial(pcr.scalar(float(v)))
    if cover != None:
        PCRmap = pcr.cover(PCRmap, cover)
    co = None
    cOut = None
    err = None
    warp = None
    del co
    del cOut
    del err
    del warp
    stdout = None
    del stdout
    stderr = None
    del stderr

    # SM: revisit this
    PCRmap = pcr.pcr2numpy(PCRmap, np.nan)

    return PCRmap
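
Hypothetical calls illustrating the three kinds of input readPCRmapClone accepts (the file names are placeholders, and a clone map is assumed to be set):

# the literal string "None" yields None
assert readPCRmapClone("None", "clone.map", "tmp/") is None

# a numeric string becomes a constant map, returned as a numpy array
arr_const = readPCRmapClone("0.5", "clone.map", "tmp/")

# a map path is read directly, or resampled first when its clone differs
arr_map = readPCRmapClone("precip.map", "clone.map", "tmp/")
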
Example #41
0
def lookupResFunc(ReserVoirLocs, values, sh, dirLookup):

    np_res_ids = pcr.pcr2numpy(ReserVoirLocs, 0)
    npvalues = pcr.pcr2numpy(values, 0)
    out = np.copy(npvalues) * 0.0

    if len(sh) > 0:
        for key in sh:
            value = npvalues[np.where(np_res_ids == key)]

            if dirLookup == "0-1":
                val = np.interp(value, sh[key][:, 0], sh[key][:, 1])
            if dirLookup == "1-0":
                val = np.interp(value, sh[key][:, 1], sh[key][:, 0])

            out[np.where(np_res_ids == key)] = val

    return pcr.numpy2pcr(pcr.Scalar, out, 0)
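
The per-reservoir lookup reduces to np.interp over a two-column table; a toy sketch with a made-up storage-to-level curve (the "0-1" direction above):

import numpy as np

# hypothetical table: column 0 = storage, column 1 = level
sh_table = np.array([[  0.0, 0.0],
                     [100.0, 2.0],
                     [400.0, 5.0]])

storage = np.array([50.0, 250.0])
level = np.interp(storage, sh_table[:, 0], sh_table[:, 1])
print(level)  # [1.  3.5]
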
Example #43
0
def retrieveMapValue(pcrX, coordinates):
    #-retrieves values from a map and returns an array conforming to the (row, col) coordinates given
    nrRows = coordinates.shape[0]
    x = np.ones((nrRows)) * MV
    tmpIDArray = pcr.pcr2numpy(pcrX, MV)
    for iCnt in range(nrRows):
        row, col = coordinates[iCnt, :]
        if row != MV and col != MV:
            x[iCnt] = tmpIDArray[row, col]
    return x
Example #44
0
def get_rowColAboveThreshold(map, threshold):
    npMap = pcr.pcr2numpy(map, -9999)
    (nr, nc) = np.shape(npMap)
    for r in range(0, nr):
        for c in range(0, nc):
            if npMap[r, c] != -9999:
                if np.abs(npMap[r, c]) > threshold:
                    return (r, c)
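
The double loop above can also be written as a vectorized lookup; a sketch of the same search using np.argwhere, which returns hits in the same row-major order as the loops (None when no cell qualifies):

import numpy as np
import pcraster as pcr

def first_cell_above_threshold(pcr_map, threshold, mv=-9999):
    np_map = pcr.pcr2numpy(pcr_map, mv)
    hits = np.argwhere((np_map != mv) & (np.abs(np_map) > threshold))
    return tuple(hits[0]) if hits.size else None
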
Example #45
0
def points_to_map(in_map, xcor, ycor, tolerance):
    """
    Returns a map with non-zero values at the points defined
    in X, Y pairs. Its goal is to replace the pcraster col2map program.

    tolerance should be 0.5 to select single points.
    Performance is not very good and scales linearly with the number of points.

    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate (array or single value)
        - ycor - y coordinate (array or single value)
        - tolerance - tolerance in cell units. 0.5 selects a single cell,
          10 would select a 10x10 block of cells

    Output:
        - Map with values burned in. 1 for the first point, 2 for the second and so on
    """
    point = in_map * 0.0

    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.defined(in_map)), np.nan)
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.defined(in_map)), np.nan)
    cell_length = float(pcr.celllength())

    # simple check so that both floats and numpy arrays are accepted
    try:
        xcor.ndim
    except AttributeError:
        xcor = np.array([xcor])
        ycor = np.array([ycor])

    # Loop over the points and "burn" them into the map
    for n in range(0, xcor.size):
        if Verbose:
            print(n)
        diffx = x - xcor[n]
        diffy = y - ycor[n]
        col_ = np.absolute(diffx) <= (cell_length * tolerance)  # cellsize
        row_ = np.absolute(diffy) <= (cell_length * tolerance)  # cellsize
        point = point + pcr.numpy2pcr(pcr.Scalar, ((col_ * row_) * (n + 1)), np.nan)

    return pcr.ordinal(point)
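
A usage sketch for points_to_map on a hypothetical clone; with tolerance 0.5 each point burns exactly one cell, and the module-level Verbose flag is assumed (set to False here):

import numpy as np
import pcraster as pcr

Verbose = False
pcr.setclone(10, 10, 1.0, 0.0, 10.0)
base = pcr.spatial(pcr.scalar(0.0))

# two points at cell centres (x.5, y.5 for this clone)
xs = np.array([2.5, 7.5])
ys = np.array([3.5, 8.5])
points = points_to_map(base, xs, ys, 0.5)  # ordinal map: 1 and 2 at the points
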
Example #46
0
    def __init__(self, cloneMapFile, attribute=None, cellSizeInArcMinutes=None):
        # cloneMap
        # - the cloneMap must be at 5 arc min resolution
        cloneMap = pcr.readmap(cloneMapFile)
        cloneMap = pcr.boolean(1.0)
        
        # latitudes and longitudes
        self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        #~ # properties of the clone map
        #~ # - number of rows and columns
        #~ self.nrRows       = np.round(pcr.clone().nrRows())    
        #~ self.nrCols       = np.round(pcr.clone().nrCols())  
        #~ # - upper right coordinate, unit: arc degree ; must be integer (without decimals)
        #~ self.minLongitude = np.round(pcr.clone().west() , 0)         
        #~ self.maxLatitude  = np.round(pcr.clone().north(), 0)
        #~ # - cell resolution, unit: arc degree
        #~ self.cellSize     = pcr.clone().cellSize()
        #~ if cellSizeInArcMinutes != None: self.cellSize = cellSizeInArcMinutes / 60.0 
        #~ # - lower right coordinate, unit: arc degree ; must be integer (without decimals)
        #~ self.maxLongitude = np.round(self.minLongitude + self.cellSize*self.nrCols, 0)         
        #~ self.minLatitude  = np.round(self.maxLatitude  - self.cellSize*self.nrRows, 0)
        #~ 
        #~ # latitudes and longitudes for netcdf files
        #~ latMin = self.minLatitude  + self.cellSize / 2
        #~ latMax = self.maxLatitude  - self.cellSize / 2
        #~ lonMin = self.minLongitude + self.cellSize / 2
        #~ lonMax = self.maxLongitude - self.cellSize / 2
        #~ self.longitudes = np.arange(lonMin,lonMax+self.cellSize, self.cellSize)
        #~ self.latitudes=   np.arange(latMax,latMin-self.cellSize,-self.cellSize)
        
        # netCDF format and attributes:
        self.format = 'NETCDF4'
        self.attributeDictionary = {}
        if attribute is None:
            self.attributeDictionary['institution'] = "None"
            self.attributeDictionary['title'      ] = "None"
            self.attributeDictionary['description'] = "None"
        else:
            self.attributeDictionary = attribute
Example #47
0
def idtoid(sourceidmap, targetidmap, valuemap):
    """
    transfers the values from valuemap at the point ids in sourceidmap to the areas in targetidmap.

    :param sourceidmap:
    :param targetidmap:
    :param valuemap:
    :return:
    """

    _area = pcr.pcr2numpy(targetidmap, 0.0).copy().astype(float)
    _pt = pcr.pcr2numpy(sourceidmap, 0.0).copy()
    _val = pcr.pcr2numpy(valuemap, 0.0).copy()

    for val in np.unique(_pt):
        if val > 0:
            _area[_area == val] = np.mean(_val[_pt == val])

    retmap = pcr.numpy2pcr(pcr.Scalar, _area, 0.0)

    return retmap
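
A numpy-only analogue of the loop inside idtoid, on hypothetical 2x2 id grids; the value at each point id is spread over the area cells carrying the same id:

import numpy as np

pt   = np.array([[1, 0], [0, 2]])          # point ids
area = np.array([[1., 1.], [2., 2.]])      # area ids
val  = np.array([[5., 0.], [0., 8.]])      # values at the points

for v in np.unique(pt):
    if v > 0:
        area[area == v] = np.mean(val[pt == v])
print(area)  # [[5. 5.] [8. 8.]]
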
Example #48
0
    def dynamic(self):
        
        # update model time using the current pcraster timestep value
        self.modelTime.update(self.currentTimeStep())

        # reading gross and netto values:
        if self.modelTime.isLastDayOfMonth():

            gross_value = vos.netcdf2PCRobjClone(ncFile  = self.input_netcdf['gross_file_name'],
                                                 varName = self.input_netcdf['gross_variable_name'],
                                                 dateInput = str(self.modelTime.fulldate))

            netto_value = vos.netcdf2PCRobjClone(ncFile  = self.input_netcdf['netto_file_name'],
                                                 varName = self.input_netcdf['netto_variable_name'],
                                                 dateInput = str(self.modelTime.fulldate))
            
            # cover with zero and convert the unit from m3 to m by dividing by the cell area
            gross_value = pcr.cover(gross_value, 0.0)/self.cell_area
            netto_value = pcr.cover(netto_value, 0.0)/self.cell_area

        # reporting
        if self.modelTime.isLastDayOfMonth():

            # put the output in a dictionary
            output = {}
            output[self.output_netcdf['gross_variable_name']] = pcr.pcr2numpy(gross_value, vos.MV)
            output[self.output_netcdf['netto_variable_name']] = pcr.pcr2numpy(netto_value, vos.MV)
            
            # time stamp 
            timeStamp = datetime.datetime(self.modelTime.year,\
                                          self.modelTime.month,\
                                          self.modelTime.day,0)
            # to netcdf 
            self.output.dataList2NetCDF(self.output_netcdf['file_name'],\
                                       [self.output_netcdf['gross_variable_name'], self.output_netcdf['netto_variable_name']],\
                                        output,\
                                        timeStamp)

        # closing the file at the end of the simulation
        if self.modelTime.isLastTimeStep(): self.output.close(self.output_netcdf['file_name'])
Example #49
0
def getcols():
    """
    returns the number of columns in the current map

    Input:
        - -

    Output:
        - nr of columns in the current clonemap as a scalar
    """
    a = pcr.pcr2numpy(pcr.celllength(), np.nan).shape[1]

    return a
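
A matching helper for the number of rows, sketched under the same assumption that a clone map is set (this companion is not part of the original listing):

def getrows():
    """
    returns the number of rows in the current clonemap as a scalar
    """
    return pcr.pcr2numpy(pcr.celllength(), np.nan).shape[0]
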
Example #50
0
    def __init__(self, cloneMapFileName, netcdf_attribute_description):
        # cloneMap
        cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
        cloneMap = pcr.boolean(pcr.scalar(1.0))
        
        # latitudes and longitudes
        self.latitudes  = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

        # netCDF format and attributes:
        self.format = 'NETCDF3_CLASSIC'
        self.attributeDictionary = {}
        self.attributeDictionary['institution']  = "European Commission - JRC"
        self.attributeDictionary['title'      ]  = "EFAS-Meteo 5km for the Rhine-Meuse basin"
        self.attributeDictionary['source'     ]  = "5km Gridded Meteo Database (C) European Commission - JRC, 2014"
        self.attributeDictionary['history'    ]  = "The data were provided by Ad de Roo ([email protected]) on 19 November 2014 and then converted by Edwin H. Sutanudjaja ([email protected]) to netcdf files on 27 November 2014."
        self.attributeDictionary['references' ]  = "Ntegeka et al., 2013. EFAS-Meteo: A European daily high-resolution gridded meteorological data set. JRC Technical Reports. doi: 10.2788/51262"
        self.attributeDictionary['comment'    ]  = "Please use this dataset only for Hyper-Hydro test bed experiments. " 
        self.attributeDictionary['comment'    ] += "For using it and publishing it, please acknowledge its source: 5km Gridded Meteo Database (C) European Commission - JRC, 2014 and its reference: Ntegeka et al., 2013 (doi: 10.2788/51262). "
        self.attributeDictionary['comment'    ] += "The data are in European ETRS projection, 5km grid; http://en.wikipedia.org/wiki/European_grid. "

        self.attributeDictionary['description']  = netcdf_attribute_description
Example #51
0
 def testBooleanRaster2Array(self):
   raster = pcraster.readmap("and_Expr1.map")
   mv = 99
   array = pcraster.pcr2numpy(raster, mv)
   self.assertTrue(isinstance(array[0][0], numpy.uint8))
   self.assertEqual(array[0][0], 1)
   self.assertEqual(array[0][1], 1)
   self.assertEqual(array[0][2], 0)
   self.assertEqual(array[1][0], 0)
   self.assertEqual(array[1][1], mv)
   self.assertEqual(array[1][2], 0)
   self.assertEqual(array[2][0], 1)
   self.assertEqual(array[2][1], 1)
   self.assertEqual(array[2][2], 0)
Example #52
0
 def testScalarRaster2Array(self):
   raster = pcraster.readmap("abs_Expr.map")
   mv = 99
   array = pcraster.pcr2numpy(raster, mv)
   self.assertTrue(isinstance(array[0][0], numpy.float32))
   self.assertEqual(array[0][0],  2.0)
   self.assertEqual(array[0][1], -7.0)
   self.assertEqual(array[0][2],  3.5)
   self.assertEqual(array[1][0], -8.5)
   self.assertAlmostEqual(array[1][1], 3.6, 6)
   self.assertEqual(array[1][2], mv)
   self.assertEqual(array[2][0],  0.0)
   self.assertEqual(array[2][1], 14.0)
   self.assertAlmostEqual(array[2][2], -0.8)
Example #53
0
def returnMapValue(pcrX, x, coord):
    #-takes values from an array and writes them into the map at the given (row, col) coordinates
    tempIDArray = pcr.pcr2numpy(pcrX, MV)
    nrRows = coord.shape[0]
    for iCnt in range(nrRows):
        row, col = coord[iCnt, :]
        if row != MV and col != MV:
            tempIDArray[row, col] = x[iCnt]
    pcrX = pcr.numpy2pcr(pcr.Scalar, tempIDArray, MV)
    return pcrX
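
A round-trip sketch pairing retrieveMapValue and returnMapValue, assuming both helpers and the module-level MV are defined in the same script and a clone is set:

import numpy as np
import pcraster as pcr

MV = -999
pcr.setclone(3, 3, 1.0, 0.0, 3.0)
field = pcr.spatial(pcr.scalar(1.0))

# hypothetical (row, col) coordinates of two stations
coordinates = np.array([[0, 0], [2, 1]])

values = retrieveMapValue(field, coordinates)              # read at the stations
field = returnMapValue(field, values * 10.0, coordinates)  # write back, scaled
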
Example #54
0
 def test_round_trip_numpy_array_with_nan(self):
     array = numpy.array([
         [-2, -1],
         [ 0, numpy.nan],
         [ 1, 2 ]
     ])
     nrRows, nrCols, cellSize = 3, 2, 1.0
     west, north = 0.0, 0.0
     pcraster.setclone(nrRows, nrCols, cellSize, west, north)
     raster = pcraster.numpy2pcr(pcraster.Scalar, array, numpy.nan)
     array2 = pcraster.pcr2numpy(raster, numpy.nan)
     self.assertEqual(array2[0][0], -2)
     self.assertEqual(array2[0][1], -1)
     self.assertEqual(array2[1][0], 0)
     self.assertTrue(numpy.isnan(array2[1][1]))
     self.assertEqual(array2[2][0], 1)
     self.assertEqual(array2[2][1], 2)
Example #55
0
 def testOrdinalRaster2Array(self):
   raster = pcraster.readmap("succ_Expr.map")
   mv = 99
   array = pcraster.pcr2numpy(raster, mv)
   self.assertTrue(isinstance(array[0][0], numpy.int32))
   self.assertEqual(array[0][0],-5)
   self.assertEqual(array[0][1], 9)
   self.assertEqual(array[0][2], 9)
   self.assertEqual(array[0][3], 0)
   self.assertEqual(array[1][0],-5)
   self.assertEqual(array[1][1],-5)
   self.assertEqual(array[1][2], 9)
   self.assertEqual(array[1][3], 0)
   self.assertEqual(array[2][0],-5)
   self.assertEqual(array[2][1], 9)
   self.assertEqual(array[2][2], 9)
   self.assertEqual(array[2][3], 2)
   self.assertEqual(array[3][0], 4)
   self.assertEqual(array[3][1], 4)
   self.assertEqual(array[3][2], 9)
   self.assertEqual(array[3][3],mv)
Example #56
0
  def test_pcr_as_numpy(self):
      array = numpy.array([
          [-2.0, -1.0      ],
          [ 0.0,  numpy.nan],
          [ 1.0,  2.0      ]
      ])
      nrRows, nrCols, cellSize = 3, 2, 1.0
      west, north = 0.0, 0.0
      pcraster.setclone(nrRows, nrCols, cellSize, west, north)

      # Create a raster.
      raster = pcraster.numpy2pcr(pcraster.Scalar, array, 999.0)

      # Test type checking.
      with self.assertRaises(Exception) as context_manager:
          pcraster.pcr_as_numpy(5)
      self.assertEqual(str(context_manager.exception),
          "Expecting a PCRaster field")

      # Create an array referencing the raster.
      array2 = pcraster.pcr_as_numpy(raster)
      self.assertEqual(array2[0][0], -2)
      self.assertEqual(array2[0][1], -1)
      self.assertEqual(array2[1][0], 0)
      self.assertTrue(numpy.isnan(array2[1][1]))
      self.assertEqual(array2[2][0], 1)
      self.assertEqual(array2[2][1], 2)

      # Change the array and verify the raster changed too.
      array2[0][0] = 5.0
      self.assertEqual(pcraster.pcr2numpy(raster, 999.0)[0][0], 5.0)

      # Replace existing raster and verify the array still behaves.
      raster += 1.0
      self.assertEqual(array2[0][0], 5.0)

      # Delete the raster and verify the array still behaves.
      del raster
      self.assertEqual(array2[0][0], 5.0)
      self.assertEqual(array2[2][1], 2.0)
Example #57
0
  def test_003(self):
      """ pcr2numpy should not run out of memory """

      nrRows, nrCols, cellSize = 200, 200, 1.0
      west, north = 0.0, 0.0
      pcraster.setclone(nrRows, nrCols, cellSize, west, north)

      raster = pcraster.uniform(1)

      process = psutil.Process(os.getpid())
      mem = process.memory_info()
      init_mem = mem.rss / 2**10

      nr_iterations = 50
      mem_increase = False

      # a small memory increase can occur at runtime;
      # allow for it, but require less than nr_iterations * size(raster)
      max_diff = 400

      for it in range(0, nr_iterations):
        pcraster.pcr2numpy(raster, numpy.nan)
        mem = process.memory_info()
        curr_mem = mem.rss / 2**10
        if curr_mem - init_mem > max_diff:
          mem_increase = True

      raster = pcraster.spatial(pcraster.boolean(1))

      for it in range(0, nr_iterations):
        pcraster.pcr2numpy(raster, numpy.nan)
        mem = process.memory_info()
        curr_mem = mem.rss / 2**10
        if curr_mem - init_mem > max_diff:
          mem_increase = True

      raster = pcraster.nominal(pcraster.uniform(1) * 10)

      for it in range(0, nr_iterations):
        pcraster.pcr2numpy(raster, numpy.nan)
        mem = process.memory_info()
        curr_mem = mem.rss / 2**10
        if curr_mem - init_mem > max_diff:
          mem_increase = True

      self.assertEqual(mem_increase, False)
        time_index_in_netcdf_file = i_year - str_year + 1
        
        value_from_the_hydrological_year_1 = vos.netcdf2PCRobjClone(input_files['file_name']["hydrological_year_1"][var], \
                                                                    varDict.netcdf_short_name[var], time_index_in_netcdf_file,\
                                                                    useDoy = "Yes",
                                                                    cloneMapFileName  = clone_map_file,\
                                                                    LatitudeLongitude = True,\
                                                                    specificFillValue = None)
        
        value_from_the_hydrological_year_2 = vos.netcdf2PCRobjClone(input_files['file_name']["hydrological_year_2"][var], \
                                                                    varDict.netcdf_short_name[var], time_index_in_netcdf_file,\
                                                                    useDoy = "Yes",
                                                                    cloneMapFileName  = clone_map_file,\
                                                                    LatitudeLongitude = True,\
                                                                    specificFillValue = None)
        
        # merging two hydrological years 
        value_for_this_year = pcr.ifthenelse(pcr.scalar(hydro_year_type) == 1, value_from_the_hydrological_year_1, \
                                                                               value_from_the_hydrological_year_2)
        value_for_this_year = pcr.cover(value_for_this_year, 0.0)
        
        if landmask_only: value_for_this_year = pcr.ifthen(landmask, value_for_this_year)
        
        # report to a netcdf file
        ncFileName = output_files[var]['file_name']
        msg = "Saving to the netcdf file: " + str(ncFileName)
        logger.info(msg)
        time_stamp_used = datetime.datetime(i_year, 12, 31, 0)
        netcdf_report.data2NetCDF(ncFileName, varDict.netcdf_short_name[var], pcr.pcr2numpy(value_for_this_year, vos.MV), time_stamp_used)
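
The two-year merge above hinges on pcr.ifthenelse; a minimal sketch with a hypothetical type map, where 1 selects the first input and anything else the second:

import pcraster as pcr

pcr.setclone(4, 4, 1.0, 0.0, 4.0)
hydro_year_type = pcr.spatial(pcr.nominal(1))
year_1 = pcr.spatial(pcr.scalar(10.0))
year_2 = pcr.spatial(pcr.scalar(20.0))

merged = pcr.ifthenelse(pcr.scalar(hydro_year_type) == 1, year_1, year_2)
merged = pcr.cover(merged, 0.0)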

def get_return_period_gumbel(p_zero_in_pcraster, loc_in_pcraster, scale_in_pcraster, flvol_in_pcraster, max_return_period = np.longdouble(1e9), max_return_period_that_can_be_assigned = 1000.):
    """
    Transforms a unique, or array of flood volumes into the belonging return
    periods, according to gumbel parameters (belonging to non-zero part of the
    distribution) and a zero probability
    Inputs:
        p_zero:        probability that flood volume is zero
        loc:           Gumbel location parameter (of non-zero part of distribution)
        scale:         Gumbel scale parameter (of non-zero part of distribution)
        flvol:         Flood volume that will be transformed to return period
        max_return_period: maximum return period considered. This maximum is needed to prevent that floating point
                        precision becomes a problem (default: 1e9)
    This function is copied from: https://repos.deltares.nl/repos/Hydrology/trunk/GLOFRIS/src/rp_bias_corr.py
    """
    
    np.seterr(divide='ignore')
    np.seterr(invalid='ignore')

    # convert all pcraster maps to numpy arrays
    p_zero  = np.longdouble(pcr.pcr2numpy(p_zero_in_pcraster, vos.MV))
    loc     = np.longdouble(pcr.pcr2numpy(loc_in_pcraster   , vos.MV))
    scale   = np.longdouble(pcr.pcr2numpy(scale_in_pcraster , vos.MV))
    flvol   = np.longdouble(pcr.pcr2numpy(flvol_in_pcraster , vos.MV))
    
    # maximum values for the given max_return_period
    max_p = 1.0-1.0/max_return_period
    max_p_residual = np.minimum(np.maximum((max_p-p_zero)/(1.0-p_zero), 0.0), 1.0)
    max_p_residual[p_zero >= max_p] = 0.0 
    max_reduced_variate = -np.log(-np.log((max_p_residual)))

    #~ print np.nanmin(max_p_residual)
    #~ print np.nanmax(max_p_residual)
    #~ print np.amin(max_p_residual)
    #~ print np.amax(max_p_residual)
#~ 
    #~ print np.nanmin(max_reduced_variate)
    #~ print np.nanmax(max_reduced_variate)
    #~ print np.amin(max_reduced_variate)
    #~ print np.amax(max_reduced_variate)

    # compute the Gumbel reduced variate belonging to the Gumbel distribution (excluding any zero values): reduced_variate = (flvol-loc)/scale
    # make sure that the reduced variate does not exceed the maximum reduced variate
    reduced_variate = np.longdouble(np.minimum((flvol-loc)/scale, max_reduced_variate))

    #~ print np.nanmin(reduced_variate)
    #~ print np.nanmax(reduced_variate)
    #~ print np.amin(reduced_variate)
    #~ print np.amax(reduced_variate)
    
    # transform the reduced variate into a probability (residual after removing the zero volume probability)
    p_residual = np.minimum(np.maximum(np.exp(-np.exp(-np.longdouble(reduced_variate))), np.longdouble(0.0)), np.longdouble(1.0))
    #~ p_residual = np.minimum(np.maximum(np.exp(-np.exp(-np.longdouble(reduced_variate))), 0.0), 1.0)

    #~ print np.nanmin(p_residual)
    #~ print np.nanmax(p_residual)
    #~ print np.amin(p_residual)
    #~ print np.amax(p_residual)

    # transform from non-zero only distribution to zero-included distribution
    p = np.minimum(np.maximum(p_residual*(1.0 - p_zero) + p_zero, p_zero), max_p)  # never larger than max_p # 
    p = np.maximum(0.0, p)
    
    #~ print ""
    #~ print "p"
    #~ print np.nanmin(p)
    #~ print np.nanmax(p)
#~ 
    #~ print np.amin(p)
    #~ print np.amax(p)
    #~ print "p"
    #~ print ""

    # transform into a return period    
    return_period = 1.0/(1.0-p)
    
    # assign maximum return period for p_zero = 1.0 (value is always zero)
    return_period[p_zero == 1.0000] = max_return_period

    # limit return period to maximum return period that can be assigned
    return_period[return_period > max_return_period_that_can_be_assigned] = max_return_period_that_can_be_assigned

    # cells with mv remain mv
    return_period[p_zero == vos.MV] = vos.MV
    
    #~ # test values (calculated in the original Hessel's script, not needed)
    #~ test_p = p == 1    
    #~ diff_p = 1.0 - p
    
    print(np.nanmin(return_period))
    print(np.nanmax(return_period))
    print(np.amin(return_period))
    print(np.amax(return_period))

    print(np.nanmin(return_period[p_zero != vos.MV]))
    print(np.nanmax(return_period[p_zero != vos.MV]))
    print(np.amin(return_period[p_zero != vos.MV]))
    print(np.amax(return_period[p_zero != vos.MV]))

    #~ pcr.report(pcr.numpy2pcr(pcr.Scalar, np.float64(return_period), vos.MV), "return_period.map")
    #~ cmd = "aguila " + "return_period.map"
    #~ os.system(cmd)

    return pcr.numpy2pcr(pcr.Scalar, np.float64(return_period), vos.MV)
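
The core of the transform can be checked with plain numpy; a toy sketch on a single cell with made-up Gumbel parameters:

import numpy as np

p_zero, loc, scale = 0.1, 100.0, 50.0   # hypothetical parameters
flvol = 250.0                           # flood volume to transform

p_residual = np.exp(-np.exp(-(flvol - loc) / scale))  # Gumbel CDF of the non-zero part
p = p_residual * (1.0 - p_zero) + p_zero              # add the zero-volume mass
return_period = 1.0 / (1.0 - p)
print(return_period)  # roughly 22.9 years
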
            print(fulldate)

            monthRange = float(calendar.monthrange(int(iYear), int(iMonth))[1])
            print(monthRange)
            
            index = index + 1
            for iVar in range(0,len(varNames)):      
                
                # reading values from the input netcdf files (30min)
                demand_volume_30min = vos.netcdf2PCRobjClone(inputDirectory+inputFiles[iVar],\
                                                             inputVarNames[iVar],
                                                             fulldate,
                                                             None,
                                                             cloneMapFileName) * 1000.*1000./ monthRange   # unit: m3/day
                demand_volume_30min = pcr.ifthen(landmask, demand_volume_30min)
                
                # demand in m/day
                demand = demand_volume_30min /\
                         pcr.areatotal(cellArea, uniqueIDs30min)
                
                # covering the map with zero
                pcrValue = pcr.cover(demand, 0.0)  # unit: m/day                       

                # convert the PCRaster map to a numpy array
                varField = pcr.pcr2numpy(pcrValue, vos.MV)

                # write values to netcdf files
                tssNetCDF.writePCR2NetCDF(ncFileName,varNames[iVar],varField,timeStamp,posCnt = index - 1)
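
The volume-to-depth step in the loop above, isolated as a sketch on a hypothetical clone with a toy cell area and a single zone:

import pcraster as pcr

pcr.setclone(4, 4, 1.0, 0.0, 4.0)
cellArea = pcr.spatial(pcr.scalar(1.0))   # m2 per cell in this toy case
zones = pcr.spatial(pcr.nominal(1))       # a single 30min zone
volume = pcr.spatial(pcr.scalar(1000.0))  # m3/day per cell

# depth = volume / total area of the zone the cell belongs to
demand = volume / pcr.areatotal(cellArea, zones)
demand = pcr.cover(demand, 0.0)           # unit: m/day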