Example #1
import datetime

import netCDF4 as nc
import pcraster as pcr


def netcdf2PCRobj(ncFile, varName, dateInput):
    # EHS (04 APR 2013): convert a netCDF (tss) file to a PCRaster field.
    # The cloneMap is globally defined (outside this method).
    
    # Get netCDF file and variable name:
    f = nc.Dataset(ncFile)
    varName = str(varName)

    # date
    date = dateInput
    if isinstance(date, str):
        date = datetime.datetime.strptime(date, '%Y-%m-%d')
    date = datetime.datetime(date.year, date.month, date.day)
    
    # time index (in the netCDF file)
    nctime = f.variables['time']  # A netCDF time variable object.
    idx = nc.date2index(date, nctime, calendar=nctime.calendar, \
                                                 select='exact') 
    
    # convert to a PCRaster field and close f
    outPCR = pcr.numpy2pcr(pcr.Scalar, f.variables[varName][idx].data,
                           float(f.variables[varName]._FillValue))
    f.close()
    # PCRaster object
    return outPCR
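A minimal usage sketch (the file name, variable name, and date below are hypothetical; a clone matching the netCDF grid must already be set):

# Hypothetical driver for netcdf2PCRobj; assumes pcr.setclone() was
# already called with a clone that matches the netCDF grid.
precip = netcdf2PCRobj('forcing.nc', 'precipitation', '2001-01-31')
pcr.report(precip, 'precip_20010131.map')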
Example #2
 def testOrdinalArray2Raster(self):
   pcraster.setclone("boolean_Expr.map")
   try:
     a = numpy.array([ [0, 1, 3], [20, -3, -2], [0, 9, 8] ])
     result = pcraster.numpy2pcr(pcraster.Ordinal, a, 20)
     self.failUnless(self.mapEqualsValidated(result, "ordinal_Result.map"), "test1: %s" % ("Result and validated result are not the same"))
   except Exception as exception:
     self.failUnless(False, "test1: %s" % (str(exception)))
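The same pattern repeats in the tests below: numpy2pcr requires a clone to be set first, and the third argument marks which array value becomes missing (MV). A minimal standalone sketch, using an in-memory clone instead of a map file (all values illustrative):

import numpy
import pcraster

# Define a 3x3 clone in memory: nrRows, nrCols, cellSize, west, north.
pcraster.setclone(3, 3, 1.0, 0.0, 0.0)
a = numpy.array([[0, 1, 3], [20, -3, -2], [0, 9, 8]])
# Cells holding 20 become missing values in the resulting raster.
result = pcraster.numpy2pcr(pcraster.Ordinal, a, 20)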
Example #3
 def testBooleanArray2Raster(self):
   pcraster.setclone("boolean_Expr.map")
   try:
     a = numpy.array([ [1, 0, 1], [20, 1, 1], [1, 1, 0] ], numpy.uint8) # uint8 is bugzilla #271
     result = pcraster.numpy2pcr(pcraster.Boolean, a, 20)
     self.failUnless(self.mapEqualsValidated(result, "boolean_Result.map"), "test1: %s" % ("Result and validated result are not the same"))
   except Exception as exception:
     self.failUnless(False, "test1: %s" % (str(exception)))
Example #4
 def testLddArray2Raster(self):
   pcraster.setclone("and_Expr1.map")
   try:
     a = numpy.array([ [6, 5, 4], [6, 8, 7], [8, 8, 8] ])
     result = pcraster.numpy2pcr(pcraster.Ldd, a, 20)
     self.failUnless(self.mapEqualsValidated(result, "ldd_Result.map"), "test1: %s" % ("Result and validated result are not the same"))
   except Exception as exception:
     self.failUnless(False, "test1: %s" % (str(exception)))
Example #5
 def testScalarArray2Raster(self):
   pcraster.setclone("boolean_Expr.map")
   try:
     a = numpy.array([ [0.5, 0.34202, 0.310676], [20, 0, -0.981627], [0.707107, 0.144356, 0.0174524] ])
     result = pcraster.numpy2pcr(pcraster.Scalar, a, 20)
     self.failUnless(self.mapEqualsValidated(result, "sin_Result.map"), "test1: %s" % ("Result and validated result are not the same"))
   except Exception as exception:
     self.failUnless(False, "test1: %s" % (str(exception)))
Example #6
 def testDirectionalArray2Raster(self):
   pcraster.setclone("boolean_Expr.map")
   pcraster.setglobaloption("degrees")
   try:
     a = numpy.array([ [math.radians(350),math.radians(0),math.radians(0.01)],\
        [20,math.radians(350),math.radians(21)],\
        [math.radians(359),math.radians(40),math.radians(0)] ])
     result = pcraster.numpy2pcr(pcraster.Directional, a, 20)
     self.failUnless(self.mapEqualsValidated(result, "directional_Result2.map"), "test1: %s" % ("Result and validated result are not the same"))
   except Exception as exception:
     self.failUnless(False, "test1: %s" % (str(exception)))
Example #7
def lookupResRegMatr(ReserVoirLocs, values, hq, JDOY):

    np_res_ids = pcr.pcr2numpy(ReserVoirLocs, 0)
    npvalues = pcr.pcr2numpy(values, 0)
    out = np.copy(npvalues) * 0.0

    if len(hq) > 0:
        for key in hq:
            value = npvalues[np.where(np_res_ids == key)]

            val = np.interp(value, hq[key][:, 0], hq[key][:, JDOY])

            out[np.where(np_res_ids == key)] = val

    return pcr.numpy2pcr(pcr.Scalar, out, 0)
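A sketch of how lookupResRegMatr might be driven, with a single made-up reservoir and rating table (hq maps reservoir id to a 2D table whose column 0 holds water levels and column JDOY holds the discharge for that day of year; all values illustrative):

import numpy as np
import pcraster as pcr

pcr.setclone(2, 2, 1.0, 0.0, 0.0)
# one reservoir with id 1 in the upper-left cell
res_locs = pcr.numpy2pcr(pcr.Nominal, np.array([[1, 0], [0, 0]]), -1)
levels = pcr.numpy2pcr(pcr.Scalar, np.full((2, 2), 2.5), -999.0)
hq = {1: np.column_stack([np.linspace(0.0, 10.0, 11),      # water level
                          np.linspace(0.0, 250.0, 11)])}   # discharge (JDOY=1)
outflow = lookupResRegMatr(res_locs, levels, hq, JDOY=1)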
Example #8
def returnMapValue(pcrX, x, coord):
    # -retrieves values from an array and updates the corresponding cells in the map
    tempIDArray = pcr.pcr2numpy(pcrX, MV)
    nrRows = coord.shape[0]
    for iCnt in range(nrRows):
        row, col = coord[iCnt, :]
        if row != MV and col != MV:
            tempIDArray[row, col] = x[iCnt]
    pcrX = pcr.numpy2pcr(pcr.Scalar, tempIDArray, MV)
    return pcrX
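A sketch of calling returnMapValue (MV is the module-level missing value the helper assumes; everything below is illustrative):

import numpy as np
import pcraster as pcr

MV = -999.0  # module-level missing value assumed by returnMapValue
pcr.setclone(3, 3, 1.0, 0.0, 0.0)
base = pcr.numpy2pcr(pcr.Scalar, np.zeros((3, 3)), MV)
coord = np.array([[0, 2], [2, 1]])   # (row, col) pairs to update
x = np.array([1.5, 2.5])             # new values, one per pair
updated = returnMapValue(base, x, coord)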
Example #9
 def test_round_trip_numpy_array_with_nan(self):
     array = numpy.array([
         [-2, -1],
         [ 0, numpy.nan],
         [ 1, 2 ]
     ])
     nrRows, nrCols, cellSize = 3, 2, 1.0
     west, north = 0.0, 0.0
     pcraster.setclone(nrRows, nrCols, cellSize, west, north)
     raster = pcraster.numpy2pcr(pcraster.Scalar, array, numpy.nan)
     array2 = pcraster.pcr2numpy(raster, numpy.nan)
     self.assertEqual(array2[0][0], -2)
     self.assertEqual(array2[0][1], -1)
     self.assertEqual(array2[1][0], 0)
     self.assertTrue(numpy.isnan(array2[1][1]))
     self.assertEqual(array2[2][0], 1)
     self.assertEqual(array2[2][1], 2)
Example #10
    def set_value(self, long_var_name, src):
        """
        Set the value(s) in a map using a numpy array as source

        :var long_var_name: identifier of a variable in the model.
        :var src: all values to set for the given variable. If only one value
                  is present a uniform map will be set in the wflow model.
        """
        # first part should be the component name
        self.bmilogger.debug("set_value: " + long_var_name)
        cname = long_var_name.split(self.comp_sep)
        if cname[0] in self.bmimodels:
            self.bmimodels[cname[0]].set_value(cname[1], src)
            if self.wrtodisk:
                pcr.report(
                    pcr.numpy2pcr(pcr.Scalar, src, -999),
                    long_var_name + "_set_" + str(self.get_current_time()) + ".map",
                )
Example #11
def lookupResFunc(ReserVoirLocs, values, sh, dirLookup):

    np_res_ids = pcr.pcr2numpy(ReserVoirLocs, 0)
    npvalues = pcr.pcr2numpy(values, 0)
    out = np.copy(npvalues) * 0.0

    if len(sh) > 0:
        for key in sh:
            value = npvalues[np.where(np_res_ids == key)]

            if dirLookup == "0-1":
                val = np.interp(value, sh[key][:, 0], sh[key][:, 1])
            if dirLookup == "1-0":
                val = np.interp(value, sh[key][:, 1], sh[key][:, 0])

            out[np.where(np_res_ids == key)] = val

    return pcr.numpy2pcr(pcr.Scalar, out, 0)
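The dirLookup flag selects the interpolation direction through the same sh table: "0-1" interpolates from column 0 to column 1 (e.g. water level to storage), "1-0" inverts it. A made-up round-trip sketch:

import numpy as np
import pcraster as pcr

pcr.setclone(2, 2, 1.0, 0.0, 0.0)
lake_locs = pcr.numpy2pcr(pcr.Nominal, np.array([[1, 0], [0, 0]]), -1)
level = pcr.numpy2pcr(pcr.Scalar, np.full((2, 2), 3.0), -999.0)
# storage curve: column 0 = water level, column 1 = storage (made up)
sh = {1: np.column_stack([np.linspace(0.0, 10.0, 11),
                          np.linspace(0.0, 5.0e6, 11)])}
storage = lookupResFunc(lake_locs, level, sh, "0-1")   # level -> storage
level2 = lookupResFunc(lake_locs, storage, sh, "1-0")  # storage -> level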
Example #12
def points_to_map(in_map, xcor, ycor, tolerance):
    """
    Returns a map with non zero values at the points defined
    in X, Y pairs. It's goal is to replace the pcraster col2map program.

    tolerance should be 0.5 to select single points
    Performance is not very good and scales linear with the number of points


    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate (array or single value)
        - ycor - y coordinate (array or single value)
        - tolerance - tolerance in cell units. 0.5 selects a single cell\
        10 would select a 10x10 block of cells

    Output:
        - Map with values burned in. 1 for first point, 2 for second and so on
    """
    point = in_map * 0.0

    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.defined(in_map)), np.nan)
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.defined(in_map)), np.nan)
    cell_length = float(pcr.celllength())

    # simple check to allow both floats and numpy arrays
    try:
        c = xcor.ndim
    except AttributeError:
        xcor = np.array([xcor])
        ycor = np.array([ycor])

    # Loop over points and "burn in" map
    for n in range(0, xcor.size):
        if Verbose:
            print(n)
        diffx = x - xcor[n]
        diffy = y - ycor[n]
        col_ = np.absolute(diffx) <= (cell_length * tolerance)  # cellsize
        row_ = np.absolute(diffy) <= (cell_length * tolerance)  # cellsize
        point = point + pcr.numpy2pcr(pcr.Scalar, ((col_ * row_) * (n + 1)), np.nan)

    return pcr.ordinal(point)
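A usage sketch (clone and coordinates are made up; Verbose must exist as a module-level flag, as in the surrounding wflow module):

import numpy as np
import pcraster as pcr

Verbose = False  # module-level flag referenced inside points_to_map
pcr.setclone(5, 5, 1.0, 0.0, 0.0)
base = pcr.numpy2pcr(pcr.Scalar, np.zeros((5, 5)), -999.0)
xcor = np.array([1.5, 3.5])    # cell-centre x coordinates
ycor = np.array([-1.5, -3.5])  # cell-centre y coordinates (north = 0.0)
points = points_to_map(base, xcor, ycor, 0.5)  # 1 at first point, 2 at second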
Example #13
    def gettimestep(self, timestep, logging, var="P"):
        """
        Gets a map for a single timestep. Reads data in blocks, assuming sequential access.

        timestep: framework timestep (1-based)
        logging: python logging object
        var: variable to get from the file
        """

        if var in self.dataset.variables:
            np_step = self.alldat[var][
                timestep - 1,
                self.latidx.min() : self.latidx.max() + 1,
                self.lonidx.min() : self.lonidx.max() + 1,
            ]
            miss = float(self.dataset.variables[var]._FillValue)
            return pcr.numpy2pcr(pcr.Scalar, np_step, miss), True
        else:
            logging.debug("Var (" + var + ") not found returning map with 0.0")
            return pcr.cover(pcr.scalar(0.0)), False
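The slicing pattern itself is plain numpy: a (time, lat, lon) block is reduced to the model window before the conversion to a PCRaster field. A standalone sketch with random data (all shapes made up):

import numpy as np

alldat = np.random.rand(10, 180, 360)  # (time, lat, lon) block, made up
latidx = np.arange(40, 60)             # rows covered by the model domain
lonidx = np.arange(100, 140)           # columns covered by the model domain
timestep = 3                           # framework timestep, 1-based
np_step = alldat[timestep - 1,
                 latidx.min():latidx.max() + 1,
                 lonidx.min():lonidx.max() + 1]
print(np_step.shape)  # (20, 40)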
Example #14
    def get_value_at_indices(self, long_var_name, inds):
        """
        Get a numpy array of the values at the given indices

        :var long_var_name: identifier of a variable in the model:
        :var inds: List of tuples; each tuple contains one index for each dimension of the given variable, i.e. each tuple indicates one element in the multi-dimensional variable array:

        :return: numpy array of values in the data type returned by the function get_var_type.
        """
        cname = long_var_name.split(self.comp_sep)

        if cname[0] in self.bmimodels:
            tmp = self.bmimodels[cname[0]].get_value(cname[1])
            if self.wrtodisk:
                pcr.report(
                    pcr.numpy2pcr(pcr.Scalar, tmp, -999),
                    long_var_name + "_get_" + str(self.get_current_time()) + ".map",
                )
            return self.bmimodels[cname[0]].get_value_at_indices(cname[1], inds)
        # else:
        return None
Example #15
    def get_value(self, long_var_name):
        """
        Get the value(s) of a variable as a numpy array

        :var long_var_name: name of the variable
        :return: a np array of long_var_name
        """
        # first part should be the component name
        self.bmilogger.debug("get_value: " + long_var_name)
        cname = long_var_name.split(self.comp_sep)
        if cname[0] in self.bmimodels:
            tmp = self.bmimodels[cname[0]].get_value(cname[1])
            if self.wrtodisk:
                pcr.report(
                    pcr.numpy2pcr(pcr.Scalar, tmp, -999),
                    long_var_name + "_get_" + str(self.get_current_time()) + ".map",
                )
            return tmp
        else:
            self.bmilogger.error("get_value: " + long_var_name + " returning None!!!!")
            return None
Example #16
def idtoid(sourceidmap, targetidmap, valuemap):
    """
    tranfer the values from valuemap at the point id's in sourceidmap to the areas in targetidmap.

    :param pointmap:
    :param areamap:
    :param valuemap:
    :return:
    """

    _area = pcr.pcr2numpy(targetidmap, 0.0).copy().astype(float)
    _pt = pcr.pcr2numpy(sourceidmap, 0.0).copy()
    _val = pcr.pcr2numpy(valuemap, 0.0).copy()

    for val in np.unique(_pt):
        if val > 0:  #
            _area[_area == val] = np.mean(_val[_pt == val])

    retmap = pcr.numpy2pcr(pcr.Scalar, _area, 0.0)

    return retmap
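A small self-contained sketch (ids and values made up): the area sharing id 1 in targetidmap receives the mean of valuemap at the id-1 points of sourceidmap.

import numpy as np
import pcraster as pcr

pcr.setclone(2, 3, 1.0, 0.0, 0.0)
src = pcr.numpy2pcr(pcr.Nominal, np.array([[1, 0, 0], [0, 0, 0]]), -1)
tgt = pcr.numpy2pcr(pcr.Nominal, np.array([[1, 1, 1], [0, 0, 0]]), -1)
val = pcr.numpy2pcr(pcr.Scalar, np.array([[7.5, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]]), -999.0)
out = idtoid(src, tgt, val)  # cells of area 1 all become 7.5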
Example #17
  def test_pcr_as_numpy(self):
      array = numpy.array([
          [-2.0, -1.0      ],
          [ 0.0,  numpy.nan],
          [ 1.0,  2.0      ]
      ])
      nrRows, nrCols, cellSize = 3, 2, 1.0
      west, north = 0.0, 0.0
      pcraster.setclone(nrRows, nrCols, cellSize, west, north)

      # Create a raster.
      raster = pcraster.numpy2pcr(pcraster.Scalar, array, 999.0)

      # Test type checking.
      with self.assertRaises(Exception) as context_manager:
          pcraster.pcr_as_numpy(5)
      self.assertEqual(str(context_manager.exception),
          "Expecting a PCRaster field")

      # Create an array referencing the raster.
      array2 = pcraster.pcr_as_numpy(raster)
      self.assertEqual(array2[0][0], -2)
      self.assertEqual(array2[0][1], -1)
      self.assertEqual(array2[1][0], 0)
      self.assertTrue(numpy.isnan(array2[1][1]))
      self.assertEqual(array2[2][0], 1)
      self.assertEqual(array2[2][1], 2)

      # Change the array and verify the raster changed too.
      array2[0][0] = 5.0
      self.assertEqual(pcraster.pcr2numpy(raster, 999.0)[0][0], 5.0)

      # Replace existing raster and verify the array still behaves.
      raster += 1.0
      self.assertEqual(array2[0][0], 5.0)

      # Delete the raster and verify the array still behaves.
      del raster
      self.assertEqual(array2[0][0], 5.0)
      self.assertEqual(array2[2][1], 2.0)
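The contrast demonstrated above: pcr2numpy copies the cell values, while pcr_as_numpy returns a view on the raster's buffer, so writes propagate in both directions. A minimal sketch:

import numpy
import pcraster

pcraster.setclone(3, 2, 1.0, 0.0, 0.0)
raster = pcraster.numpy2pcr(pcraster.Scalar, numpy.zeros((3, 2)), -999.0)
copy = pcraster.pcr2numpy(raster, numpy.nan)  # independent copy
view = pcraster.pcr_as_numpy(raster)          # shared buffer
view[0][0] = 5.0
assert pcraster.pcr2numpy(raster, numpy.nan)[0][0] == 5.0
assert copy[0][0] == 0.0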
Example #18
    def gettimestep(self, timestep, logging, var="P", tsdatetime=None):
        """
        Gets a map for a single timestep. Reads data in blocks, assuming sequential access.

        timestep: framework timestep (1-based)
        logging: python logging object
        var: variable to get from the file
        """
        ncindex = timestep - 1

        if var in self.dataset.variables:
            if tsdatetime is not None:
                if tsdatetime.replace(tzinfo=None) != self.datetimelist[
                    ncindex
                ].replace(tzinfo=None):
                    logging.warning(
                        "Date/time of state ("
                        + var
                        + " in "
                        + self.fname
                        + ") does not match. Wanted "
                        + str(tsdatetime)
                        + " got "
                        + str(self.datetimelist[ncindex])
                    )

            np_step = self.dataset.variables[var][
                ncindex,
                self.latidx.min() : self.latidx.max() + 1,
                self.lonidx.min() : self.lonidx.max() + 1,
            ]

            miss = float(self.dataset.variables[var]._FillValue)
            return pcr.numpy2pcr(pcr.Scalar, np_step, miss), True
        else:
            # logging.debug("Var (" + var + ") not found returning map with 0.0")
            return pcr.cover(pcr.scalar(0.0)), False
Example #19
    def gettimestep(self,
                    timestep,
                    logging,
                    tsdatetime=None,
                    var="P",
                    shifttime=False):
        """
        Gets a map for a single timestep. reads data in blocks assuming sequential access

        :var timestep: framework timestep (1-based)
        :var logging: python logging object
        :var var: variable to get from the file
        :var shifttime: is True start at 1 in the NC file (instead of 0)
        :var tsdatetime: Assumed date/time of this timestep

            without ensembles (dims = 3)
            window = data[dpos,latidx.min():latidx.max()+1,lonidx.min():lonidx.max()+1]
            
            with ensembles from Delft-FEWS (dims = 4):
            window = data[dpos,realization,latidx.min():latidx.max()+1,lonidx.min():lonidx.max()+1]
        """
        if shifttime:
            ncindex = timestep
        else:
            ncindex = timestep - 1

        ncindex = ncindex + self.offset

        if self.datetimelist.size < ncindex + 1:
            ncindex = self.datetimelist.size - 1

        if tsdatetime is not None:
            if tsdatetime.replace(
                    tzinfo=None) != self.datetimelist[ncindex].replace(
                        tzinfo=None):
                logging.warning("Date/time does not match. Wanted " +
                                str(tsdatetime) + " got " +
                                str(self.datetimelist[ncindex]))
                import bisect

                pos = bisect.bisect_left(self.datetimelist,
                                         tsdatetime.replace(tzinfo=None))
                if pos >= self.datetimelist.size:
                    pos = self.datetimelist.size - 1
                    logging.warning(
                        "No matching date/time found using last date/time again..."
                    )
                self.offset = pos - ncindex
                logging.warning(
                    "Adjusting to the date/time at index and setting offset: "
                    + str(pos) + ":" + str(self.offset) + ":" +
                    str(self.datetimelist[pos]))
                ncindex = pos

        if var in self.alldat:
            # if ncindex == self.lstep:  # Read new block of data in mem
            #    logging.debug("reading new netcdf data block starting at: " + str(ncindex))
            #    for vars in self.alldat:
            #        self.alldat[vars] = self.dataset.variables[vars][ncindex:ncindex + self.maxsteps]
            #
            # self.fstep = ncindex
            # self.lstep = ncindex + self.maxsteps

            if len(self.alldat[var].dimensions) == 3:
                np_step = self.alldat[var][
                    ncindex - self.fstep,
                    self.latidx.min():self.latidx.max() + 1,
                    self.lonidx.min():self.lonidx.max() + 1, ]
            if len(self.alldat[var].dimensions) == 4:
                np_step = self.alldat[var][
                    ncindex - self.fstep, 0,
                    self.latidx.min():self.latidx.max() + 1,
                    self.lonidx.min():self.lonidx.max() + 1, ]

            miss = float(self.dataset.variables[var]._FillValue)
            if self.flip:
                return pcr.numpy2pcr(pcr.Scalar,
                                     np.flipud(np_step).copy(), miss), True
            else:
                return pcr.numpy2pcr(pcr.Scalar, np_step, miss), True
        else:
            # logging.debug("Var (" + var + ") not found returning 0")
            return pcr.cover(pcr.scalar(0.0)), False
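The realignment step above leans on bisect.bisect_left over the sorted datetime axis; a standalone sketch (dates made up):

import bisect
import datetime

import numpy as np

datetimelist = np.array([datetime.datetime(2000, 1, d) for d in range(1, 11)])
wanted = datetime.datetime(2000, 1, 4)
pos = bisect.bisect_left(datetimelist, wanted)
print(pos, datetimelist[pos])  # 3 2000-01-04 00:00:00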
Example #20
def main():

    ### Read input arguments #####
    logfilename = 'wtools_static_maps.log'
    parser = OptionParser()
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option('-q', '--quiet',
                      dest='verbose', default=True, action='store_false',
                      help='do not print status messages to stdout')
    parser.add_option('-i', '--ini', dest='inifile', default=None,
                      help='ini file with settings for static_maps.exe')
    parser.add_option('-s', '--source',
                      dest='source', default='wflow',
                      help='Source folder containing clone (default=./wflow)')
    parser.add_option('-d', '--destination',
                      dest='destination', default='staticmaps',
                      help='Destination folder (default=./staticmaps)')
    parser.add_option('-r', '--river',
                      dest='rivshp', default=None,
                      help='river network polyline layer (ESRI Shapefile)')
    parser.add_option('-c', '--catchment',
                      dest='catchshp', default=None,
                      help='catchment polygon layer (ESRI Shapefile)')
    parser.add_option('-g', '--gauges',
                      dest='gaugeshp', default=None,
                      help='gauge point layer (ESRI Shapefile)')
    parser.add_option('-D', '--dem',
                      dest='dem_in', default=None,
                      help='digital elevation model (GeoTiff)')
    parser.add_option('-L', '--landuse',
                      dest='landuse', default=None,
                      help='land use / land cover layer (GeoTiff)')
    parser.add_option('-S', '--soiltype',
                      dest='soil', default=None,
                      help='soil type layer (GeoTiff)')
    parser.add_option('-V', '--vegetation',
                      dest='lai', default=None,
                      help='vegetation LAI layer location (containing 12 GeoTiffs <LAI00000.XXX.tif>)')
    parser.add_option('-O', '--other_maps',
                      dest='other_maps', default=None,
                      help='bracketed [] comma-separated list of paths to other maps that should be reprojected')
    parser.add_option('-C', '--clean',
                      dest='clean', default=False, action='store_true',
                      help='Clean the .xml files from static maps folder when finished')
    parser.add_option('-A', '--alltouch',
                      dest='alltouch', default=False, action='store_true',
                      help='option to burn catchments "all touching".\nUseful when catchment-size is small compared to cellsize')
    (options, args) = parser.parse_args()
    # parse other maps into an array
    if options.other_maps is not None:
        options.other_maps = options.other_maps.replace(' ', '').replace('[', '').replace(']', '').split(',')

    options.source = os.path.abspath(options.source)
    clone_map = os.path.join(options.source, 'mask.map')
    clone_shp = os.path.join(options.source, 'mask.shp')
    clone_prj = os.path.join(options.source, 'mask.prj')
    
    if None in (options.inifile,
                options.rivshp,
                options.catchshp,
                options.dem_in):
        msg = """The following files are compulsory:
        - ini file
        - DEM (raster)
        - river (shape)
        - catchment (shape)
        """
        print(msg)
        parser.print_help()
        sys.exit(1)
    if not os.path.exists(options.inifile):
        print('path to ini file cannot be found')
        sys.exit(1)
    if not os.path.exists(options.rivshp):
        print('path to river shape cannot be found')
        sys.exit(1)
    if not os.path.exists(options.catchshp):
        print('path to catchment shape cannot be found')
        sys.exit(1)
    if not os.path.exists(options.dem_in):
        print('path to DEM cannot be found')
        sys.exit(1)
        
    
    # open a logger, dependent on verbose print to screen or not
    logger, ch = wtools_lib.setlogger(logfilename, 'WTOOLS', options.verbose)

    # create directories # TODO: check if workdir is still necessary, try to keep in memory as much as possible

    # delete old files (when the source and destination folder are different)
    if np.logical_and(os.path.isdir(options.destination),
                      options.destination != options.source):
        shutil.rmtree(options.destination)
    if options.destination != options.source:
        os.makedirs(options.destination)

    # Read mask
    if not(os.path.exists(clone_map)):
        logger.error('Clone file {:s} not found. Please run create_grid first.'.format(clone_map))
        sys.exit(1)
    else:
        # set clone
        pcr.setclone(clone_map)
        # get the extent from clone.tif
        xax, yax, clone, fill_value = gis.gdal_readmap(clone_map, 'GTiff')
        trans = wtools_lib.get_geotransform(clone_map)
        extent = wtools_lib.get_extent(clone_map)
        xmin, ymin, xmax, ymax = extent
        zeros = np.zeros(clone.shape)
        ones = pcr.numpy2pcr(pcr.Scalar, np.ones(clone.shape), -9999)
        # get the projection from clone.tif
        srs = wtools_lib.get_projection(clone_map)
        unit_clone = srs.GetAttrValue('UNIT').lower()

    ### READ CONFIG FILE
    # open config-file
    config=wtools_lib.OpenConf(options.inifile)
    
    # read settings
    snapgaugestoriver = wtools_lib.configget(config, 'settings',
                                             'snapgaugestoriver',
                                              True, datatype='boolean')
    burnalltouching = wtools_lib.configget(config, 'settings',
                                           'burncatchalltouching',
                                            True, datatype='boolean')
    burninorder = wtools_lib.configget(config, 'settings',
                                       'burncatchalltouching',
                                       False, datatype='boolean')
    verticetollerance = wtools_lib.configget(config, 'settings',
                                             'vertice_tollerance',
                                             0.0001, datatype='float')
    
    ''' read parameters '''
    burn_outlets = wtools_lib.configget(config, 'parameters',
                                        'burn_outlets', 10000,
                                        datatype='int')
    burn_rivers = wtools_lib.configget(config, 'parameters',
                                       'burn_rivers', 200,
                                       datatype='int')
    burn_connections = wtools_lib.configget(config, 'parameters',
                                            'burn_connections', 100,
                                            datatype='int')
    burn_gauges = wtools_lib.configget(config, 'parameters',
                                       'burn_gauges', 100,
                                       datatype='int')
    minorder = wtools_lib.configget(config, 'parameters',
                                    'riverorder_min', 3,
                                    datatype='int')
    percentiles = np.array(
        config.get('parameters', 'statisticmaps', '0, 100').replace(
            ' ', '').split(','), dtype='float')
            
    # read the parameters for generating a temporary very high resolution grid
    if unit_clone == 'degree':
       cellsize_hr = wtools_lib.configget(config, 'parameters',
                                          'highres_degree', 0.0005,
                                          datatype='float')
    elif (unit_clone == 'metre') or (unit_clone == 'meter'):
       cellsize_hr = wtools_lib.configget(config, 'parameters',
                                          'highres_metre', 50,
                                          datatype='float') 
    
    cols_hr = int((float(xmax)-float(xmin))/cellsize_hr + 2)
    rows_hr = int((float(ymax)-float(ymin))/cellsize_hr + 2)
    hr_trans = (float(xmin), cellsize_hr, float(0),
                float(ymax), 0, -cellsize_hr)
    clone_hr = os.path.join(options.destination, 'clone_highres.tif')
    # make a highres clone as well!
    wtools_lib.CreateTif(clone_hr, rows_hr, cols_hr, hr_trans, srs, 0)

    # read staticmap locations
    catchment_map = wtools_lib.configget(config, 'staticmaps',
                                         'catchment', 'wflow_catchment.map')
    dem_map = wtools_lib.configget(config, 'staticmaps',
                                   'dem', 'wflow_dem.map')
    demmax_map = wtools_lib.configget(config, 'staticmaps',
                                      'demmax', 'wflow_demmax.map')
    demmin_map = wtools_lib.configget(config, 'staticmaps',
                                      'demmin', 'wflow_demmin.map')
    gauges_map = wtools_lib.configget(config, 'staticmaps',
                                      'gauges', 'wflow_gauges.map')
    landuse_map = wtools_lib.configget(config, 'staticmaps',
                                       'landuse', 'wflow_landuse.map')
    ldd_map = wtools_lib.configget(config, 'staticmaps',
                                   'ldd', 'wflow_ldd.map')
    river_map = wtools_lib.configget(config, 'staticmaps',
                                     'river', 'wflow_river.map')
    outlet_map = wtools_lib.configget(config, 'staticmaps',
                                      'outlet', 'wflow_outlet.map')
    riverlength_fact_map = wtools_lib.configget(config, 'staticmaps',
                                                'riverlength_fact',
                                                'wflow_riverlength_fact.map')
    soil_map = wtools_lib.configget(config, 'staticmaps',
                                    'soil', 'wflow_soil.map')
    streamorder_map = wtools_lib.configget(config, 'staticmaps',
                                           'streamorder',
                                           'wflow_streamorder.map')
    subcatch_map = wtools_lib.configget(config, 'staticmaps',
                                        'subcatch', 'wflow_subcatch.map')

    # read mask location (optional)
    masklayer = wtools_lib.configget(config, 'mask', 'masklayer', options.catchshp)


    # ???? empty = pcr.ifthen(ones == 0, pcr.scalar(0))

    # TODO: check if extents are correct this way
    # TODO: check what the role of missing values is in zeros and ones (l. 123 in old code)

    # first add a missing value to dem_in
    ds = gdal.Open(options.dem_in, gdal.GA_Update)
    RasterBand = ds.GetRasterBand(1)
    fill_val = RasterBand.GetNoDataValue()

    if fill_val is None:
        RasterBand.SetNoDataValue(-9999)
    ds = None
    
    # reproject to clone map: see http://stackoverflow.com/questions/10454316/how-to-project-and-resample-a-grid-to-match-another-grid-with-gdal-python
    # resample DEM
    logger.info('Resampling dem from {:s} to {:s}'.format(os.path.abspath(options.dem_in), os.path.join(options.destination, dem_map)))
    gis.gdal_warp(options.dem_in, clone_map, os.path.join(options.destination, dem_map), format='PCRaster', gdal_interp=gdalconst.GRA_Average)
    # retrieve amount of rows and columns from clone
    # TODO: make windowstats applicable to source/target with different projections. This does not work yet.
    # retrieve srs from DEM
    try:
        srs_dem = wtools_lib.get_projection(options.dem_in)
    except:
        logger.warning('No projection found in DEM, assuming WGS 1984 lat long')
        srs_dem = osr.SpatialReference()
        srs_dem.ImportFromEPSG(4326)
    clone2dem_transform = osr.CoordinateTransformation(srs,srs_dem)
    #if srs.ExportToProj4() == srs_dem.ExportToProj4():
    for percentile in percentiles:
        if percentile >= 100:
            logger.info('computing window maximum')
            percentile_dem = os.path.join(options.destination, 'wflow_dem_max.map')
        elif percentile <= 0:
            logger.info('computing window minimum')
            percentile_dem = os.path.join(options.destination, 'wflow_dem_min.map')
        else:
            logger.info('computing window {:d} percentile'.format(int(percentile)))
            percentile_dem = os.path.join(options.destination, 'wflow_dem_{:03d}.map'.format(int(percentile)))

        percentile_dem = os.path.join(options.destination, 'wflow_dem_{:03d}.map'.format(int(percentile)))
        stats = wtools_lib.windowstats(options.dem_in, len(yax), len(xax),
                               trans, srs, percentile_dem, percentile, transform=clone2dem_transform,logger=logger)
#    else:
#        logger.warning('Projections of DEM and clone are different. DEM statistics for different projections is not yet implemented')

    """

    # burn in rivers
    # first convert and clip the river shapefile
    # retrieve river shape projection, if not available assume EPSG:4326
    file_att = os.path.splitext(os.path.basename(options.rivshp))[0]
    ds = ogr.Open(options.rivshp)
    lyr = ds.GetLayerByName(file_att)
    extent = lyr.GetExtent()
    extent_in = [extent[0], extent[2], extent[1], extent[3]]
    try:
        # get spatial reference from shapefile
        srs_rivshp = lyr.GetSpatialRef()
        logger.info('Projection in river shapefile is {:s}'.format(srs_rivshp.ExportToProj4()))
    except:
        logger.warning('No projection found in {:s}, assuming WGS 1984 lat-lon'.format(options.rivshp))
        srs_rivshp = osr.SpatialReference()
        srs_rivshp.ImportFromEPSG(4326)
    rivprojshp = os.path.join(options.destination, 'rivshp_proj.shp')
    logger.info('Projecting and clipping {:s} to {:s}'.format(options.rivshp, rivprojshp))
    # TODO: Line below takes a very long time to process, the bigger the shapefile, the more time. How do we deal with this?
    call(('ogr2ogr','-s_srs', srs_rivshp.ExportToProj4(),'-t_srs', srs.ExportToProj4(), '-clipsrc', '{:f}'.format(xmin), '{:f}'.format(ymin), '{:f}'.format(xmax), '{:f}'.format(ymax), rivprojshp, options.rivshp))
    """

    # TODO: BURNING!!


    # project catchment layer to projection of clone
    file_att = os.path.splitext(os.path.basename(options.catchshp))[0]
    print(options.catchshp)
    ds = ogr.Open(options.catchshp)
    lyr = ds.GetLayerByName(file_att)
    extent = lyr.GetExtent()
    extent_in = [extent[0], extent[2], extent[1], extent[3]]
    try:
        # get spatial reference from shapefile
        srs_catchshp = lyr.GetSpatialRef()
        logger.info('Projection in catchment shapefile is {:s}'.format(srs_catchshp.ExportToProj4()))
    except:
        logger.warning('No projection found in {:s}, assuming WGS 1984 lat-lon'.format(options.catchshp))
        srs_catchshp = osr.SpatialReference()
        srs_catchshp.ImportFromEPSG(4326)
    catchprojshp = os.path.join(options.destination, 'catchshp_proj.shp')
    logger.info('Projecting {:s} to {:s}'.format(options.catchshp, catchprojshp))
    call(('ogr2ogr','-s_srs', srs_catchshp.ExportToProj4(),'-t_srs', srs.ExportToProj4(), '-clipsrc', '{:f}'.format(xmin), '{:f}'.format(ymin), '{:f}'.format(xmax), '{:f}'.format(ymax), catchprojshp, options.catchshp))

    #
    logger.info('Calculating ldd')
    ldddem = pcr.readmap(os.path.join(options.destination, dem_map))
    ldd_select=pcr.lddcreate(ldddem, 1e35, 1e35, 1e35, 1e35)
    pcr.report(ldd_select, os.path.join(options.destination, 'wflow_ldd.map'))

    # compute stream order, identify river cells
    streamorder = pcr.ordinal(pcr.streamorder(ldd_select))
    river = pcr.ifthen(streamorder >= pcr.ordinal(minorder), pcr.boolean(1))
    # find the minimum value in the DEM and cover missing values with a river with this value. Effect is none!! so now left out!
    # mindem = int(np.min(pcr.pcr2numpy(pcr.ordinal(os.path.join(options.destination, dem_map)),9999999)))
    # dem_resample_map = pcr.cover(os.path.join(options.destination, dem_map), pcr.scalar(river)*0+mindem)
    # pcr.report(dem_resample_map, os.path.join(options.destination, dem_map))
    pcr.report(streamorder, os.path.join(options.destination, streamorder_map))
    pcr.report(river, os.path.join(options.destination, river_map))

    # deal with your catchments
    if options.gaugeshp is None:
        logger.info('No gauges defined, using outlets instead')
        gauges = pcr.ordinal(
            pcr.uniqueid(
                pcr.boolean(
                    pcr.ifthen(pcr.scalar(ldd_select)==5,
                               pcr.boolean(1)
                               )
                )
            )
        )
        pcr.report(gauges, os.path.join(options.destination, gauges_map))
    # TODO: Add the gauge shape code from StaticMaps.py (line 454-489)
    # TODO: add river length map (see SticMaps.py, line 492-499)

    # report river length
    # make a high resolution empty map
    dem_hr_file = os.path.join(options.destination, 'dem_highres.tif')
    burn_hr_file = os.path.join(options.destination, 'burn_highres.tif')
    demburn_hr_file = os.path.join(options.destination, 'demburn_highres.map')
    riv_hr_file = os.path.join(options.destination, 'riv_highres.map')
    gis.gdal_warp(options.dem_in, clone_hr, dem_hr_file)
    # wtools_lib.CreateTif(riv_hr, rows_hr, cols_hr, hr_trans, srs, 0)
    file_att = os.path.splitext(os.path.basename(options.rivshp))[0]
    # open the shape layer
    ds = ogr.Open(options.rivshp)
    lyr = ds.GetLayerByName(file_att)
    gis.ogr_burn(lyr, clone_hr, -100, file_out=burn_hr_file,
                  format='GTiff', gdal_type=gdal.GDT_Float32, fill_value=0)
    # read dem and burn values and add
    xax_hr, yax_hr, burn_hr, fill = gis.gdal_readmap(burn_hr_file, 'GTiff')
    burn_hr[burn_hr==fill] = 0
    xax_hr, yax_hr, dem_hr, fill = gis.gdal_readmap(dem_hr_file, 'GTiff')
    dem_hr[dem_hr==fill] = np.nan
    demburn_hr = dem_hr + burn_hr
    demburn_hr[np.isnan(demburn_hr)] = -9999
    gis.gdal_writemap(demburn_hr_file, 'PCRaster', xax_hr, yax_hr, demburn_hr, -9999.)
    pcr.setclone(demburn_hr_file)
    demburn_hr = pcr.readmap(demburn_hr_file)
    ldd_hr = pcr.lddcreate(demburn_hr, 1e35, 1e35, 1e35, 1e35)
    pcr.report(ldd_hr, os.path.join(options.destination, 'ldd_hr.map'))
    pcr.setglobaloption('unitcell')
    riv_hr = pcr.scalar(pcr.streamorder(ldd_hr) >= minorder)*pcr.downstreamdist(ldd_hr)
    pcr.report(riv_hr, riv_hr_file)
    pcr.setglobaloption('unittrue')
    pcr.setclone(clone_map)
    logger.info('Computing river length')
    #riverlength = wt.windowstats(riv_hr,clone_rows,clone_columns,clone_trans,srs_clone,resultdir,'frac',clone2dem_transform)
    riverlength = wtools_lib.windowstats(riv_hr_file, len(yax), len(xax),
                                 trans, srs, os.path.join(options.destination, riverlength_fact_map), stat='fact', logger=logger)
    # TODO: nothing happends with the river lengths yet. Need to decide how to use these

    # report outlet map
    pcr.report(pcr.ifthen(pcr.ordinal(ldd_select)==5, pcr.ordinal(1)), os.path.join(options.destination, outlet_map))

    # report subcatchment map
    subcatchment = pcr.subcatchment(ldd_select, gauges)
    pcr.report(pcr.ordinal(subcatchment), os.path.join(options.destination, subcatch_map))

    # Report land use map
    if options.landuse is None:
        logger.info('No land use map used. Preparing {:s} with only ones.'.
                    format(os.path.join(options.destination, landuse_map)))
        pcr.report(pcr.nominal(ones), os.path.join(options.destination, landuse_map))
    else:
        logger.info('Resampling land use from {:s} to {:s}'.
                    format(os.path.abspath(options.landuse),
                           os.path.join(options.destination, os.path.abspath(landuse_map))))
        gis.gdal_warp(options.landuse,
                      clone_map,
                      os.path.join(options.destination, landuse_map),
                      format='PCRaster',
                      gdal_interp=gdalconst.GRA_Mode,
                      gdal_type=gdalconst.GDT_Int32)

    # report soil map
    if options.soil is None:
        logger.info('No soil map used. Preparing {:s} with only ones.'.
                    format(os.path.join(options.destination, soil_map)))
        pcr.report(pcr.nominal(ones), os.path.join(options.destination, soil_map))
    else:
        logger.info('Resampling soil from {:s} to {:s}'.
                    format(os.path.abspath(options.soil),
                           os.path.join(options.destination, os.path.abspath(soil_map))))
        gis.gdal_warp(options.soil,
                      clone_map,
                      os.path.join(options.destination, soil_map),
                      format='PCRaster',
                      gdal_interp=gdalconst.GRA_Mode,
                      gdal_type=gdalconst.GDT_Int32)

    if options.lai is None:
        logger.info('No vegetation LAI maps used. Preparing default maps {:s} with only ones.'.
                    format(os.path.join(options.destination, soil_map)))
        pcr.report(pcr.nominal(ones), os.path.join(options.destination, soil_map))
    else:
        dest_lai = os.path.join(options.destination, 'clim')
        os.makedirs(dest_lai)
        for month in range(12):
            lai_in = os.path.join(options.lai, 'LAI00000.{:03d}'.format(month + 1))
            lai_out = os.path.join(dest_lai, 'LAI00000.{:03d}'.format(month + 1))
            logger.info('Resampling vegetation LAI from {:s} to {:s}'.
                        format(os.path.abspath(lai_in),
                               os.path.abspath(lai_out)))
            gis.gdal_warp(lai_in,
                          clone_map,
                          lai_out,
                          format='PCRaster',
                          gdal_interp=gdalconst.GRA_Bilinear,
                          gdal_type=gdalconst.GDT_Float32)

    # report other maps
    if options.other_maps is None:
        logger.info('No other maps used. Skipping other maps.')
    else:
        logger.info('Resampling list of other maps...')
        for map_file in options.other_maps:
            map_name = os.path.split(map_file)[1]
            logger.info('Resampling a map from {:s} to {:s}'.
                        format(os.path.abspath(map_file),
                               os.path.join(options.destination, map_name)))
            gis.gdal_warp(map_file,
                          clone_map,
                          os.path.join(options.destination, map_name),
                          format='PCRaster',
                          gdal_interp=gdalconst.GRA_Mode,
                          gdal_type=gdalconst.GDT_Float32)


    if options.clean:
        wtools_lib.DeleteList(glob.glob(os.path.join(options.destination, '*.xml')),
                              logger=logger)
        wtools_lib.DeleteList(glob.glob(os.path.join(options.destination, 'clim', '*.xml')),
                              logger=logger)
        wtools_lib.DeleteList(glob.glob(os.path.join(options.destination, '*highres*')),
                              logger=logger)
Example #21
def naturalLake(
    waterlevel,
    LakeLocs,
    LinkedLakeLocs,
    LakeArea,
    LakeThreshold,
    LakeStorFunc,
    LakeOutflowFunc,
    sh,
    hq,
    lake_b,
    lake_e,
    inflow,
    precip,
    pet,
    LakeAreasMap,
    JDOY,
    timestepsecs=86400,
):
    """
    Run Natural Lake module to compute the new waterlevel and outflow.
    Solves lake water balance with linearisation and iteration procedure,
    for any rating and storage curve.
    For the case where storage curve is S = AH and Q=b(H-Ho)^2, uses the direct
    solution from the Modified Puls Approach (LISFLOOD).


    :ivar waterlevel: water level H in the lake
    :ivar LakeLocs: location of lake's outlet
    :ivar LinkedLakeLocs: ID of linked lakes
    :ivar LakeArea: total lake area
    :ivar LakeThreshold: water level threshold Ho under which outflow is zero
    :ivar LakeStorFunc: type of lake storage curve
                        1: S = AH
                        2: S = f(H) from lake data and interpolation
    :ivar LakeOutflowFunc: type of lake rating curve
                           1: Q = f(H) from lake data and interpolation
                           2: General Q = b(H - Ho)^e
                           3: Case of Puls Approach Q = b(H - Ho)^2
    :ivar sh: data for storage curve
    :ivar hq: data for rating curve
    :ivar lake_b: rating curve coefficient
    :ivar lake_e: rating curve exponent
    :ivar inflow: inflow to the lake (surface runoff + river discharge + seepage)
    :ivar precip: precipitation map
    :ivar pet: PET map
    :ivar LakeAreasMap: lake extent map (for filtering P and PET)
    :ivar JDOY: Julian Day of Year to read storage/rating curve from data
    :ivar timestepsecs: model timestep in seconds

    :returns: waterlevel, outflow, prec_av, pet_av, storage
    """

    mv = -999.0
    LakeZeros = LakeArea * 0.0

    waterlevel_start = waterlevel

    inflow = pcr.ifthen(pcr.boolean(LakeLocs), inflow)

    prec_av = pcr.ifthen(pcr.boolean(LakeLocs),
                         pcr.areaaverage(precip, LakeAreasMap))
    pet_av = pcr.ifthen(pcr.boolean(LakeLocs),
                        pcr.areaaverage(pet, LakeAreasMap))

    ### Modified Puls Approach (Burek et al., 2013, LISFLOOD) ###
    #ResOutflowFunc = 3

    #Calculate lake factor and SI parameter
    LakeFactor = pcr.ifthenelse(LakeOutflowFunc == 3,
                                LakeArea / (timestepsecs * (lake_b)**0.5), mv)

    storage_start = pcr.ifthenelse(
        LakeStorFunc == 1,
        LakeArea * waterlevel_start,
        lookupResFunc(LakeLocs, waterlevel_start, sh, "0-1"),
    )

    SIFactor = pcr.ifthenelse(
        LakeOutflowFunc == 3,
        ((storage_start +
          (prec_av - pet_av) * LakeArea / 1000.0) / timestepsecs + inflow), mv)
    #Adjust SIFactor for ResThreshold != 0
    SIFactorAdj = SIFactor - LakeArea * LakeThreshold / timestepsecs

    #Calculate the new lake outflow/waterlevel/storage
    outflow = pcr.ifthenelse(
        LakeOutflowFunc == 3,
        pcr.ifthenelse(SIFactorAdj > 0.0,
                       (-LakeFactor +
                        (LakeFactor**2 + 2 * SIFactorAdj)**0.5)**2, 0.0),
        LakeZeros)
    storage = pcr.ifthenelse(LakeOutflowFunc == 3,
                             (SIFactor - outflow) * timestepsecs, LakeZeros)
    waterlevel = pcr.ifthenelse(LakeOutflowFunc == 3, storage / LakeArea,
                                LakeZeros)

    ### Linearisation and iteration for specific storage/rating curves ###
    np_lakeoutflowfunc = pcr.pcr2numpy(LakeOutflowFunc, 0.0)
    if ((bool(np.isin(1, np.unique(np_lakeoutflowfunc))))
            or (bool(np.isin(2, np.unique(np_lakeoutflowfunc))))):

        np_lakelocs = pcr.pcr2numpy(LakeLocs, 0.0)
        np_linkedlakelocs = pcr.pcr2numpy(LinkedLakeLocs, 0.0)
        waterlevel_loop = waterlevel_start

        _outflow = []
        nr_loop = np.max([int(timestepsecs / 21600), 1])
        for n in range(0, nr_loop):
            np_waterlevel = pcr.pcr2numpy(waterlevel_loop, np.nan)
            np_waterlevel_lower = np_waterlevel.copy()

            for val in np.unique(np_linkedlakelocs):
                if val > 0:
                    np_waterlevel_lower[np_linkedlakelocs ==
                                        val] = np_waterlevel[np.where(
                                            np_lakelocs == val)]

            diff_wl = np_waterlevel - np_waterlevel_lower
            diff_wl[np.isnan(diff_wl)] = mv
            np_waterlevel_lower[np.isnan(np_waterlevel_lower)] = mv

            pcr_diff_wl = pcr.numpy2pcr(pcr.Scalar, diff_wl, mv)
            pcr_wl_lower = pcr.numpy2pcr(pcr.Scalar, np_waterlevel_lower, mv)

            storage_start_loop = pcr.ifthenelse(
                LakeStorFunc == 1,
                LakeArea * waterlevel_loop,
                lookupResFunc(LakeLocs, waterlevel_loop, sh, "0-1"),
            )

            outflow_loop = pcr.ifthenelse(
                LakeOutflowFunc == 1,
                lookupResRegMatr(LakeLocs, waterlevel_loop, hq, JDOY),
                pcr.ifthenelse(
                    pcr_diff_wl >= 0,
                    pcr.max(lake_b * (waterlevel_loop - LakeThreshold)**lake_e,
                            0),
                    pcr.min(
                        -1 * lake_b * (pcr_wl_lower - LakeThreshold)**lake_e,
                        0),
                ),
            )

            np_outflow = pcr.pcr2numpy(outflow_loop, np.nan)
            np_outflow_linked = np_lakelocs * 0.0

            with np.errstate(invalid="ignore"):
                if np_outflow[np_outflow < 0].size > 0:
                    np_outflow_linked[np.in1d(
                        np_lakelocs,
                        np_linkedlakelocs[np_outflow < 0]).reshape(
                            np_linkedlakelocs.shape)] = np_outflow[
                                np_outflow < 0]

            outflow_linked = pcr.numpy2pcr(pcr.Scalar, np_outflow_linked, 0.0)

            fl_nr_loop = float(nr_loop)
            storage_loop = (
                storage_start_loop + (inflow * timestepsecs / fl_nr_loop) +
                (prec_av / fl_nr_loop / 1000.0) * LakeArea -
                (pet_av / fl_nr_loop / 1000.0) * LakeArea -
                (pcr.cover(outflow_loop, 0.0) * timestepsecs / fl_nr_loop) +
                (pcr.cover(outflow_linked, 0.0) * timestepsecs / fl_nr_loop))

            waterlevel_loop = pcr.ifthenelse(
                LakeStorFunc == 1,
                waterlevel_loop +
                (storage_loop - storage_start_loop) / LakeArea,
                lookupResFunc(LakeLocs, storage_loop, sh, "1-0"),
            )

            np_outflow_nz = np_outflow * 0.0
            with np.errstate(invalid="ignore"):
                np_outflow_nz[np_outflow > 0] = np_outflow[np_outflow > 0]
            _outflow.append(np_outflow_nz)

        outflow_av_temp = np.average(_outflow, 0)
        outflow_av_temp[np.isnan(outflow_av_temp)] = mv
        outflow_av = pcr.numpy2pcr(pcr.Scalar, outflow_av_temp, mv)

        #Add the discharge/waterlevel/storage from the loop to the one from puls approach
        outflow = pcr.ifthenelse(LakeOutflowFunc == 3, outflow, outflow_av)
        waterlevel = pcr.ifthenelse(LakeOutflowFunc == 3, waterlevel,
                                    waterlevel_loop)
        storage = pcr.ifthenelse(LakeOutflowFunc == 3, storage, storage_loop)

    return waterlevel, outflow, prec_av, pet_av, storage
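The direct solution in the Puls branch can be checked numerically: with F = LakeFactor and S' = SIFactorAdj, the outflow is Q = x**2 where x = -F + sqrt(F**2 + 2*S') solves x**2/2 + F*x = S'. A minimal check with made-up numbers:

import math

F = 12.5       # hypothetical LakeFactor
S_adj = 340.0  # hypothetical SIFactorAdj

x = -F + math.sqrt(F ** 2 + 2.0 * S_adj)
outflow = x ** 2
residual = x ** 2 / 2.0 + F * x - S_adj
print(outflow, residual)  # residual should be ~0.0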
Example #22
    vos.cmd_line(cmd, using_subprocess = False)
    clone_map_file = "clone_low_resolution_30min.map"
    # - set the clone and landmask map
    pcr.setclone(clone_map_file)
    landmask = pcr.boolean(1.0)
    #
    # save numpy arrays
    for i_file in range(0, len(file_names)):
        # rename 5 arc-min file
        file_name = file_names[i_file]
        cmd = 'mv ' +  file_name + " " + file_name + ".5min.map"
        vos.cmd_line(cmd, using_subprocess = False) 
        # report it to pcraster files
        print(file_name)
        os.system('pwd')
        pcr.report(pcr.numpy2pcr(pcr.Scalar, extreme_value_30min[file_name], vos.MV), file_name)
    #
    # prepare ldd at 30 arcmin resolution (we need this, only for the compatibility with the downscaling script)
    # - rename ldd 
    cmd = 'mv resampled_low_resolution_ldd.map resampled_low_resolution_ldd.5min.map'
    # - using 30 arcmin ldd 
    ldd_map_low_resolution_file_name = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input30min/routing/lddsound_30min.map"
    ldd_map_low_resolution = vos.readPCRmapClone(ldd_map_low_resolution_file_name, \
                                                 clone_map_file, \
                                                 tmp_folder, \
                                                 None, True, None, False)
    ldd_map_low_resolution = pcr.lddrepair(pcr.ldd(ldd_map_low_resolution))
    ldd_map_low_resolution = pcr.lddrepair(ldd_map_low_resolution)
    pcr.report(ldd_map_low_resolution, "resampled_low_resolution_ldd.map")

Example #23
def main():
    ### Read input arguments #####
    parser = OptionParser()
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option('-q', '--quiet',
                      dest='verbose', default=True, action='store_false',
                      help='do not print status messages to stdout')
    parser.add_option('-i', '--ini', dest='inifile',
                      default='hand_contour_inun.ini', nargs=1,
                      help='ini configuration file')
    parser.add_option('-f', '--flood_map',
                      nargs=1, dest='flood_map',
                      help='Flood map file (NetCDF point time series file)')
    parser.add_option('-v', '--flood_variable',
                      nargs=1, dest='flood_variable',
                      default='water_level',
                      help='variable name of flood water level')
    parser.add_option('-b', '--bankfull_map',
                      dest='bankfull_map', default='',
                      help='Map containing bank full level (is subtracted from flood map, in NetCDF)')
    parser.add_option('-c', '--catchment',
                      dest='catchment_strahler', default=7, type='int',
                      help='Strahler order threshold >= are selected as catchment boundaries')
    parser.add_option('-t', '--time',
                      dest='time', default='',
                      help='time in YYYYMMDDHHMMSS, overrides time in NetCDF input if set')
    # parser.add_option('-s', '--hand_strahler',
    #                   dest='hand_strahler', default=7, type='int',
    #                   help='Strahler order threshold >= selected as riverine')
    parser.add_option('-m', '--max_strahler',
                      dest = 'max_strahler', default=1000, type='int',
                      help='Maximum Strahler order to loop over')
    parser.add_option('-d', '--destination',
                      dest='dest_path', default='inun',
                      help='Destination path')
    parser.add_option('-H', '--hand_file_prefix',
                      dest='hand_file_prefix', default='',
                      help='optional HAND file prefix of already generated HAND files')
    parser.add_option('-n', '--neg_HAND',
                      dest='neg_HAND', default=0, type='int',
                      help='if set to 1, allow for negative HAND values in HAND maps')
    (options, args) = parser.parse_args()

    if not os.path.exists(options.inifile):
        print('path to ini file cannot be found')
        sys.exit(1)
    options.dest_path = os.path.abspath(options.dest_path)

    if not(os.path.isdir(options.dest_path)):
        os.makedirs(options.dest_path)

    # set up the logger
    flood_name = os.path.split(options.flood_map)[1].split('.')[0]
    # case_name = 'inun_{:s}_hand_{:02d}_catch_{:02d}'.format(flood_name, options.hand_strahler, options.catchment_strahler)
    case_name = 'inun_{:s}_catch_{:02d}'.format(flood_name, options.catchment_strahler)
    logfilename = os.path.join(options.dest_path, 'hand_contour_inun.log')
    logger, ch = inun_lib.setlogger(logfilename, 'HAND_INUN', options.verbose)
    logger.info('$Id: $')
    logger.info('Flood map: {:s}'.format(options.flood_map))
    logger.info('Bank full map: {:s}'.format(options.bankfull_map))
    logger.info('Destination path: {:s}'.format(options.dest_path))
    # read out ini file
    ### READ CONFIG FILE
    # open config-file
    config = inun_lib.open_conf(options.inifile)
    
    # read settings
    options.dem_file = inun_lib.configget(config, 'HighResMaps',
                                  'dem_file',
                                  True)
    options.ldd_file = inun_lib.configget(config, 'HighResMaps',
                                'ldd_file',
                                 True)
    options.stream_file = inun_lib.configget(config, 'HighResMaps',
                                'stream_file',
                                 True)
    options.riv_length_fact_file = inun_lib.configget(config, 'wflowResMaps',
                                'riv_length_fact_file',
                                 True)
    options.ldd_wflow = inun_lib.configget(config, 'wflowResMaps',
                                'ldd_wflow',
                                True)
    options.riv_width_file = inun_lib.configget(config, 'wflowResMaps',
                                'riv_width_file',
                                 True)
    options.file_format = inun_lib.configget(config, 'file_settings',
                                'file_format', 0, datatype='int')
    options.out_format = inun_lib.configget(config, 'file_settings',
                                'out_format', 0, datatype='int')
    options.latlon = inun_lib.configget(config, 'file_settings',
                                 'latlon', 0, datatype='int')
    options.x_tile = inun_lib.configget(config, 'tiling',
                                  'x_tile', 10000, datatype='int')
    options.y_tile = inun_lib.configget(config, 'tiling',
                                  'y_tile', 10000, datatype='int')
    options.x_overlap = inun_lib.configget(config, 'tiling',
                                  'x_overlap', 1000, datatype='int')
    options.y_overlap = inun_lib.configget(config, 'tiling',
                                  'y_overlap', 1000, datatype='int')
    options.iterations = inun_lib.configget(config, 'inundation',
                                  'iterations', 20, datatype='int')
    options.initial_level = inun_lib.configget(config, 'inundation',
                                  'initial_level', 32., datatype='float')
    options.flood_volume_type = inun_lib.configget(config, 'inundation',
                                  'flood_volume_type', 0, datatype='int')

    # options.area_multiplier = inun_lib.configget(config, 'inundation',
    #                               'area_multiplier', 1., datatype='float')
    logger.info('DEM file: {:s}'.format(options.dem_file))
    logger.info('LDD file: {:s}'.format(options.ldd_file))
    logger.info('Columns per tile: {:d}'.format(options.x_tile))
    logger.info('Rows per tile: {:d}'.format(options.y_tile))
    logger.info('Columns overlap: {:d}'.format(options.x_overlap))
    logger.info('Rows overlap: {:d}'.format(options.y_overlap))
    metadata_global = {}
    # add metadata from the section [metadata]
    meta_keys = config.options('metadata_global')
    for key in meta_keys:
        metadata_global[key] = config.get('metadata_global', key)
    # add a number of metadata variables that are mandatory
    metadata_global['config_file'] = os.path.abspath(options.inifile)
    metadata_var = {}
    metadata_var['units'] = 'm'
    metadata_var['standard_name'] = 'water_surface_height_above_reference_datum'
    metadata_var['long_name'] = 'flooding'
    metadata_var['comment'] = 'water_surface_reference_datum_altitude is given in file {:s}'.format(options.dem_file)
    if not os.path.exists(options.dem_file):
        logger.error('path to dem file {:s} cannot be found'.format(options.dem_file))
        sys.exit(1)
    if not os.path.exists(options.ldd_file):
        logger.error('path to ldd file {:s} cannot be found'.format(options.ldd_file))
        sys.exit(1)

    # Read extent from a GDAL compatible file
    try:
        extent = inun_lib.get_gdal_extent(options.dem_file)
    except:
        msg = 'Input file {:s} not a gdal compatible file'.format(options.dem_file)
        inun_lib.close_with_error(logger, ch, msg)
        sys.exit(1)

    try:
        x, y = inun_lib.get_gdal_axes(options.dem_file, logging=logger)
        srs = inun_lib.get_gdal_projection(options.dem_file, logging=logger)
    except:
        msg = 'Input file {:s} not a gdal compatible file'.format(options.dem_file)
        inun_lib.close_with_error(logger, ch, msg)
        sys.exit(1)

    # read history from flood file
    if options.file_format == 0:
        a = nc.Dataset(options.flood_map, 'r')
        metadata_global['history'] = 'Created by: $Id: $, boundary conditions from {:s},\nhistory: {:s}'.format(os.path.abspath(options.flood_map), a.history)
        a.close()
    else:
        metadata_global['history'] = 'Created by: $Id: $, boundary conditions from {:s},\nhistory: {:s}'.format(os.path.abspath(options.flood_map), 'PCRaster file, no history')

    # first write subcatch maps and hand maps
    ############### TODO ######
    # setup a HAND file for each strahler order

    max_s = inun_lib.define_max_strahler(options.stream_file, logging=logger)
    stream_max = np.minimum(max_s, options.max_strahler)

    for hand_strahler in range(options.catchment_strahler, stream_max + 1, 1):
        dem_name = os.path.split(options.dem_file)[1].split('.')[0]
        if os.path.isfile('{:s}_{:02d}.tif'.format(options.hand_file_prefix, hand_strahler)):
            hand_file = '{:s}_{:02d}.tif'.format(options.hand_file_prefix, hand_strahler)
        else:
            logger.info('No HAND files with HAND prefix were found, checking {:s}_hand_strahler_{:02d}.tif'.format(dem_name, hand_strahler))
            hand_file = os.path.join(options.dest_path, '{:s}_hand_strahler_{:02d}.tif'.format(dem_name, hand_strahler))
        if not os.path.isfile(hand_file):
            # HAND file does not exist yet: generate it; otherwise skip
            logger.info('HAND file {:s} not found, start setting up...please wait...'.format(hand_file))
            hand_file_tmp = os.path.join(options.dest_path, '{:s}_hand_strahler_{:02d}.tif.tmp'.format(dem_name, hand_strahler))
            ds_hand, band_hand = inun_lib.prepare_gdal(hand_file_tmp, x, y, logging=logger, srs=srs)
            # band_hand = ds_hand.GetRasterBand(1)

            # Open terrain data for reading
            ds_dem, rasterband_dem = inun_lib.get_gdal_rasterband(options.dem_file)
            ds_ldd, rasterband_ldd = inun_lib.get_gdal_rasterband(options.ldd_file)
            ds_stream, rasterband_stream = inun_lib.get_gdal_rasterband(options.stream_file)
            n = 0
            for x_loop in range(0, len(x), options.x_tile):
                x_start = np.maximum(x_loop, 0)
                x_end = np.minimum(x_loop + options.x_tile, len(x))
                # determine actual overlap for cutting
                for y_loop in range(0, len(y), options.y_tile):
                    x_overlap_min = x_start - np.maximum(x_start - options.x_overlap, 0)
                    x_overlap_max = np.minimum(x_end + options.x_overlap, len(x)) - x_end
                    n += 1
                    # print('tile {:001d}:'.format(n))
                    y_start = np.maximum(y_loop, 0)
                    y_end = np.minimum(y_loop + options.y_tile, len(y))
                    y_overlap_min = y_start - np.maximum(y_start - options.y_overlap, 0)
                    y_overlap_max = np.minimum(y_end + options.y_overlap, len(y)) - y_end
                    # cut out DEM
                    logger.debug('Computing HAND for xmin: {:d} xmax: {:d} ymin {:d} ymax {:d}'.format(x_start, x_end,y_start, y_end))
                    terrain = rasterband_dem.ReadAsArray(x_start - x_overlap_min,
                                                         y_start - y_overlap_min,
                                                         (x_end + x_overlap_max) - (x_start - x_overlap_min),
                                                         (y_end + y_overlap_max) - (y_start - y_overlap_min)
                                                         )

                    drainage = rasterband_ldd.ReadAsArray(x_start - x_overlap_min,
                                                         y_start - y_overlap_min,
                                                         (x_end + x_overlap_max) - (x_start - x_overlap_min),
                                                         (y_end + y_overlap_max) - (y_start - y_overlap_min)
                                                         )
                    stream = rasterband_stream.ReadAsArray(x_start - x_overlap_min,
                                                           y_start - y_overlap_min,
                                                           (x_end + x_overlap_max) - (x_start - x_overlap_min),
                                                           (y_end + y_overlap_max) - (y_start - y_overlap_min)
                                                           )
                    # write to temporary file
                    terrain_temp_file = os.path.join(options.dest_path, 'terrain_temp.map')
                    drainage_temp_file = os.path.join(options.dest_path, 'drainage_temp.map')
                    stream_temp_file = os.path.join(options.dest_path, 'stream_temp.map')
                    if rasterband_dem.GetNoDataValue() is not None:
                        inun_lib.gdal_writemap(terrain_temp_file, 'PCRaster',
                                          np.arange(0, terrain.shape[1]),
                                          np.arange(0, terrain.shape[0]),
                                          terrain, rasterband_dem.GetNoDataValue(),
                                          gdal_type=gdal.GDT_Float32,
                                          logging=logger)
                    else:
                        # in case no nodata value is found
                        logger.warning('No nodata value found in {:s}. assuming -9999'.format(options.dem_file))
                        inun_lib.gdal_writemap(terrain_temp_file, 'PCRaster',
                                          np.arange(0, terrain.shape[1]),
                                          np.arange(0, terrain.shape[0]),
                                          terrain, -9999.,
                                          gdal_type=gdal.GDT_Float32,
                                          logging=logger)

                    inun_lib.gdal_writemap(drainage_temp_file, 'PCRaster',
                                      np.arange(0, terrain.shape[1]),
                                      np.arange(0, terrain.shape[0]),
                                      drainage, rasterband_ldd.GetNoDataValue(),
                                      gdal_type=gdal.GDT_Int32,
                                      logging=logger)
                    inun_lib.gdal_writemap(stream_temp_file, 'PCRaster',
                                      np.arange(0, terrain.shape[1]),
                                      np.arange(0, terrain.shape[0]),
                                      stream, rasterband_stream.GetNoDataValue(),  # use the stream band's own nodata value
                                      gdal_type=gdal.GDT_Int32,
                                      logging=logger)
                    # read as pcr objects
                    pcr.setclone(terrain_temp_file)
                    terrain_pcr = pcr.readmap(terrain_temp_file)
                    drainage_pcr = pcr.lddrepair(pcr.ldd(pcr.readmap(drainage_temp_file)))  # convert to ldd type map
                    stream_pcr = pcr.scalar(pcr.readmap(stream_temp_file))  # convert to scalar type map

                    # check if the highest stream order of the tile is below hand_strahler;
                    # if it is smaller than hand_strahler, DEM values are used instead of HAND values.
                    max_stream_tile = inun_lib.define_max_strahler(stream_temp_file, logging=logger)
                    if max_stream_tile < hand_strahler:
                        hand_pcr = terrain_pcr
                        logger.info('For this tile, DEM values are used instead of HAND because there is no stream order larger than {:02d}'.format(hand_strahler))
                    else:
                        # compute streams
                        stream_ge, subcatch = inun_lib.subcatch_stream(drainage_pcr, hand_strahler, stream=stream_pcr) # generate streams
                        # compute basins
                        stream_ge_dummy, subcatch = inun_lib.subcatch_stream(drainage_pcr, options.catchment_strahler, stream=stream_pcr) # generate streams
                        basin = pcr.boolean(subcatch)
                        hand_pcr, dist_pcr = inun_lib.derive_HAND(terrain_pcr, drainage_pcr, 3000,
                                                                  rivers=pcr.boolean(stream_ge), basin=basin, neg_HAND=options.neg_HAND)
                    # convert to numpy
                    hand = pcr.pcr2numpy(hand_pcr, -9999.)
                    # cut relevant part
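                    # a zero overlap is replaced below by minus the array size,
                    # so the negative slice stop still reaches the end of the axis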
                    if y_overlap_max == 0:
                        y_overlap_max = -hand.shape[0]
                    if x_overlap_max == 0:
                        x_overlap_max = -hand.shape[1]
                    hand_cut = hand[0+y_overlap_min:-y_overlap_max, 0+x_overlap_min:-x_overlap_max]

                    band_hand.WriteArray(hand_cut, x_start, y_start)
                    os.unlink(terrain_temp_file)
                    os.unlink(drainage_temp_file)
                    os.unlink(stream_temp_file)
                    band_hand.FlushCache()
            ds_dem = None
            ds_ldd = None
            ds_stream = None
            band_hand.SetNoDataValue(-9999.)
            ds_hand = None
            logger.info('Finalizing {:s}'.format(hand_file))
            # rename temporary file to final hand file
            os.rename(hand_file_tmp, hand_file)
        else:
            logger.info('HAND file {:s} already exists...skipping...'.format(hand_file))

    #####################################################################################
    #  HAND file has now been prepared, moving to flood mapping part                    #
    #####################################################################################
    # set the clone
    pcr.setclone(options.ldd_wflow)
    # read wflow ldd as pcraster object
    ldd_pcr = pcr.readmap(options.ldd_wflow)
    xax, yax, riv_width, fill_value = inun_lib.gdal_readmap(options.riv_width_file, 'GTiff', logging=logger)

    # determine cell length in meters using ldd_pcr as clone (if latlon=True, values are converted to m2)
    x_res, y_res, reallength_wflow = pcrut.detRealCellLength(pcr.scalar(ldd_pcr), not(bool(options.latlon)))
    cell_surface_wflow = pcr.pcr2numpy(x_res * y_res, 0)

    if options.flood_volume_type == 0:
        # load the static maps needed to estimate volumes across all cells
        # xax, yax, riv_length, fill_value = inun_lib.gdal_readmap(options.riv_length_file, 'GTiff', logging=logger)
        # riv_length = np.ma.masked_where(riv_length==fill_value, riv_length)
        xax, yax, riv_width, fill_value = inun_lib.gdal_readmap(options.riv_width_file, 'GTiff', logging=logger)
        riv_width[riv_width == fill_value] = 0

        # read river length factor file (multiplier)
        xax, yax, riv_length_fact, fill_value = inun_lib.gdal_readmap(options.riv_length_fact_file, 'GTiff', logging=logger)
        riv_length_fact = np.ma.masked_where(riv_length_fact==fill_value, riv_length_fact)
        drain_length = wflow_lib.detdrainlength(ldd_pcr, x_res, y_res)

        # compute river length in each cell
        riv_length = pcr.pcr2numpy(drain_length, 0) * riv_length_fact
        # riv_length_pcr = pcr.numpy2pcr(pcr.Scalar, riv_length, 0)

    flood_folder = os.path.join(options.dest_path, case_name)
    flood_vol_map = os.path.join(flood_folder, '{:s}_vol.tif'.format(os.path.split(options.flood_map)[1].split('.')[0]))
    if not os.path.isdir(flood_folder):
        os.makedirs(flood_folder)
    if options.out_format == 0:
        inun_file_tmp = os.path.join(flood_folder, '{:s}.tif.tmp'.format(case_name))
        inun_file = os.path.join(flood_folder, '{:s}.tif'.format(case_name))
    else:
        inun_file_tmp = os.path.join(flood_folder, '{:s}.nc.tmp'.format(case_name))
        inun_file = os.path.join(flood_folder, '{:s}.nc'.format(case_name))

    hand_temp_file = os.path.join(flood_folder, 'hand_temp.map')
    drainage_temp_file = os.path.join(flood_folder, 'drainage_temp.map')
    stream_temp_file = os.path.join(flood_folder, 'stream_temp.map')
    flood_vol_temp_file = os.path.join(flood_folder, 'flood_warp_temp.tif')
    # load the data with river levels and compute the volumes
    if options.file_format == 0:
        # assume we need the maximum value in a NetCDF time series grid
        logger.info('Reading flood from {:s} NetCDF file'.format(options.flood_map))
        a = nc.Dataset(options.flood_map, 'r')
        if options.latlon == 0:
            xax = a.variables['x'][:]
            yax = a.variables['y'][:]
        else:
            xax = a.variables['lon'][:]
            yax = a.variables['lat'][:]
        if options.time == '':
            time_list = nc.num2date(a.variables['time'][:], units = a.variables['time'].units, calendar=a.variables['time'].calendar)
            time = [time_list[len(time_list) // 2]]
        else:
            time = [dt.datetime.strptime(options.time, '%Y%m%d%H%M%S')]

        flood_series = a.variables[options.flood_variable][:]
        flood_data = flood_series.max(axis=0)
        if np.ma.is_masked(flood_data):
            flood = flood_data.data
            flood[flood_data.mask] = 0
        else:
            flood = flood_data  # ensure 'flood' is defined when nothing is masked
        if yax[-1] > yax[0]:
            yax = np.flipud(yax)
            flood = np.flipud(flood)
        a.close()
    elif options.file_format == 1:
        logger.info('Reading flood from {:s} PCRaster file'.format(options.flood_map))
        xax, yax, flood, flood_fill_value = inun_lib.gdal_readmap(options.flood_map, 'PCRaster', logging=logger)
        flood = np.ma.masked_equal(flood, flood_fill_value)
        if options.time == '':
            options.time = '20000101000000'
        time = [dt.datetime.strptime(options.time, '%Y%m%d%H%M%S')]

        flood[flood==flood_fill_value] = 0.
    # load the bankfull depths
    if options.bankfull_map == '':
        bankfull = np.zeros(flood.shape)
    else:
        if options.file_format == 0:
            logger.info('Reading bankfull from {:s} NetCDF file'.format(options.bankfull_map))
            a = nc.Dataset(options.bankfull_map, 'r')
            xax = a.variables['x'][:]
            yax = a.variables['y'][:]

            bankfull_series = a.variables[options.flood_variable][:]
            bankfull_data = bankfull_series.max(axis=0)
            if np.ma.is_masked(bankfull_data):
                bankfull = bankfull_data.data
                bankfull[bankfull_data.mask] = 0
            else:
                bankfull = bankfull_data  # ensure 'bankfull' is defined when nothing is masked
            if yax[-1] > yax[0]:
                yax = np.flipud(yax)
                bankfull = np.flipud(bankfull)
            a.close()
        elif options.file_format == 1:
            logger.info('Reading bankfull from {:s} PCRaster file'.format(options.bankfull_map))
            xax, yax, bankfull, bankfull_fill_value = inun_lib.gdal_readmap(options.bankfull_map, 'PCRaster', logging=logger)
            bankfull = np.ma.masked_equal(bankfull, bankfull_fill_value)
#     flood = bankfull*2
    # res_x = 2000
    # res_y = 2000
    # subtract the bankfull water level to get flood levels (above bankfull)
    flood_vol = np.maximum(flood-bankfull, 0)
    if options.flood_volume_type == 0:
        flood_vol_m = riv_length*riv_width*flood_vol/cell_surface_wflow  # volume expressed as meters of water depth over the cell ([m]*[m]*[m]/[m2] = [m])
        flood_vol_m_pcr = pcr.numpy2pcr(pcr.Scalar, flood_vol_m, 0)
    else:
        flood_vol_m = flood_vol/cell_surface_wflow
    flood_vol_m_data = np.ma.filled(flood_vol_m, -999.)  # fills masked cells; also safe when flood_vol_m is a plain array
    logger.info('Saving water layer map to {:s}'.format(flood_vol_map))
    # write to a tiff file
    inun_lib.gdal_writemap(flood_vol_map, 'GTiff', xax, yax, np.maximum(flood_vol_m_data, 0), -999., logging=logger)
    # this is placed later in the hand loop
    # ds_hand, rasterband_hand = inun_lib.get_gdal_rasterband(hand_file)
    ds_ldd, rasterband_ldd = inun_lib.get_gdal_rasterband(options.ldd_file)
    ds_stream, rasterband_stream = inun_lib.get_gdal_rasterband(options.stream_file)

    logger.info('Preparing flood map in {:s} ...please wait...'.format(inun_file))
    if options.out_format == 0:
        ds_inun, band_inun = inun_lib.prepare_gdal(inun_file_tmp, x, y, logging=logger, srs=srs)
        # band_inun = ds_inun.GetRasterBand(1)
    else:
        ds_inun, band_inun = inun_lib.prepare_nc(inun_file_tmp, time, x, np.flipud(y), metadata=metadata_global,
                                                 metadata_var=metadata_var, logging=logger)
    # loop over all the tiles
    n = 0
    for x_loop in range(0, len(x), options.x_tile):
        x_start = np.maximum(x_loop, 0)
        x_end = np.minimum(x_loop + options.x_tile, len(x))
        # determine actual overlap for cutting
        for y_loop in range(0, len(y), options.y_tile):
            x_overlap_min = x_start - np.maximum(x_start - options.x_overlap, 0)
            x_overlap_max = np.minimum(x_end + options.x_overlap, len(x)) - x_end
            n += 1
            # print('tile {:001d}:'.format(n))
            y_start = np.maximum(y_loop, 0)
            y_end = np.minimum(y_loop + options.y_tile, len(y))
            y_overlap_min = y_start - np.maximum(y_start - options.y_overlap, 0)
            y_overlap_max = np.minimum(y_end + options.y_overlap, len(y)) - y_end
            x_tile_ax = x[x_start - x_overlap_min:x_end + x_overlap_max]
            y_tile_ax = y[y_start - y_overlap_min:y_end + y_overlap_max]
            # cut out DEM
            logger.debug('handling xmin: {:d} xmax: {:d} ymin {:d} ymax {:d}'.format(x_start, x_end, y_start, y_end))


            drainage = rasterband_ldd.ReadAsArray(x_start - x_overlap_min,
                                                 y_start - y_overlap_min,
                                                 (x_end + x_overlap_max) - (x_start - x_overlap_min),
                                                 (y_end + y_overlap_max) - (y_start - y_overlap_min)
                                                 )
            stream = rasterband_stream.ReadAsArray(x_start - x_overlap_min,
                                                   y_start - y_overlap_min,
                                                   (x_end + x_overlap_max) - (x_start - x_overlap_min),
                                                   (y_end + y_overlap_max) - (y_start - y_overlap_min)
                                                   )

            # stream_max = np.minimum(stream.max(), options.max_strahler)


            inun_lib.gdal_writemap(drainage_temp_file, 'PCRaster',
                              x_tile_ax,
                              y_tile_ax,
                              drainage, rasterband_ldd.GetNoDataValue(),
                              gdal_type=gdal.GDT_Int32,
                              logging=logger)
            inun_lib.gdal_writemap(stream_temp_file, 'PCRaster',
                              x_tile_ax,
                              y_tile_ax,
                              stream, rasterband_stream.GetNoDataValue(),
                              gdal_type=gdal.GDT_Int32,
                              logging=logger)


            # read as pcr objects
            pcr.setclone(stream_temp_file)
            drainage_pcr = pcr.lddrepair(pcr.ldd(pcr.readmap(drainage_temp_file)))  # convert to ldd type map
            stream_pcr = pcr.scalar(pcr.readmap(stream_temp_file))  # convert to scalar type map

            # warp of flood volume to inundation resolution
            inun_lib.gdal_warp(flood_vol_map, stream_temp_file, flood_vol_temp_file, gdal_interp=gdalconst.GRA_NearestNeighbour) # ,
            x_tile_ax, y_tile_ax, flood_meter, fill_value = inun_lib.gdal_readmap(flood_vol_temp_file, 'GTiff', logging=logger)
            # make sure that the option unittrue is on!! (in case unitcell was used in another function)
            x_res_tile, y_res_tile, reallength = pcrut.detRealCellLength(pcr.scalar(stream_pcr), not(bool(options.latlon)))
            cell_surface_tile = pcr.pcr2numpy(x_res_tile * y_res_tile, 0)

            # convert meter depth to volume [m3]
            flood_vol = pcr.numpy2pcr(pcr.Scalar, flood_meter*cell_surface_tile, fill_value)

            # first prepare a basin map, belonging to the lowest order we are looking at
            inundation_pcr = pcr.scalar(stream_pcr) * 0
            for hand_strahler in range(options.catchment_strahler, stream_max + 1, 1):
                # hand_temp_file = os.path.join(flood_folder, 'hand_temp.map')
                if os.path.isfile(os.path.join(options.dest_path, '{:s}_hand_strahler_{:02d}.tif'.format(dem_name, hand_strahler))):
                    hand_file = os.path.join(options.dest_path, '{:s}_hand_strahler_{:02d}.tif'.format(dem_name, hand_strahler))
                else:
                    hand_file = '{:s}_{:02d}.tif'.format(options.hand_file_prefix, hand_strahler)
                ds_hand, rasterband_hand = inun_lib.get_gdal_rasterband(hand_file)
                hand = rasterband_hand.ReadAsArray(x_start - x_overlap_min,
                                             y_start - y_overlap_min,
                                             (x_end + x_overlap_max) - (x_start - x_overlap_min),
                                             (y_end + y_overlap_max) - (y_start - y_overlap_min)
                                             )
                print('len x-ax: {:d} len y-ax {:d} x-shape {:d} y-shape {:d}'.format(len(x_tile_ax), len(y_tile_ax), hand.shape[1], hand.shape[0]))

                inun_lib.gdal_writemap(hand_temp_file, 'PCRaster',
                          x_tile_ax,
                          y_tile_ax,
                          hand, rasterband_hand.GetNoDataValue(),
                          gdal_type=gdal.GDT_Float32,
                          logging=logger)

                hand_pcr = pcr.readmap(hand_temp_file)

                stream_ge_hand, subcatch_hand = inun_lib.subcatch_stream(drainage_pcr, options.catchment_strahler, stream=stream_pcr)
                # stream_ge_hand, subcatch_hand = inun_lib.subcatch_stream(drainage_pcr, hand_strahler, stream=stream_pcr)
                stream_ge, subcatch = inun_lib.subcatch_stream(drainage_pcr,
                                                               options.catchment_strahler,
                                                               stream=stream_pcr,
                                                               basin=pcr.boolean(pcr.cover(subcatch_hand, 0)),
                                                               assign_existing=True,
                                                               min_strahler=hand_strahler,
                                                               max_strahler=hand_strahler) # generate subcatchments, only within basin for HAND
                flood_vol_strahler = pcr.ifthenelse(pcr.boolean(pcr.cover(subcatch, 0)), flood_vol, 0) # mask the flood volume map with the created subcatch map for strahler order = hand_strahler

                inundation_pcr_step = inun_lib.volume_spread(drainage_pcr, hand_pcr,
                                                             pcr.subcatchment(drainage_pcr, subcatch), # to make sure backwater effects can occur from higher order rivers to lower order rivers
                                                             flood_vol_strahler,
                                                             volume_thres=0.,
                                                             iterations=options.iterations,
                                                             cell_surface=pcr.numpy2pcr(pcr.Scalar, cell_surface_tile, -9999),
                                                             logging=logger,
                                                             order=hand_strahler,
                                                             neg_HAND=options.neg_HAND) # 1166400000.
                # use maximum value of inundation_pcr_step and new inundation for higher strahler order
                inundation_pcr = pcr.max(inundation_pcr, inundation_pcr_step)
            inundation = pcr.pcr2numpy(inundation_pcr, -9999.)
            # cut relevant part
            if y_overlap_max == 0:
                y_overlap_max = -inundation.shape[0]
            if x_overlap_max == 0:
                x_overlap_max = -inundation.shape[1]
            inundation_cut = inundation[0+y_overlap_min:-y_overlap_max, 0+x_overlap_min:-x_overlap_max]
            # inundation_cut
            if options.out_format == 0:
                band_inun.WriteArray(inundation_cut, x_start, y_start)
                band_inun.FlushCache()
            else:
                # with netCDF, data is upside-down.
                inun_lib.write_tile_nc(band_inun, inundation_cut, x_start, y_start)
            # clean up
            os.unlink(flood_vol_temp_file)
            os.unlink(drainage_temp_file)
            os.unlink(hand_temp_file)
            os.unlink(stream_temp_file)     #also remove temp stream file from output folder

            # if n == 35:
            #     band_inun.SetNoDataValue(-9999.)
            #     ds_inun = None
            #     sys.exit(0)
    # os.unlink(flood_vol_map)

    logger.info('Finalizing {:s}'.format(inun_file))
    # add the metadata to the file and band
    # band_inun.SetNoDataValue(-9999.)
    # ds_inun.SetMetadata(metadata_global)
    # band_inun.SetMetadata(metadata_var)
    if options.out_format == 0:
        ds_inun = None
        ds_hand = None
    else:
        ds_inun.close()

    ds_ldd = None
    # rename temporary file to final inundation file
    if os.path.isfile(inun_file):
        # remove an old result if available
        os.unlink(inun_file)
    os.rename(inun_file_tmp, inun_file)

    logger.info('Done! Thank you for using hand_contour_inun.py')
    logger, ch = inun_lib.closeLogger(logger, ch)
    del logger, ch
    sys.exit(0)
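
The tile loop above (used once for HAND preparation and again for spreading flood volumes) reads each tile with an overlap on every side, processes it, and then cuts the overlap off before writing the core back. A minimal sketch of that windowing arithmetic on a 1-D axis is shown below; the axis length, tile size, overlap, and the doubling step are hypothetical stand-ins for the real per-tile work:

import numpy as np

def iter_tiles(n_cells, tile, overlap):
    # yield (start, end, pad_min, pad_max) windows along one axis, mirroring
    # the loops above: the tile core is [start, end), padded by at most
    # `overlap` cells on each side and clipped at the axis borders
    for loop in range(0, n_cells, tile):
        start, end = max(loop, 0), min(loop + tile, n_cells)
        pad_min = start - max(start - overlap, 0)
        pad_max = min(end + overlap, n_cells) - end
        yield start, end, pad_min, pad_max

n_cells, tile, overlap = 25, 10, 3  # hypothetical sizes
data = np.arange(n_cells, dtype=float)
out = np.empty_like(data)
for start, end, pad_min, pad_max in iter_tiles(n_cells, tile, overlap):
    block = data[start - pad_min:end + pad_max]  # read core plus overlap
    result = block * 2                           # stand-in for per-tile work
    # strip the padding again; a zero pad_max must become "slice to the end"
    stop = result.shape[0] if pad_max == 0 else -pad_max
    out[start:end] = result[pad_min:stop]
assert np.allclose(out, data * 2)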
Example #24
  def test_numpy2pcr(self):
      nrRows, nrCols, cellSize = 3, 2, 1.0
      west, north = 0.0, 0.0
      pcraster.setclone(nrRows, nrCols, cellSize, west, north)

      # Values in array must fit the value scale of the raster exactly. This
      # will be checked.

      # Valid boolean values are: 0, 1, missing_value.
      # Valid ldd values are: 1, 2, 3, 4, 5, 6, 7, 8, 9, missing_value.
      # Valid nominal values are: [-2^31 + 1, 2^31 - 1], missing_value.
      # Valid ordinal values are: [-2^31 + 1, 2^31 - 1], missing_value.
      # Valid scalar values are: All 32 bit float values.
      # Valid directional values are: All 32 bit float values.

      # bool_min = 0
      # bool_max = 1
      int8_min = numpy.iinfo(numpy.int8).min
      int8_max = numpy.iinfo(numpy.int8).max
      int32_min = numpy.iinfo(numpy.int32).min
      int32_max = numpy.iinfo(numpy.int32).max
      int64_min = numpy.iinfo(numpy.int64).min
      int64_max = numpy.iinfo(numpy.int64).max

      # bool -> Boolean (uint8)
      raster = pcraster.numpy2pcr(pcraster.Boolean, numpy.array([
          [1,  1],
          [0,  5],
          [1,  1]], numpy.bool_), 5)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (True, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (False, True))
      # It is not possible to create a bool array with other values than
      # 0 and 1. Passing 5 as missing value has no effect.
      self.assertEqual(pcraster.cellvalue(raster, 2, 2), (True, True))

      # int8 -> Boolean (uint8)
      raster = pcraster.numpy2pcr(pcraster.Boolean, numpy.array([
          [1,  1],
          [0,  5],
          [1,  1]], numpy.int8), 5)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (True, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (False, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)

      with self.assertRaises(Exception) as context_manager:
          raster = pcraster.numpy2pcr(pcraster.Boolean, numpy.array([
              [1,  1],
              [9,  5],
              [1,  1]], numpy.int8), 5)
      self.assertEqual(str(context_manager.exception),
          "Incorrect value 9 at input array [1][0] for Boolean map")

      # int8 -> Ldd (uint8)
      raster = pcraster.numpy2pcr(pcraster.Ldd, numpy.array([
          [1,  2],
          [5,  15],
          [8,  9]], numpy.int8), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (1, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)

      with self.assertRaises(Exception) as context_manager:
          raster = pcraster.numpy2pcr(pcraster.Ldd, numpy.array([
              [ 1,  2],
              [10, 15],
              [ 8,  9]], numpy.int8), 15)
      self.assertEqual(str(context_manager.exception),
          "Incorrect value 10 at input array [1][0] for LDD map")

      # int8 -> Nominal (int32)
      # All valid int8 values are valid nominal values.
      raster = pcraster.numpy2pcr(pcraster.Nominal, numpy.array([
          [int8_min,         2],
          [       5,        15],
          [       8,  int8_max]], numpy.int8), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (int8_min, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (       5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)
      self.assertEqual(pcraster.cellvalue(raster, 3, 2), (int8_max, True))

      # int8 -> Ordinal (int32)
      # All valid int8 values are valid ordinal values.
      raster = pcraster.numpy2pcr(pcraster.Ordinal, numpy.array([
          [int8_min,         2],
          [       5,        15],
          [       8,  int8_max]], numpy.int8), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (int8_min, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (       5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)
      self.assertEqual(pcraster.cellvalue(raster, 3, 2), (int8_max, True))

      # int8 -> Scalar (float32)
      # All valid int8 values are valid scalar values.
      raster = pcraster.numpy2pcr(pcraster.Scalar, numpy.array([
          [int8_min,        2],
          [       5,       15],
          [       8, int8_max]], numpy.int8), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (int8_min, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (       5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)
      self.assertEqual(pcraster.cellvalue(raster, 3, 2), (int8_max, True))

      # int8 -> Directional (float32)
      # All valid int8 values are valid directional values.
      raster = pcraster.numpy2pcr(pcraster.Directional, numpy.array([
          [int8_min,        2],
          [       5,       15],
          [       8, int8_max]], numpy.int8), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (int8_min, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (       5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)
      self.assertEqual(pcraster.cellvalue(raster, 3, 2), (int8_max, True))

      # int16 TODO
      # int32 TODO

      # int64 -> Boolean (uint8)
      raster = pcraster.numpy2pcr(pcraster.Boolean, numpy.array([
          [1,  1],
          [0,  5],
          [1,  1]], numpy.int64), 5)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (True, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (False, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)

      with self.assertRaises(Exception) as context_manager:
          raster = pcraster.numpy2pcr(pcraster.Boolean, numpy.array([
              [1,  1],
              [9,  5],
              [1,  1]], numpy.int64), 5)
      self.assertEqual(str(context_manager.exception),
          "Incorrect value 9 at input array [1][0] for Boolean map")

      # int64 -> Ldd (uint8)
      raster = pcraster.numpy2pcr(pcraster.Ldd, numpy.array([
          [1,  2],
          [5,  15],
          [8,  9]], numpy.int64), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (1, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)

      with self.assertRaises(Exception) as context_manager:
          raster = pcraster.numpy2pcr(pcraster.Ldd, numpy.array([
              [ 1,  2],
              [10, 15],
              [ 8,  9]], numpy.int64), 15)
      self.assertEqual(str(context_manager.exception),
          "Incorrect value 10 at input array [1][0] for LDD map")

      # int64 -> Nominal (int32)
      raster = pcraster.numpy2pcr(pcraster.Nominal, numpy.array([
          [int32_min+1,         2],
          [          5,        15],
          [          8, int32_max]], numpy.int64), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (int32_min + 1, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (            5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)
      self.assertEqual(pcraster.cellvalue(raster, 3, 2), (    int32_max, True))

      with self.assertRaises(Exception) as context_manager:
          raster = pcraster.numpy2pcr(pcraster.Nominal, numpy.array([
              [int32_min+1,         2],
              [  int32_min,        15],
              [          8, int32_max]], numpy.int64), 15)
      self.assertEqual(str(context_manager.exception),
          "Incorrect value -2147483648 at input array [1][0] for Nominal map")

      # uint8 TODO
      # uint16 TODO
      # uint32 TODO
      # uint64 TODO

      # float16 TODO

      # float32 -> Scalar (float32)
      raster = pcraster.numpy2pcr(pcraster.Scalar, numpy.array([
          [-2,        -1],
          [ 0, numpy.nan],
          [ 1,         2]], numpy.float32), numpy.nan)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (-2, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), ( 0, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)
      self.assertEqual(pcraster.cellvalue(raster, 3, 2),  (2, True))

      # float64 -> Scalar (float32)
      raster = pcraster.numpy2pcr(pcraster.Scalar, numpy.array([
          [-2,        -1],
          [ 0, numpy.nan],
          [ 1,         2]], numpy.float64), numpy.nan)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (-2, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), ( 0, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)
      self.assertEqual(pcraster.cellvalue(raster, 3, 2),  (2, True))

      # complex64: Not supported.
      with self.assertRaises(Exception) as context_manager:
          raster = pcraster.numpy2pcr(pcraster.Nominal, numpy.array([
          [-2, -1],
          [ 0, 15],
          [ 1,  2]], numpy.complex64), 15)
      self.assertEqual(str(context_manager.exception),
          "Unsupported array type")
Example #25
def main(source,
         destination,
         inifile,
         dem_in,
         rivshp,
         catchshp,
         gaugeshp=None,
         landuse=None,
         soil=None,
         lai=None,
         other_maps=None,
         logfilename='wtools_static_maps.log',
         verbose=True,
         clean=True,
         alltouch=False,
         outlets=([], [])):
    # parse other maps into a list
    if other_maps is not None:
        if isinstance(other_maps, str):
            print(other_maps)
            other_maps = other_maps.replace(' ', '').replace('[', '').replace(
                ']', '').split(',')

    source = os.path.abspath(source)
    clone_map = os.path.join(source, 'mask.map')
    clone_shp = os.path.join(source, 'mask.shp')
    clone_prj = os.path.join(source, 'mask.prj')

    if None in (rivshp, catchshp, dem_in):
        msg = """The following files are compulsory:
        - DEM (raster)
        - river (shape)
        - catchment (shape)
        """
        print(msg)
        parser.print_help()
        sys.exit(1)
    if (inifile is not None) and (not os.path.exists(inifile)):
        print('path to ini file cannot be found')
        sys.exit(1)
    if not os.path.exists(rivshp):
        print('path to river shape cannot be found')
        sys.exit(1)
    if not os.path.exists(catchshp):
        print('path to catchment shape cannot be found')
        sys.exit(1)
    if not os.path.exists(dem_in):
        print('path to DEM cannot be found')
        sys.exit(1)

    # open a logger, dependent on verbose print to screen or not
    logger, ch = wt.setlogger(logfilename, 'WTOOLS', verbose)

    # create directories # TODO: check if workdir is still necessary, try to
    # keep in memory as much as possible

    # delete old files (when the source and destination folder are different)
    if os.path.isdir(destination) and destination != source:
        shutil.rmtree(destination)
    if destination != source:
        os.makedirs(destination)

    # Read mask
    if not (os.path.exists(clone_map)):
        logger.error(
            'Clone file {:s} not found. Please run create_grid first.'.format(
                clone_map))
        sys.exit(1)
    else:
        # set clone
        pcr.setclone(clone_map)
        # get the extent from the clone map
        xax, yax, clone, fill_value = wt.gdal_readmap(clone_map, 'GTiff')
        trans = wt.get_geotransform(clone_map)
        extent = wt.get_extent(clone_map)
        xmin, ymin, xmax, ymax = extent
        zeros = np.zeros(clone.shape)
        ones = pcr.numpy2pcr(pcr.Scalar, np.ones(clone.shape), -9999)
        # get the projection from the clone map
        srs = wt.get_projection(clone_map)
        unit_clone = srs.GetAttrValue('UNIT').lower()

    # READ CONFIG FILE
    # open config-file
    if inifile is None:
        config = ConfigParser.SafeConfigParser()
        config.optionxform = str
    else:
        config = wt.OpenConf(inifile)

    # read settings
    snapgaugestoriver = wt.configget(config,
                                     'settings',
                                     'snapgaugestoriver',
                                     True,
                                     datatype='boolean')
    burnalltouching = wt.configget(config,
                                   'settings',
                                   'burncatchalltouching',
                                   True,
                                   datatype='boolean')
    burninorder = wt.configget(config,
                               'settings',
                               'burncatchalltouching',
                               False,
                               datatype='boolean')
    verticetollerance = wt.configget(config,
                                     'settings',
                                     'vertice_tollerance',
                                     0.0001,
                                     datatype='float')
    # read parameters
    burn_outlets = wt.configget(config,
                                'parameters',
                                'burn_outlets',
                                10000,
                                datatype='int')
    burn_rivers = wt.configget(config,
                               'parameters',
                               'burn_rivers',
                               200,
                               datatype='int')
    burn_connections = wt.configget(config,
                                    'parameters',
                                    'burn_connections',
                                    100,
                                    datatype='int')
    burn_gauges = wt.configget(config,
                               'parameters',
                               'burn_gauges',
                               100,
                               datatype='int')
    minorder = wt.configget(config,
                            'parameters',
                            'riverorder_min',
                            3,
                            datatype='int')
    try:
        percentiles = np.array(config.get('parameters', 'statisticmaps',
                                          '0, 100').replace(' ',
                                                            '').split(','),
                               dtype='float')
    except ConfigParser.NoOptionError:
        percentiles = [0.0, 100.0]
    # read the parameters for generating a temporary very high resolution grid
    if unit_clone == 'degree':
        cellsize_hr = wt.configget(config,
                                   'parameters',
                                   'highres_degree',
                                   0.0005,
                                   datatype='float')
    elif (unit_clone == 'metre') or (unit_clone == 'meter'):
        cellsize_hr = wt.configget(config,
                                   'parameters',
                                   'highres_metre',
                                   50,
                                   datatype='float')

    cols_hr = int((float(xmax) - float(xmin)) / cellsize_hr + 2)
    rows_hr = int((float(ymax) - float(ymin)) / cellsize_hr + 2)
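    # GDAL geotransform layout: (top-left x, pixel width, row rotation,
    #                            top-left y, column rotation, negative pixel height)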
    hr_trans = (float(xmin), cellsize_hr, float(0), float(ymax), 0,
                -cellsize_hr)
    clone_hr = os.path.join(destination, 'clone_highres.tif')
    # make a highres clone as well!
    wt.CreateTif(clone_hr, rows_hr, cols_hr, hr_trans, srs, 0)

    # read staticmap locations
    catchment_map = wt.configget(config, 'staticmaps', 'catchment',
                                 'wflow_catchment.map')
    dem_map = wt.configget(config, 'staticmaps', 'dem', 'wflow_dem.map')
    demmax_map = wt.configget(config, 'staticmaps', 'demmax',
                              'wflow_demmax.map')
    demmin_map = wt.configget(config, 'staticmaps', 'demmin',
                              'wflow_demmin.map')
    gauges_map = wt.configget(config, 'staticmaps', 'gauges',
                              'wflow_gauges.map')
    landuse_map = wt.configget(config, 'staticmaps', 'landuse',
                               'wflow_landuse.map')
    ldd_map = wt.configget(config, 'staticmaps', 'ldd', 'wflow_ldd.map')
    river_map = wt.configget(config, 'staticmaps', 'river', 'wflow_river.map')
    outlet_map = wt.configget(config, 'staticmaps', 'outlet',
                              'wflow_outlet.map')
    riverlength_fact_map = wt.configget(config, 'staticmaps',
                                        'riverlength_fact',
                                        'wflow_riverlength_fact.map')
    soil_map = wt.configget(config, 'staticmaps', 'soil', 'wflow_soil.map')
    streamorder_map = wt.configget(config, 'staticmaps', 'streamorder',
                                   'wflow_streamorder.map')
    subcatch_map = wt.configget(config, 'staticmaps', 'subcatch',
                                'wflow_subcatch.map')

    # read mask location (optional)
    masklayer = wt.configget(config, 'mask', 'masklayer', catchshp)

    # ???? empty = pcr.ifthen(ones == 0, pcr.scalar(0))

    # TODO: check if extents are correct this way
    # TODO: check what the role of missing values is in zeros and ones (l. 123
    # in old code)

    # first add a missing value to dem_in
    ds = gdal.Open(dem_in, gdal.GA_Update)
    RasterBand = ds.GetRasterBand(1)
    fill_val = RasterBand.GetNoDataValue()

    if fill_val is None:
        RasterBand.SetNoDataValue(-9999)
    ds = None

    # reproject to clone map: see http://stackoverflow.com/questions/10454316/how-to-project-and-resample-a-grid-to-match-another-grid-with-gdal-python
    # resample DEM
    logger.info('Resampling dem from {:s} to {:s}'.format(
        os.path.abspath(dem_in), os.path.join(destination, dem_map)))
    wt.gdal_warp(dem_in,
                 clone_map,
                 os.path.join(destination, dem_map),
                 format='PCRaster',
                 gdal_interp=gdalconst.GRA_Average)
    # retrieve amount of rows and columns from clone
    # TODO: make windowstats applicable to source/target with different projections. This does not work yet.
    # retrieve srs from DEM
    try:
        srs_dem = wt.get_projection(dem_in)
    except:
        logger.warning(
            'No projection found in DEM, assuming WGS 1984 lat long')
        srs_dem = osr.SpatialReference()
        srs_dem.ImportFromEPSG(4326)
    clone2dem_transform = osr.CoordinateTransformation(srs, srs_dem)
    # if srs.ExportToProj4() == srs_dem.ExportToProj4():

    wt.windowstats(dem_in,
                   len(yax),
                   len(xax),
                   trans,
                   srs,
                   destination,
                   percentiles,
                   transform=clone2dem_transform,
                   logger=logger)

    ## read catchment shape-file to create catchment map
    src = rasterio.open(clone_map)
    shapefile = fiona.open(catchshp, "r")
    catchment_shapes = [feature["geometry"] for feature in shapefile]
    image = features.rasterize(catchment_shapes,
                               out_shape=src.shape,
                               all_touched=True,
                               transform=src.transform)
    catchment_domain = pcr.numpy2pcr(pcr.Ordinal, image.copy(), 0)

    ## read river shape-file and create burn layer
    shapefile = fiona.open(rivshp, "r")
    river_shapes = [feature["geometry"] for feature in shapefile]
    image = features.rasterize(river_shapes,
                               out_shape=src.shape,
                               all_touched=False,
                               transform=src.transform)
    rivers = pcr.numpy2pcr(pcr.Nominal, image.copy(), 0)
    riverdem = pcr.scalar(rivers) * pcr.readmap(
        os.path.join(destination, dem_map))
    pcr.setglobaloption("lddin")
    riverldd = pcr.lddcreate(riverdem, 1e35, 1e35, 1e35, 1e35)

    riveroutlet = pcr.cover(
        pcr.ifthen(pcr.scalar(riverldd) == 5, pcr.scalar(1000)), 0)
    burn_layer = pcr.cover((pcr.scalar(
        pcr.ifthen(pcr.streamorder(riverldd) > 1, pcr.streamorder(riverldd))) -
                            1) * 1000 + riveroutlet, 0)
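    # the burn layer lowers the DEM by 1000 per Strahler order above 1 along
    # the rasterized rivers, plus an extra 1000 at river outlets, so that the
    # ldd computed per catchment below is forced to follow the mapped network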

    outlets_x, outlets_y = outlets
    n_outlets = len(outlets_x)
    logger.info('Number of outlets: {}'.format(n_outlets))
    if n_outlets >= 1:
        outlets_map_numbered = tr.points_to_map(pcr.scalar(0), outlets_x,
                                                outlets_y, 0.5)
        outlets_map = pcr.boolean(outlets_map_numbered)
        # snap outlets to closest river (max 1 cell closer to river)
        outlets_map = pcr.boolean(
            pcr.cover(tr.snaptomap(pcr.ordinal(outlets_map), rivers), 0))

    ## create ldd per catchment
    logger.info('Calculating ldd')
    ldddem = pcr.scalar(clone_map)

    # per subcatchment, burn dem, then create modified dem that fits the ldd of the subcatchment
    # this ldd dem is merged over catchments, to create a global ldd that abides to the subcatchment boundaries
    for idx, shape in enumerate(catchment_shapes):
        logger.info('Computing ldd for catchment ' + str(idx + 1) + '/' +
                    str(len(catchment_shapes)))
        image = features.rasterize([shape],
                                   out_shape=src.shape,
                                   all_touched=True,
                                   transform=src.transform)
        catchment = pcr.numpy2pcr(pcr.Scalar, image.copy(), 0)
        dem_burned_catchment = (
            pcr.readmap(os.path.join(destination, dem_map)) *
            pcr.scalar(catchment_domain) * catchment) - burn_layer
        ldddem_catchment = pcr.lddcreatedem(dem_burned_catchment, 1e35, 1e35,
                                            1e35, 1e35)
        ldddem = pcr.cover(ldddem, ldddem_catchment)

    wflow_ldd = pcr.lddcreate(ldddem, 1e35, 1e35, 1e35, 1e35)
    if n_outlets >= 1:
        # set outlets to pit
        wflow_ldd = pcr.ifthenelse(outlets_map, pcr.ldd(5), wflow_ldd)
        wflow_ldd = pcr.lddrepair(wflow_ldd)

    pcr.report(wflow_ldd, os.path.join(destination, 'wflow_ldd.map'))

    # compute stream order, identify river cells
    streamorder = pcr.ordinal(pcr.streamorder(wflow_ldd))
    river = pcr.ifthen(streamorder >= pcr.ordinal(minorder), pcr.boolean(1))
    # find the minimum value in the DEM and cover missing values with a river with this value. Effect is none!! so now left out!
    # mindem = int(np.min(pcr.pcr2numpy(pcr.ordinal(os.path.join(destination, dem_map)),9999999)))
    # dem_resample_map = pcr.cover(os.path.join(destination, dem_map), pcr.scalar(river)*0+mindem)
    # pcr.report(dem_resample_map, os.path.join(destination, dem_map))
    pcr.report(streamorder, os.path.join(destination, streamorder_map))
    pcr.report(river, os.path.join(destination, river_map))

    # deal with your catchments
    if gaugeshp is None:
        logger.info('No gauges defined, using outlets instead')
        gauges = pcr.ordinal(
            pcr.uniqueid(
                pcr.boolean(
                    pcr.ifthen(pcr.scalar(wflow_ldd) == 5, pcr.boolean(1)))))
        pcr.report(gauges, os.path.join(destination, gauges_map))
    # TODO: Add the gauge shape code from StaticMaps.py (line 454-489)
    # TODO: add river length map (see StaticMaps.py, line 492-499)

    # since the products here (river length fraction) are not yet used
    # this is disabled for now, as it also takes a lot of computation time
    if False:
        # report river length
        # make a high resolution empty map
        dem_hr_file = os.path.join(destination, 'dem_highres.tif')
        burn_hr_file = os.path.join(destination, 'burn_highres.tif')
        demburn_hr_file = os.path.join(destination, 'demburn_highres.map')
        riv_hr_file = os.path.join(destination, 'riv_highres.map')
        wt.gdal_warp(dem_in, clone_hr, dem_hr_file)
        # wt.CreateTif(riv_hr, rows_hr, cols_hr, hr_trans, srs, 0)
        # open the shape layer
        ds = ogr.Open(rivshp)
        lyr = ds.GetLayer(0)
        wt.ogr_burn(lyr,
                    clone_hr,
                    -100,
                    file_out=burn_hr_file,
                    format='GTiff',
                    gdal_type=gdal.GDT_Float32,
                    fill_value=0)
        # read dem and burn values and add
        xax_hr, yax_hr, burn_hr, fill = wt.gdal_readmap(burn_hr_file, 'GTiff')
        burn_hr[burn_hr == fill] = 0
        xax_hr, yax_hr, dem_hr, fill = wt.gdal_readmap(dem_hr_file, 'GTiff')
        dem_hr[dem_hr == fill] = np.nan
        demburn_hr = dem_hr + burn_hr
        demburn_hr[np.isnan(demburn_hr)] = -9999
        wt.gdal_writemap(demburn_hr_file, 'PCRaster', xax_hr, yax_hr,
                         demburn_hr, -9999.)
        pcr.setclone(demburn_hr_file)
        demburn_hr = pcr.readmap(demburn_hr_file)

        logger.info('Calculating ldd to determine river length')
        ldd_hr = pcr.lddcreate(demburn_hr, 1e35, 1e35, 1e35, 1e35)
        pcr.report(ldd_hr, os.path.join(destination, 'ldd_hr.map'))
        pcr.setglobaloption('unitcell')
        riv_hr = pcr.scalar(
            pcr.streamorder(ldd_hr) >= minorder) * pcr.downstreamdist(ldd_hr)
        pcr.report(riv_hr, riv_hr_file)
        pcr.setglobaloption('unittrue')
        pcr.setclone(clone_map)
        logger.info('Computing river length')
        wt.windowstats(riv_hr_file,
                       len(yax),
                       len(xax),
                       trans,
                       srs,
                       destination,
                       stat='fact',
                       transform=False,
                       logger=logger)
        # TODO: nothing happens with the river lengths yet. Need to decide how to use these

    # report outlet map
    pcr.report(pcr.ifthen(pcr.ordinal(wflow_ldd) == 5, pcr.ordinal(1)),
               os.path.join(destination, outlet_map))

    # report subcatchment map
    subcatchment = pcr.subcatchment(wflow_ldd, gauges)
    pcr.report(pcr.ordinal(subcatchment),
               os.path.join(destination, subcatch_map))

    # Report land use map
    if landuse is None:
        logger.info(
            'No land use map used. Preparing {:s} with only ones.'.format(
                os.path.join(destination, landuse_map)))
        pcr.report(pcr.nominal(ones), os.path.join(destination, landuse_map))
    else:
        logger.info('Resampling land use from {:s} to {:s}'.format(
            os.path.abspath(landuse),
            os.path.join(destination, os.path.abspath(landuse_map))))
        wt.gdal_warp(landuse,
                     clone_map,
                     os.path.join(destination, landuse_map),
                     format='PCRaster',
                     gdal_interp=gdalconst.GRA_Mode,
                     gdal_type=gdalconst.GDT_Int32)

    # report soil map
    if soil is None:
        logger.info('No soil map used. Preparing {:s} with only ones.'.format(
            os.path.join(destination, soil_map)))
        pcr.report(pcr.nominal(ones), os.path.join(destination, soil_map))
    else:
        logger.info('Resampling soil from {:s} to {:s}'.format(
            os.path.abspath(soil),
            os.path.join(destination, os.path.abspath(soil_map))))
        wt.gdal_warp(soil,
                     clone_map,
                     os.path.join(destination, soil_map),
                     format='PCRaster',
                     gdal_interp=gdalconst.GRA_Mode,
                     gdal_type=gdalconst.GDT_Int32)

    if lai is None:
        logger.info(
            'No vegetation LAI maps used. Preparing default maps {:s} with only ones.'
            .format(os.path.join(destination, soil_map)))
        pcr.report(pcr.nominal(ones), os.path.join(destination, soil_map))
    else:
        dest_lai = os.path.join(destination, 'clim')
        os.makedirs(dest_lai)
        for month in range(12):
            lai_in = os.path.join(lai, 'LAI00000.{:03d}'.format(month + 1))
            lai_out = os.path.join(dest_lai,
                                   'LAI00000.{:03d}'.format(month + 1))
            logger.info('Resampling vegetation LAI from {:s} to {:s}'.format(
                os.path.abspath(lai_in), os.path.abspath(lai_out)))
            wt.gdal_warp(lai_in,
                         clone_map,
                         lai_out,
                         format='PCRaster',
                         gdal_interp=gdalconst.GRA_Bilinear,
                         gdal_type=gdalconst.GDT_Float32)

    # resample other maps
    if other_maps is None:
        logger.info('No other maps used. Skipping other maps.')
    else:
        logger.info('Resampling list of other maps...')
        for map_file in other_maps:
            map_name = os.path.split(map_file)[1]
            logger.info('Resampling a map from {:s} to {:s}'.format(
                os.path.abspath(map_file),
                os.path.join(
                    destination,
                    os.path.splitext(os.path.basename(map_file))[0] + '.map')))
            wt.gdal_warp(map_file,
                         clone_map,
                         os.path.join(
                             destination,
                             os.path.splitext(os.path.basename(map_file))[0] +
                             '.map'),
                         format='PCRaster',
                         gdal_interp=gdalconst.GRA_Mode,
                         gdal_type=gdalconst.GDT_Float32)

    if clean:
        wt.DeleteList(glob.glob(os.path.join(destination, '*.xml')),
                      logger=logger)
        wt.DeleteList(glob.glob(os.path.join(destination, 'clim', '*.xml')),
                      logger=logger)
        wt.DeleteList(glob.glob(os.path.join(destination, '*highres*')),
                      logger=logger)
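
The rasterize-then-convert pattern used above for the catchment and river shapes can be isolated into a few lines; a sketch assuming rasterio, fiona, and PCRaster are installed, with 'mask.map' and 'catchments.shp' as hypothetical input paths:

import fiona
import pcraster as pcr
import rasterio
from rasterio import features

pcr.setclone('mask.map')                # hypothetical clone raster
src = rasterio.open('mask.map')
with fiona.open('catchments.shp', 'r') as shapefile:  # hypothetical shapes
    shapes = [feature['geometry'] for feature in shapefile]
# burn the polygons onto the clone grid (1 inside a polygon, 0 outside)...
image = features.rasterize(shapes, out_shape=src.shape,
                           all_touched=True, transform=src.transform)
# ...and hand the array to PCRaster, declaring 0 as the missing value
catchment_domain = pcr.numpy2pcr(pcr.Ordinal, image.copy(), 0)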
Example #26
def netcdf2PCRobjClone(ncFile,varName,dateInput,\
                       useDoy = None,
                       cloneMapFileName  = None,\
                       LatitudeLongitude = True,\
                       specificFillValue = None):
    # 
    # EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
    # --- with clone checking
    #     Only works if cells are 'square'.
    #     Only works if cellsizeClone <= cellsizeInput
    # Get netCDF file and variable name:
    
    #~ print ncFile
    
    logger.debug('reading variable: '+str(varName)+' from the file: '+str(ncFile))
    
    if ncFile in filecache:
        f = filecache[ncFile]
        #~ print "Cached: ", ncFile
    else:
        f = nc.Dataset(ncFile)
        filecache[ncFile] = f
        #~ print "New: ", ncFile
    
    varName = str(varName)
    
    if varName == "automatic":
        nc_dims = [dim for dim in f.dimensions]
        nc_vars = [var for var in f.variables]
        for var in nc_vars:                   
            if var not in nc_dims: varName = var
        logger.debug('reading variable: '+str(varName)+' from the file: '+str(ncFile))
    
    if LatitudeLongitude:
        try:
            f.variables['lat'] = f.variables['latitude']
            f.variables['lon'] = f.variables['longitude']
        except KeyError:
            pass
    
    if varName == "evapotranspiration":        
        try:
            f.variables['evapotranspiration'] = f.variables['referencePotET']
        except:
            pass

    # date
    date = dateInput
    if useDoy == "Yes":
        idx = dateInput - 1
    else:
        if isinstance(date, str):
            date = datetime.datetime.strptime(str(date), '%Y-%m-%d')
        date = datetime.datetime(date.year, date.month, date.day)
        # time index (in the netCDF file)
        if useDoy == "month":
            idx = int(date.month) - 1
        else:
            nctime = f.variables['time']  # A netCDF time variable object.
            if useDoy == "yearly":
                date  = datetime.datetime(date.year,int(1),int(1))
            if useDoy == "monthly":
                date = datetime.datetime(date.year,date.month,int(1))
            if useDoy == "yearly" or useDoy == "monthly":
                # if the desired year is not available, use the first year or the last year that is available
                first_year_in_nc_file = findFirstYearInNCTime(nctime)
                last_year_in_nc_file  =  findLastYearInNCTime(nctime)
                #
                if date.year < first_year_in_nc_file:  
                    date = datetime.datetime(first_year_in_nc_file,date.month,date.day)
                    msg  = "\n"
                    msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
                    msg += "The date "+str(dateInput)+" is NOT available. "
                    msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
                    msg += "\n"
                    logger.warning(msg)
                if date.year > last_year_in_nc_file:  
                    date = datetime.datetime(last_year_in_nc_file,date.month,date.day)
                    msg  = "\n"
                    msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
                    msg += "The date "+str(dateInput)+" is NOT available. "
                    msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
                    msg += "\n"
                    logger.warning(msg)
            try:
                idx = nc.date2index(date, nctime, calendar = nctime.calendar, \
                                                  select='exact')
            except Exception:
                try:
                    idx = nc.date2index(date, nctime, calendar = nctime.calendar, \
                                                      select='before')
                    msg  = "\n"
                    msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
                    msg += "The date "+str(dateInput)+" is NOT available. The 'before' option is used while selecting netcdf time."
                    msg += "\n"
                except Exception:
                    idx = nc.date2index(date, nctime, calendar = nctime.calendar, \
                                                      select='after')
                    msg  = "\n"
                    msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
                    msg += "The date "+str(dateInput)+" is NOT available. The 'after' option is used while selecting netcdf time."
                    msg += "\n"
                logger.warning(msg)
                                                  
    idx = int(idx)                                                  

    sameClone = True
    # check whether clone and input maps have the same attributes:
    if cloneMapFileName is not None:
        # get the attributes of cloneMap
        attributeClone = getMapAttributesALL(cloneMapFileName)
        cellsizeClone = attributeClone['cellsize']
        rowsClone = attributeClone['rows']
        colsClone = attributeClone['cols']
        xULClone = attributeClone['xUL']
        yULClone = attributeClone['yUL']
        # get the attributes of input (netCDF) 
        cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
        cellsizeInput = float(cellsizeInput)
        rowsInput = len(f.variables['lat'])
        colsInput = len(f.variables['lon'])
        xULInput = f.variables['lon'][0]-0.5*cellsizeInput
        yULInput = f.variables['lat'][0]+0.5*cellsizeInput
        # check whether both maps have the same attributes 
        if cellsizeClone != cellsizeInput: sameClone = False
        if rowsClone != rowsInput: sameClone = False
        if colsClone != colsInput: sameClone = False
        if xULClone != xULInput: sameClone = False
        if yULClone != yULInput: sameClone = False

    cropData = f.variables[varName][int(idx),:,:]       # still original data
    factor = 1                          # needed in regridData2FinerGrid

    if not sameClone:
        
        logger.debug('Crop to the clone map with upper left corner (x,y): '+str(xULClone)+' , '+str(yULClone))
        # crop to cloneMap:
        #~ xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
        minX    = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
        xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
        xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
        #~ yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
        minY    = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
        yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
        yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
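        # xIdxSta:xIdxEnd and yIdxSta:yIdxEnd now span the clone extent in
        # input-grid indices; math.ceil guarantees full coverage when the
        # input grid is coarser than the clone.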
        cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]

        factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
        if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))

    # convert to PCR object and close f
    if specificFillValue is not None:
        outPCR = pcr.numpy2pcr(pcr.Scalar, \
                  regridData2FinerGrid(factor,cropData,MV), \
                  float(specificFillValue))
    else:
        outPCR = pcr.numpy2pcr(pcr.Scalar, \
                  regridData2FinerGrid(factor,cropData,MV), \
                  float(f.variables[varName]._FillValue))
                  
    # f is kept open in filecache for reuse; only release local references
    f = None; cropData = None
    # PCRaster object
    return (outPCR)
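# A minimal usage sketch; the netCDF file, variable name and clone map below
# are hypothetical placeholders, not files from the original source.
prec = netcdf2PCRobjClone('forcing/precipitation.nc', 'precipitation',
                          '1981-01-01', useDoy=None,
                          cloneMapFileName='clone.map')
pcr.report(prec, 'prec_19810101.map')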
Example #27
0
def static_maps(
    source,  # source folder containing clone
    destination,  # destination folder
    inifile,  # ini file with various settings
    dem_in,  # path to digital elevation model (raster)
    rivshp,  # path to river network (line vector)
    catchshp,  # path to catchment polygon (polygon vector)
    gaugeshp=None,  # path to gauge point (point vector)
    landuse=None,  # path to land use / land cover (raster)
    soil=None,  # path to soil type (raster)
    lai=None,  # path to vegetation LAI (containing 12 GeoTiffs LAI00000.XXX.tif)
    other_maps=None,  # bracketed [] comma-separated list of paths to other maps that should be reprojected
    logfilename="wtools_static_maps.log",  # log file name
    verbose=True,
    clean=True,  # Clean the .xml files from static maps folder when finished
    alltouch=False,  # burn catchments "all touching"; useful when catchment size is small compared to cell size
    outlets=([], []),
):
    # parse other maps into a list
    if other_maps is not None:
        if isinstance(other_maps, str):
            print(other_maps)
            other_maps = (
                other_maps.replace(" ", "").replace("[", "").replace("]", "").split(",")
            )

    source = os.path.abspath(source)
    clone_tif = os.path.join(source, "mask.tif")
    clone_map = os.path.join(source, "mask.map")
    clone_shp = os.path.join(source, "mask.shp")
    clone_prj = os.path.join(source, "mask.prj")

    # open a logger, dependent on verbose print to screen or not
    logger, ch = wt.setlogger(logfilename, "WTOOLS", verbose)

    # create directories # TODO: check if workdir is still necessary, try to
    # keep in memory as much as possible

    # delete old files (when the source and destination folder are different)
    if os.path.isdir(destination) and destination != source:
        shutil.rmtree(destination)
    if destination != source:
        os.makedirs(destination)

    # Read mask
    if not os.path.exists(clone_map):
        logger.error(
            "Clone file {:s} not found. Please run create_grid first.".format(clone_map)
        )
        sys.exit(1)
    else:
        # set clone
        pcr.setclone(clone_map)
        # get the extent from clone.tif
        xax, yax, clone, fill_value = wt.gdal_readmap(clone_tif, "GTiff")
        trans = wt.get_geotransform(clone_tif)
        extent = wt.get_extent(clone_tif)
        xmin, ymin, xmax, ymax = extent
        zeros = np.zeros(clone.shape)
        ones = pcr.numpy2pcr(pcr.Scalar, np.ones(clone.shape), -9999)
        # get the projection from clone.tif
        srs = wt.get_projection(clone_tif)
        unit_clone = srs.GetAttrValue("UNIT").lower()

    # READ CONFIG FILE
    # open config-file
    if inifile is None:
        config = configparser.ConfigParser()
        config.optionxform = str
    else:
        config = wt.OpenConf(inifile)

    # read settings
    """ read parameters """
    minorder = wt.configget(config, "parameters", "riverorder_min", 3, datatype="int")
    try:
        percentiles_str = wt.configget(
            config, "parameters", "statisticmaps", "0, 100", datatype="str"
        )
        percentiles_split = percentiles_str.replace(" ", "").split(",")
        percentiles = np.array(percentiles_split, dtype="float")
    except configparser.NoOptionError:
        percentiles = [0.0, 100.0]
    # read the parameters for generating a temporary very high resolution grid
    if unit_clone == "degree":
        cellsize_hr = wt.configget(
            config, "parameters", "highres_degree", 0.0005, datatype="float"
        )
    elif (unit_clone == "metre") or (unit_clone == "meter"):
        cellsize_hr = wt.configget(
            config, "parameters", "highres_metre", 50, datatype="float"
        )

    cols_hr = int((float(xmax) - float(xmin)) / cellsize_hr + 2)
    rows_hr = int((float(ymax) - float(ymin)) / cellsize_hr + 2)
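    # GDAL geotransform layout: (x origin, x cell size, row rotation, y origin, column rotation, negative y cell size)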
    hr_trans = (float(xmin), cellsize_hr, float(0), float(ymax), 0, -cellsize_hr)
    clone_hr = os.path.join(destination, "clone_highres.tif")
    # make a highres clone as well!
    wt.CreateTif(clone_hr, rows_hr, cols_hr, hr_trans, srs, 0)

    # read staticmap locations
    dem_map = wt.configget(config, "staticmaps", "dem", "wflow_dem.map")
    gauges_map = wt.configget(config, "staticmaps", "gauges", "wflow_gauges.map")
    landuse_map = wt.configget(config, "staticmaps", "landuse", "wflow_landuse.map")
    river_map = wt.configget(config, "staticmaps", "river", "wflow_river.map")
    outlet_map = wt.configget(config, "staticmaps", "outlet", "wflow_outlet.map")
    soil_map = wt.configget(config, "staticmaps", "soil", "wflow_soil.map")
    streamorder_map = wt.configget(
        config, "staticmaps", "streamorder", "wflow_streamorder.map"
    )
    subcatch_map = wt.configget(config, "staticmaps", "subcatch", "wflow_subcatch.map")

    # first add a missing value to dem_in
    ds = gdal.Open(dem_in, gdal.GA_Update)
    RasterBand = ds.GetRasterBand(1)
    fill_val = RasterBand.GetNoDataValue()

    if fill_val is None:
        RasterBand.SetNoDataValue(-9999)
    ds = None

    # reproject to clone map: see http://stackoverflow.com/questions/10454316/how-to-project-and-resample-a-grid-to-match-another-grid-with-gdal-python
    # resample DEM
    logger.info(
        "Resampling dem from {:s} to {:s}".format(
            os.path.abspath(dem_in), os.path.join(destination, dem_map)
        )
    )
    wt.gdal_warp(
        dem_in,
        clone_map,
        os.path.join(destination, dem_map),
        format="PCRaster",
        gdal_interp=gdalconst.GRA_Average,
    )
    # retrieve amount of rows and columns from clone
    # TODO: make windowstats applicable to source/target with different projections. This does not work yet.
    # retrieve srs from DEM
    try:
        srs_dem = wt.get_projection(dem_in)
    except:
        logger.warning("No projection found in DEM, assuming WGS 1984 lat long")
        srs_dem = osr.SpatialReference()
        srs_dem.ImportFromEPSG(4326)
    clone2dem_transform = osr.CoordinateTransformation(srs, srs_dem)
    # if srs.ExportToProj4() == srs_dem.ExportToProj4():

    wt.windowstats(
        dem_in,
        len(yax),
        len(xax),
        trans,
        srs,
        destination,
        percentiles,
        transform=clone2dem_transform,
        logger=logger,
    )

    ## read catchment shape-file to create catchment map
    src = rasterio.open(clone_tif)
    shapefile = fiona.open(catchshp, "r")
    catchment_shapes = [feature["geometry"] for feature in shapefile]
    image = features.rasterize(
        catchment_shapes, out_shape=src.shape, all_touched=True, transform=src.transform
    )
    catchment_domain = pcr.numpy2pcr(pcr.Ordinal, image.copy(), 0)

    ## read river shape-file and create burn layer
    shapefile = fiona.open(rivshp, "r")
    river_shapes = [feature["geometry"] for feature in shapefile]
    image = features.rasterize(
        river_shapes, out_shape=src.shape, all_touched=False, transform=src.transform
    )
    rivers = pcr.numpy2pcr(pcr.Nominal, image.copy(), 0)
    riverdem = pcr.scalar(rivers) * pcr.readmap(os.path.join(destination, dem_map))
    pcr.setglobaloption("lddin")
    riverldd = pcr.lddcreate(riverdem, 1e35, 1e35, 1e35, 1e35)

    riveroutlet = pcr.cover(pcr.ifthen(pcr.scalar(riverldd) == 5, pcr.scalar(1000)), 0)
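    # Burn depth grows with stream order (1000 per order above 1) and river
    # outlets get an extra 1000, so lddcreate drains along the river network
    # toward the outlets.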
    burn_layer = pcr.cover(
        (
            pcr.scalar(
                pcr.ifthen(pcr.streamorder(riverldd) > 1, pcr.streamorder(riverldd))
            )
            - 1
        )
        * 1000
        + riveroutlet,
        0,
    )

    outlets_x, outlets_y = outlets
    n_outlets = len(outlets_x)
    logger.info("Number of outlets: {}".format(n_outlets))
    if n_outlets >= 1:
        outlets_map_numbered = points_to_map(pcr.scalar(0), outlets_x, outlets_y, 0.5)
        outlets_map = pcr.boolean(outlets_map_numbered)
        # snap outlets to closest river (max 1 cell closer to river)
        outlets_map = pcr.boolean(
            pcr.cover(snaptomap(pcr.ordinal(outlets_map), rivers), 0)
        )

    ## create ldd per catchment
    logger.info("Calculating ldd")
    ldddem = pcr.scalar(clone_map)

    # per subcatchment, burn dem, then create modified dem that fits the ldd of the subcatchment
    # this ldd dem is merged over catchments, to create a global ldd that abides to the subcatchment boundaries
    for idx, shape in enumerate(catchment_shapes):
        logger.info(
            "Computing ldd for catchment "
            + str(idx + 1)
            + "/"
            + str(len(catchment_shapes))
        )
        image = features.rasterize(
            [shape], out_shape=src.shape, all_touched=True, transform=src.transform
        )
        catchment = pcr.numpy2pcr(pcr.Scalar, image.copy(), 0)
        dem_burned_catchment = (
            pcr.readmap(os.path.join(destination, dem_map))
            * pcr.scalar(catchment_domain)
            * catchment
        ) - burn_layer
        ldddem = pcr.cover(ldddem, dem_burned_catchment)

    wflow_ldd = pcr.lddcreate(ldddem, 1e35, 1e35, 1e35, 1e35)
    if n_outlets >= 1:
        # set outlets to pit
        wflow_ldd = pcr.ifthenelse(outlets_map, pcr.ldd(5), wflow_ldd)
        wflow_ldd = pcr.lddrepair(wflow_ldd)

    pcr.report(wflow_ldd, os.path.join(destination, "wflow_ldd.map"))

    # compute stream order, identify river cells
    streamorder = pcr.ordinal(pcr.streamorder(wflow_ldd))
    river = pcr.ifthen(streamorder >= pcr.ordinal(minorder), pcr.boolean(1))
    # covering missing DEM values with the minimum river elevation had no effect, so it is left out:
    # mindem = int(np.min(pcr.pcr2numpy(pcr.ordinal(os.path.join(destination, dem_map)),9999999)))
    # dem_resample_map = pcr.cover(os.path.join(destination, dem_map), pcr.scalar(river)*0+mindem)
    # pcr.report(dem_resample_map, os.path.join(destination, dem_map))
    pcr.report(streamorder, os.path.join(destination, streamorder_map))
    pcr.report(river, os.path.join(destination, river_map))

    # deal with your catchments
    if gaugeshp is None:
        logger.info("No gauges defined, using outlets instead")
        gauges = pcr.ordinal(
            pcr.uniqueid(
                pcr.boolean(pcr.ifthen(pcr.scalar(wflow_ldd) == 5, pcr.boolean(1)))
            )
        )
        pcr.report(gauges, os.path.join(destination, gauges_map))
    # TODO: Add the gauge shape code from StaticMaps.py (line 454-489)
    # TODO: add river length map (see StaticMaps.py, line 492-499)

    # since the products here (river length fraction) are not yet used
    # this is disabled for now, as it also takes a lot of computation time
    if False:
        # report river length
        # make a high resolution empty map
        dem_hr_file = os.path.join(destination, "dem_highres.tif")
        burn_hr_file = os.path.join(destination, "burn_highres.tif")
        demburn_hr_file = os.path.join(destination, "demburn_highres.map")
        riv_hr_file = os.path.join(destination, "riv_highres.map")
        wt.gdal_warp(dem_in, clone_hr, dem_hr_file)
        # wt.CreateTif(riv_hr, rows_hr, cols_hr, hr_trans, srs, 0)
        # open the shape layer
        ds = ogr.Open(rivshp)
        lyr = ds.GetLayer(0)
        wt.ogr_burn(
            lyr,
            clone_hr,
            -100,
            file_out=burn_hr_file,
            format="GTiff",
            gdal_type=gdal.GDT_Float32,
            fill_value=0,
        )
        # read dem and burn values and add
        xax_hr, yax_hr, burn_hr, fill = wt.gdal_readmap(burn_hr_file, "GTiff")
        burn_hr[burn_hr == fill] = 0
        xax_hr, yax_hr, dem_hr, fill = wt.gdal_readmap(dem_hr_file, "GTiff")
        dem_hr[dem_hr == fill] = np.nan
        demburn_hr = dem_hr + burn_hr
        demburn_hr[np.isnan(demburn_hr)] = -9999
        wt.gdal_writemap(
            demburn_hr_file, "PCRaster", xax_hr, yax_hr, demburn_hr, -9999.0
        )
        pcr.setclone(demburn_hr_file)
        demburn_hr = pcr.readmap(demburn_hr_file)

        logger.info("Calculating ldd to determine river length")
        ldd_hr = pcr.lddcreate(demburn_hr, 1e35, 1e35, 1e35, 1e35)
        pcr.report(ldd_hr, os.path.join(destination, "ldd_hr.map"))
        pcr.setglobaloption("unitcell")
        riv_hr = pcr.scalar(pcr.streamorder(ldd_hr) >= minorder) * pcr.downstreamdist(
            ldd_hr
        )
        pcr.report(riv_hr, riv_hr_file)
        pcr.setglobaloption("unittrue")
        pcr.setclone(clone_map)
        logger.info("Computing river length")
        wt.windowstats(
            riv_hr_file,
            len(yax),
            len(xax),
            trans,
            srs,
            destination,
            stat="fact",
            transform=False,
            logger=logger,
        )
        # TODO: nothing happens with the river lengths yet. Need to decide how to use these

    # report outlet map
    pcr.report(
        pcr.ifthen(pcr.ordinal(wflow_ldd) == 5, pcr.ordinal(1)),
        os.path.join(destination, outlet_map),
    )

    # report subcatchment map
    subcatchment = pcr.subcatchment(wflow_ldd, gauges)
    pcr.report(pcr.ordinal(subcatchment), os.path.join(destination, subcatch_map))

    # Report land use map
    if landuse is None:
        logger.info(
            "No land use map used. Preparing {:s} with only ones.".format(
                os.path.join(destination, landuse_map)
            )
        )
        pcr.report(pcr.nominal(ones), os.path.join(destination, landuse_map))
    else:
        logger.info(
            "Resampling land use from {:s} to {:s}".format(
                os.path.abspath(landuse),
                os.path.join(destination, landuse_map),
            )
        )
        wt.gdal_warp(
            landuse,
            clone_map,
            os.path.join(destination, landuse_map),
            format="PCRaster",
            gdal_interp=gdalconst.GRA_Mode,
            gdal_type=gdalconst.GDT_Int32,
        )

    # report soil map
    if soil is None:
        logger.info(
            "No soil map used. Preparing {:s} with only ones.".format(
                os.path.join(destination, soil_map)
            )
        )
        pcr.report(pcr.nominal(ones), os.path.join(destination, soil_map))
    else:
        logger.info(
            "Resampling soil from {:s} to {:s}".format(
                os.path.abspath(soil),
                os.path.join(destination, soil_map),
            )
        )
        wt.gdal_warp(
            soil,
            clone_map,
            os.path.join(destination, soil_map),
            format="PCRaster",
            gdal_interp=gdalconst.GRA_Mode,
            gdal_type=gdalconst.GDT_Int32,
        )

    if lai is None:
        dest_lai = os.path.join(destination, "clim")
        os.makedirs(dest_lai)
        logger.info(
            "No vegetation LAI maps used. Preparing default monthly maps in {:s} with only ones.".format(
                dest_lai
            )
        )
        for month in range(12):
            pcr.report(
                ones, os.path.join(dest_lai, "LAI00000.{:03d}".format(month + 1))
            )
    else:
        dest_lai = os.path.join(destination, "clim")
        os.makedirs(dest_lai)
        for month in range(12):
            lai_in = os.path.join(lai, "LAI00000.{:03d}".format(month + 1))
            lai_out = os.path.join(dest_lai, "LAI00000.{:03d}".format(month + 1))
            logger.info(
                "Resampling vegetation LAI from {:s} to {:s}".format(
                    os.path.abspath(lai_in), os.path.abspath(lai_out)
                )
            )
            wt.gdal_warp(
                lai_in,
                clone_map,
                lai_out,
                format="PCRaster",
                gdal_interp=gdalconst.GRA_Bilinear,
                gdal_type=gdalconst.GDT_Float32,
            )

    # report other maps
    if other_maps is None:
        logger.info("No other maps used. Skipping other maps.")
    else:
        logger.info("Resampling list of other maps...")
        for map_file in other_maps:
            logger.info(
                "Resampling a map from {:s} to {:s}".format(
                    os.path.abspath(map_file),
                    os.path.join(
                        destination,
                        os.path.splitext(os.path.basename(map_file))[0] + ".map",
                    ),
                )
            )
            wt.gdal_warp(
                map_file,
                clone_map,
                os.path.join(
                    destination,
                    os.path.splitext(os.path.basename(map_file))[0] + ".map",
                ),
                format="PCRaster",
                gdal_interp=gdalconst.GRA_Mode,
                gdal_type=gdalconst.GDT_Float32,
            )

    if clean:
        wt.DeleteList(glob.glob(os.path.join(destination, "*.xml")), logger=logger)
        wt.DeleteList(
            glob.glob(os.path.join(destination, "clim", "*.xml")), logger=logger
        )
        wt.DeleteList(glob.glob(os.path.join(destination, "*highres*")), logger=logger)
Example #28
0
def regridMapFile2FinerGrid(rescaleFac, coarse):
    if rescaleFac == 1:
        return coarse
    return pcr.numpy2pcr(pcr.Scalar, regridData2FinerGrid(rescaleFac, pcr.pcr2numpy(coarse, MV), MV), MV)
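# regridData2FinerGrid is defined elsewhere in this source; a plausible
# minimal equivalent (an assumption, not the original implementation) repeats
# each coarse cell rescaleFac times along both axes:
import numpy as np

def regridData2FinerGrid_sketch(rescaleFac, coarse):
    if rescaleFac == 1:
        return coarse
    return np.repeat(np.repeat(coarse, rescaleFac, axis=0), rescaleFac, axis=1)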
Example #29
0
    def dynamic(self):
        self.counter += 1
        print(
            str(self.curdate.day) + '-' + str(self.curdate.month) + '-' +
            str(self.curdate.year) + '  t = ' + str(self.counter))

        #-Snow and rain fraction settings for non-glacier part of model cell
        SnowFrac = pcr.ifthenelse(self.SnowStore > 0,
                                  pcr.scalar(1 - self.GlacFrac), 0)
        RainFrac = pcr.ifthenelse(self.SnowStore == 0,
                                  pcr.scalar(1 - self.GlacFrac), 0)

        #-Read the precipitation time-series
        if self.precNetcdfFLAG == 1:
            #-read forcing by netcdf input
            Precip = self.netcdf2PCraster.netcdf2pcrDynamic(self, pcr, 'Prec')
        else:
            #-read forcing by map input
            Precip = pcr.readmap(pcrm.generateNameT(self.Prec, self.counter))
        PrecipTot = Precip
        #-Report Precip
        self.reporting.reporting(self, pcr, 'TotPrec', Precip)
        self.reporting.reporting(self, pcr, 'TotPrecF',
                                 Precip * (1 - self.GlacFrac))

        #-Temperature and determine reference evapotranspiration
        if self.tempNetcdfFLAG == 1:
            #-read forcing by netcdf input
            Temp = self.netcdf2PCraster.netcdf2pcrDynamic(self, pcr, 'Temp')
        else:
            #-read forcing by map input
            Temp = pcr.readmap(pcrm.generateNameT(self.Tair, self.counter))

        if self.ETREF_FLAG == 0:
            if self.TminNetcdfFLAG == 1:
                #-read forcing by netcdf input
                TempMin = self.netcdf2PCraster.netcdf2pcrDynamic(
                    self, pcr, 'Tmin')
            else:
                #-read forcing by map input
                TempMin = pcr.readmap(
                    pcrm.generateNameT(self.Tmin, self.counter))
            if self.TmaxNetcdfFLAG == 1:
                #-read forcing by netcdf input
                TempMax = self.netcdf2PCraster.netcdf2pcrDynamic(
                    self, pcr, 'Tmax')
            else:
                #-read forcing by map input
                TempMax = pcr.readmap(
                    pcrm.generateNameT(self.Tmax, self.counter))
            ETref = self.Hargreaves.Hargreaves(
                pcr, self.Hargreaves.extrarad(self, pcr), Temp, TempMax,
                TempMin)
        else:
            ETref = pcr.readmap(pcrm.generateNameT(self.ETref, self.counter))
        self.reporting.reporting(self, pcr, 'TotETref', ETref)
        self.reporting.reporting(self, pcr, 'TotETrefF',
                                 ETref * (1 - self.GlacFrac))

        #-Interception and effective precipitation
        if self.DynVegFLAG == 1:
            #-read dynamic processes dynamic vegetation
            Precip = self.dynamic_veg.dynamic(self, pcr, pcrm, np, Precip,
                                              ETref)

        elif self.KcStatFLAG == 0:
            #-Try to read the KC map series; fall back to the previous Kc when
            # the map for this time step is missing
            try:
                self.Kc = pcr.readmap(
                    pcrm.generateNameT(self.Kcmaps, self.counter))
                self.KcOld = self.Kc
            except Exception:
                self.Kc = self.KcOld
        #-report mm effective precipitation for sub-basin averages
        if self.mm_rep_FLAG == 1 and self.Prec_mm_FLAG == 1 and (
                self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1):
            self.PrecSubBasinTSS.sample(
                pcr.catchmenttotal(Precip *
                                   (1 - self.GlacFrac), self.FlowDir) /
                pcr.catchmenttotal(1, self.FlowDir))

        #-Snow, rain, and glacier calculations for glacier fraction of cell
        if self.GlacFLAG:
            #-read dynamic processes glacier
            Rain_GLAC, Snow_GLAC, ActSnowMelt_GLAC, SnowR_GLAC, GlacMelt, GlacPerc, self.GlacR = self.glacier.dynamic(
                self, pcr, pd, Temp, Precip)
        #-If glacier module is not used, then
        else:
            Rain_GLAC = 0
            Snow_GLAC = 0
            ActSnowMelt_GLAC = 0
            self.TotalSnowStore_GLAC = 0
            SnowR_GLAC = 0
            self.GlacR = 0
            GlacMelt = 0
            GlacPerc = 0

        # Calculate snow and rain for non-glacier part of cell
        if self.SnowFLAG == 1:
            #-read dynamic processes snow
            Rain, self.SnowR, OldTotalSnowStore = self.snow.dynamic(
                self, pcr, Temp, Precip, Snow_GLAC, ActSnowMelt_GLAC, SnowFrac,
                RainFrac, SnowR_GLAC)
        else:
            Rain = Precip
            self.SnowR = 0
            OldTotalSnowStore = 0
            self.TotalSnowStore = 0
        #-Report Rain
        self.reporting.reporting(self, pcr, 'TotRain', Rain)
        self.reporting.reporting(self, pcr, 'TotRainF',
                                 Rain * (1 - self.GlacFrac) +
                                 Rain_GLAC)  # for entire cell

        #-Potential evapotranspiration
        ETpot = self.ET.ETpot(ETref, self.Kc)
        if self.ETOpenWaterFLAG == 1:
            self.ETOpenWater = self.ET.ETpot(ETref, self.kcOpenWater)
        #-Report ETpot
        self.reporting.reporting(self, pcr, 'TotETpot', ETpot)
        self.reporting.reporting(self, pcr, 'TotETpotF', ETpot * RainFrac)

        #-Rootzone calculations
        self.RootWater = self.RootWater + self.CapRise
        #-Calculate rootzone runoff
        tempvar = self.rootzone.RootRunoff(self, pcr, RainFrac, Rain)
        #-Rootzone runoff
        RootRunoff = tempvar[0]
        #-Infiltration
        Infil = tempvar[1]
        #-Report infiltration
        self.reporting.reporting(self, pcr, 'Infil', Infil)
        #-Updated rootwater content
        self.RootWater = pcr.ifthenelse(RainFrac > 0, self.RootWater + Infil,
                                        self.RootWater)

        #-Actual evapotranspiration
        if self.PlantWaterStressFLAG == 1:
            etreddry = self.ET.ks(self, pcr, ETpot)
        else:
            etreddry = pcr.max(
                pcr.min((self.RootWater - self.RootDry) /
                        (self.RootWilt - self.RootDry), 1), 0)
        self.reporting.reporting(self, pcr, 'PlantStress', 1 - etreddry)
        ETact = self.ET.ETact(pcr, ETpot, self.RootWater, self.RootSat,
                              etreddry, RainFrac)
        #-Report the actual evapotranspiration
        self.reporting.reporting(
            self, pcr, 'TotETact',
            ETact * (1 - self.openWaterFrac) +
            self.ETOpenWater * self.openWaterFrac)
        #-Actual evapotranspiration, corrected for rain fraction
        ActETact = ETact * RainFrac
        #-Report the actual evapotranspiration, corrected for rain fraction
        self.reporting.reporting(self, pcr, 'TotETactF', ActETact)
        if self.mm_rep_FLAG == 1 and self.ETa_mm_FLAG == 1 and (
                self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1):
            self.ETaSubBasinTSS.sample(
                pcr.catchmenttotal(ActETact, self.FlowDir) /
                pcr.catchmenttotal(1, self.FlowDir))
        #-Update rootwater content
        self.RootWater = pcr.max(self.RootWater - ETact, 0)

        #-Calculate drainage
        temp_RootDrain = self.rootzone.RootDrainage(
            pcr, self.RootWater, self.RootDrain, self.RootField, self.RootSat,
            self.RootDrainVel, self.RootTT)
        #-Calculate percolation
        temp_rootperc = self.rootzone.RootPercolation(pcr, self.RootWater,
                                                      self.SubWater,
                                                      self.RootField,
                                                      self.RootTT, self.SubSat)
        #-Total sum of water able to leave the soil
        RootOut = temp_RootDrain + temp_rootperc
        #-Calculate new values for drainage and percolation (to be used when RootOut > RootExcess)
        newdrain, newperc = self.rootzone.CalcFrac(pcr, self.RootWater,
                                                   self.RootField,
                                                   temp_RootDrain,
                                                   temp_rootperc)
        #-Determine whether the new values need to be used
        rootexcess = pcr.max(self.RootWater - self.RootField, 0)
        self.RootDrain = pcr.ifthenelse(RootOut > rootexcess, newdrain,
                                        temp_RootDrain)
        rootperc = pcr.ifthenelse(RootOut > rootexcess, newperc, temp_rootperc)
        #-Update the RootWater content
        # Roottemp = self.RootWater
        self.RootWater = self.RootWater - (self.RootDrain + rootperc)

        #-Report rootzone percolation, corrected for fraction
        self.reporting.reporting(self, pcr, 'TotRootPF',
                                 rootperc * (1 - self.GlacFrac))
        #-Report rootwater content
        self.reporting.reporting(self, pcr, 'StorRootW',
                                 self.RootWater * (1 - self.openWaterFrac))

        #-Sub soil calculations
        self.SubWater = self.SubWater + rootperc
        if self.GroundFLAG == 0:
            if self.SeepStatFLAG == 0:
                try:
                    self.SeePage = pcr.readmap(
                        pcrm.generateNameT(self.Seepmaps, self.counter))
                    self.SeepOld = self.SeePage
                except Exception:
                    self.SeePage = self.SeepOld

            #-Report seepage
            self.reporting.reporting(self, pcr, 'TotSeepF',
                                     pcr.scalar(self.SeePage))
            self.SubWater = pcr.min(pcr.max(self.SubWater - self.SeePage, 0),
                                    self.SubSat)
            if self.mm_rep_FLAG == 1 and self.Seep_mm_FLAG == 1 and (
                    self.RoutFLAG == 1 or self.ResFLAG == 1
                    or self.LakeFLAG == 1):
                self.SeepSubBasinTSS.sample(
                    pcr.catchmenttotal(self.SeePage, self.FlowDir) /
                    pcr.catchmenttotal(1, self.FlowDir))
        #-Capillary rise
        self.CapRise = self.subzone.CapilRise(pcr, self.SubField,
                                              self.SubWater, self.CapRiseMax,
                                              self.RootWater, self.RootSat,
                                              self.RootField)
        #-Report capillary rise, corrected for fraction
        self.reporting.reporting(self, pcr, 'TotCapRF',
                                 self.CapRise * (1 - self.GlacFrac))
        #-Update sub soil water content
        self.SubWater = self.SubWater - self.CapRise
        if self.GroundFLAG == 1:  # sub percolation will be calculated instead of subdrainage
            subperc = self.subzone.SubPercolation(pcr, self.SubWater,
                                                  self.SubField, self.SubTT,
                                                  self.Gw, self.GwSat)
            ActSubPerc = subperc * (1 - self.GlacFrac)
            #-Report the subzone percolation, corrected for the fraction
            self.reporting.reporting(self, pcr, 'TotSubPF', ActSubPerc)
            #-Update sub soil water content
            self.SubWater = self.SubWater - subperc
        else:  # sub drainage will be calculated instead of sub percolation
            self.SubDrain = self.subzone.SubDrainage(pcr, self.SubWater,
                                                     self.SubField,
                                                     self.SubSat,
                                                     self.SubDrainVel,
                                                     self.SubDrain, self.SubTT)
            #-Report drainage from subzone
            self.reporting.reporting(self, pcr, 'TotSubDF', self.SubDrain)
            #-Update sub soil water content
            self.SubWater = self.SubWater - self.SubDrain
        #-Report rootwater content
        self.reporting.reporting(self, pcr, 'StorSubW',
                                 self.SubWater * (1 - self.openWaterFrac))

        #-Changes in soil water storage
        OldSoilWater = self.SoilWater
        self.SoilWater = (self.RootWater + self.SubWater) * (1 - self.GlacFrac)

        #-Rootzone runoff
        self.RootRR = RootRunoff * RainFrac * (1 - self.openWaterFrac)
        #-Report rootzone runoff, corrected for fraction
        self.reporting.reporting(self, pcr, 'TotRootRF', self.RootRR)
        #-Rootzone drainage
        self.RootDR = self.RootDrain * (1 - self.GlacFrac) * (
            1 - self.openWaterFrac)
        #-Report rootzone drainage, corrected for fraction
        self.reporting.reporting(self, pcr, 'TotRootDF', self.RootDR)
        #-Rain runoff
        self.RainR = self.RootRR + self.RootDR
        #-Report rain runoff
        self.reporting.reporting(self, pcr, 'TotRainRF', self.RainR)

        #-Groundwater calculations
        if self.GroundFLAG == 1:
            #-read dynamic processes groundwater
            self.groundwater.dynamic(self, pcr, ActSubPerc, GlacPerc)
        else:
            #-Use drainage from subsoil as baseflow
            self.BaseR = self.SubDrain
            #-Groundwater level as scaled between min and max measured gwl
            SoilAct = self.RootWater + self.SubWater
            SoilRel = (SoilAct - self.SoilMin) / (
                self.SoilMax - self.SoilMin
            )  # scale between 0 (dry) and 1 (wet)
            GWL = self.GWL_base - (SoilRel - 0.5) * self.GWL_base
            #-Report groundwater
            self.reporting.reporting(self, pcr, 'GWL', GWL)

        #-Report Total runoff
        TotR = self.BaseR + self.RainR + self.SnowR + self.GlacR
        self.reporting.reporting(self, pcr, 'TotRF', TotR)

        #-Routing for lake and/or reservoir modules
        if self.LakeFLAG == 1 or self.ResFLAG == 1:
            #-read dynamic processes advanced routing
            Q = self.advanced_routing.dynamic(self, pcr, pcrm, config, TotR,
                                              self.ETOpenWater, PrecipTot)

        #-Normal routing module
        elif self.RoutFLAG == 1:
            self.routing.dynamic(self, pcr, TotR)

            if self.GlacFLAG:
                #-read dynamic reporting processes glacier
                self.glacier.dynamic_reporting(self, pcr, pd, np)

        #-Water balance
        if self.GlacFLAG and self.GlacRetreat == 1:
            GlacTable_MODid = self.GlacTable.loc[:, ['FRAC_GLAC', 'ICE_DEPTH']]
            GlacTable_MODid['ICE_DEPTH'] = GlacTable_MODid[
                'ICE_DEPTH'] * GlacTable_MODid['FRAC_GLAC']
            GlacTable_MODid = GlacTable_MODid.groupby(
                GlacTable_MODid.index).sum()
            GlacTable_MODid.fillna(0., inplace=True)
            #-Report pcraster map of glacier depth
            iceDepth = pcr.numpy.zeros(self.ModelID_1d.shape)
            iceDepth[self.GlacierKeys] = GlacTable_MODid['ICE_DEPTH']
            iceDepth = iceDepth.reshape(self.ModelID.shape)
            iceDepth = pcr.numpy2pcr(pcr.Scalar, iceDepth, self.MV)
            iceDepth = pcr.ifthen(
                self.clone, iceDepth)  #-only use values where clone is True
            iceDepth = iceDepth * 1000  # in mm
            #-change in storage
            dS = ((self.RootWater - self.oldRootWater) + (self.SubWater - self.oldSubWater)) * (1-self.GlacFrac) + (self.Gw - self.oldGw) + \
             (self.TotalSnowStore-OldTotalSnowStore) + (iceDepth - self.oldIceDepth)
            #-set old state variables for glacier
            self.oldIceDepth = iceDepth
            iceDepth = None
            del iceDepth
            GlacTable_MODid = None
            del GlacTable_MODid
        elif self.GroundFLAG:
            #-change in storage
            dS = ((self.RootWater - self.oldRootWater) + (self.SubWater - self.oldSubWater)) * (1-self.GlacFrac) + (self.Gw - self.oldGw) + \
             (self.TotalSnowStore-OldTotalSnowStore)
            # set old state variables for groundwater
            self.oldGw = self.Gw
        else:
            #-change in storage
            dS = ((self.RootWater - self.oldRootWater) +
                  (self.SubWater - self.oldSubWater)) * (1 - self.GlacFrac) + (
                      self.TotalSnowStore - OldTotalSnowStore)

        #-water balance per time step
        if self.GroundFLAG:
            waterbalance = Precip - ActETact - self.BaseR - self.RainR - self.SnowR - self.GlacR - dS
        else:
            waterbalance = Precip - ActETact - self.BaseR - self.RainR - self.SnowR - dS - self.SeePage
        self.reporting.reporting(self, pcr, 'wbal', waterbalance)

        #-total water balance
        self.waterbalanceTot = self.waterbalanceTot + waterbalance
        #-report water balance and accumulated water balance
        if self.wbal_TSS_FLAG and (self.RoutFLAG == 1 or self.ResFLAG == 1
                                   or self.LakeFLAG == 1):
            self.wbalTSS.sample(
                pcr.catchmenttotal(waterbalance, self.FlowDir) /
                pcr.catchmenttotal(1., self.FlowDir))
            self.wbalTotTSS.sample(
                pcr.catchmenttotal(self.waterbalanceTot, self.FlowDir) /
                pcr.catchmenttotal(1., self.FlowDir))
        # set old state variables
        self.oldRootWater = self.RootWater
        self.oldSubWater = self.SubWater
        waterbalance = None
        del waterbalance
        dS = None
        del dS
        #-End of water balance calculations

        #-Sediment yield
        if self.SedFLAG == 1:
            #-determine runoff in mm per day
            if self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1:
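                #-Q [m3/s] is produced by the lake/reservoir routing above;
                # convert it to mm per day over the cell area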
                Runoff = (Q * 3600 * 24) / pcr.cellarea() * 1000
            else:
                Runoff = TotR

            #-MUSLE
            if self.SedModel == 1:
                #-read dynamic processes musle
                self.musle.dynamic(self, pcr, Runoff)

                #-sediment transport
                if self.SedTransFLAG == 1:
                    #-read dynamic sediment transport processes musle
                    self.sediment_transport.dynamic_musle(self, pcr)

            #-Modified Morgan-Morgan-Finney model
            if self.SedModel == 2:
                #-determine soil erosion in transport (G)
                G = self.mmf.dynamic(self, pcr, Precip, Runoff)

                #-sediment transport
                if self.SedTransFLAG == 1:
                    #-read dynamic sediment transport processes mmf
                    self.sediment_transport.dynamic_mmf(
                        self, pcr, Runoff, np, G)

        #-update current date
        self.curdate = self.curdate + self.datetime.timedelta(days=1)
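# A toy closure check (hypothetical scalar values, mm per day) of the water
# balance expression used above when the groundwater module is active:
Precip, ActETact, BaseR, RainR, SnowR, GlacR, dS = 10.0, 2.5, 1.0, 3.0, 0.5, 0.2, 2.8
waterbalance = Precip - ActETact - BaseR - RainR - SnowR - GlacR - dS
assert abs(waterbalance) < 1e-9  # closes to zero when every flux is accounted for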
Example #30
0
def netcdf2PCRobjCloneWithoutTime(ncFile,varName,
                                  cloneMapFileName  = None,\
                                  LatitudeLongitude = True,\
                                  specificFillValue = None):
    # 
    # EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
    # --- with clone checking
    #     Only works if cells are 'square'.
    #     Only works if cellsizeClone <= cellsizeInput
    # Get netCDF file and variable name:
    if ncFile in filecache:
        f = filecache[ncFile]
        #~ print "Cached: ", ncFile
    else:
        f = nc.Dataset(ncFile)
        filecache[ncFile] = f
        #~ print "New: ", ncFile
    
    #print ncFile
    #f = nc.Dataset(ncFile)  
    varName = str(varName)
    
    if LatitudeLongitude:
        try:
            f.variables['lat'] = f.variables['latitude']
            f.variables['lon'] = f.variables['longitude']
        except KeyError:
            pass
    
    sameClone = True
    # check whether clone and input maps have the same attributes:
    if cloneMapFileName is not None:
        # get the attributes of cloneMap
        attributeClone = getMapAttributesALL(cloneMapFileName)
        cellsizeClone = attributeClone['cellsize']
        rowsClone = attributeClone['rows']
        colsClone = attributeClone['cols']
        xULClone = attributeClone['xUL']
        yULClone = attributeClone['yUL']
        # get the attributes of input (netCDF) 
        cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
        cellsizeInput = float(cellsizeInput)
        rowsInput = len(f.variables['lat'])
        colsInput = len(f.variables['lon'])
        xULInput = f.variables['lon'][0]-0.5*cellsizeInput
        yULInput = f.variables['lat'][0]+0.5*cellsizeInput
        # check whether both maps have the same attributes 
        if cellsizeClone != cellsizeInput: sameClone = False
        if rowsClone != rowsInput: sameClone = False
        if colsClone != colsInput: sameClone = False
        if xULClone != xULInput: sameClone = False
        if yULClone != yULInput: sameClone = False

    cropData = f.variables[varName][:,:]       # still original data
    factor = 1                                 # needed in regridData2FinerGrid
    if not sameClone:
        # crop to cloneMap:
        minX    = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
        xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
        xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
        minY    = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
        yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
        yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
        cropData = f.variables[varName][yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
        factor = int(round(float(cellsizeInput)/float(cellsizeClone)))

    # convert to PCR object and close f
    if specificFillValue is not None:
        outPCR = pcr.numpy2pcr(pcr.Scalar, \
                  regridData2FinerGrid(factor,cropData,MV), \
                  float(specificFillValue))
    else:
        outPCR = pcr.numpy2pcr(pcr.Scalar, \
                  regridData2FinerGrid(factor,cropData,MV), \
                  float(f.variables[varName]._FillValue))
                  
    #~ # debug:
    #~ pcr.report(outPCR,"tmp.map")
    #~ print(varName)
    #~ os.system('aguila tmp.map')
    
    # f is kept open in filecache for reuse; only release local references
    f = None; cropData = None
    # PCRaster object
    return (outPCR)
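# A minimal usage sketch; file, variable and clone names are hypothetical.
dem = netcdf2PCRobjCloneWithoutTime('input/dem.nc', 'elevation',
                                    cloneMapFileName='clone.map')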
Example #31
0
def joinMaps(inputTuple):
	'''Merges maps starting from an input tuple that specifies the output map
	name, the number of rows and columns, the upper-left X and Y coordinates,
	the cell length, the missing value identifier, a list of input maps and
	the clone map file name.'''
	outputFileName= inputTuple[0]
	nrRows= inputTuple[1]
	nrCols= inputTuple[2]
	xMin= inputTuple[3]
	yMax= inputTuple[4]
	cellLength= inputTuple[5]
	MV= inputTuple[6]
	fileNames= inputTuple[7]
	cloneFileName= inputTuple[8]
	#-echo to screen/logger
	msg = 'combining files for %s' % outputFileName
	logger.info(msg)
	#-get extent
	xMax= xMin+nrCols*cellLength
	yMin= yMax-nrRows*cellLength
	xCoordinates= xMin+np.arange(nrCols+1)*cellLength
	yCoordinates= yMin+np.arange(nrRows+1)*cellLength
	yCoordinates= np.flipud(yCoordinates)
	msg = 'between %.2f, %.2f and %.2f, %.2f' % (xMin,yMin,xMax,yMax)
	logger.info(msg)

	#~ #-set output array
	#~ variableArray= np.ones((nrRows,nrCols))*MV
	#-set initial output array to zero
	variableArray= np.zeros((nrRows,nrCols))

	#-iterate over maps
	for fileName in fileNames:
		
		print(fileName)
		attributeClone= getMapAttributesALL(fileName)
		cellLengthClone= attributeClone['cellsize']
		rowsClone= attributeClone['rows']
		colsClone= attributeClone['cols']
		xULClone= attributeClone['xUL']
		yULClone= attributeClone['yUL']
		# check whether both maps have the same attributes and process
		process, nd= checkResolution(cellLength,cellLengthClone)
		
		if process:
			#-get coordinates and locations
			sampleXMin= xULClone
			sampleXMax= xULClone+colsClone*cellLengthClone
			sampleYMin= yULClone-rowsClone*cellLengthClone
			sampleYMax= yULClone
			sampleXCoordinates= sampleXMin+np.arange(colsClone+1)*cellLengthClone
			sampleYCoordinates= sampleYMin+np.arange(rowsClone+1)*cellLengthClone
			sampleYCoordinates= np.flipud(sampleYCoordinates)
			sampleXMin= getMax(xMin,sampleXMin)
			sampleXMax= getMin(xMax,sampleXMax)
			sampleYMin= getMax(yMin,sampleYMin)
			sampleYMax= getMin(yMax,sampleYMax)
			sampleRow0= getPosition(sampleYMin,sampleYCoordinates,nd)
			sampleRow1= getPosition(sampleYMax,sampleYCoordinates,nd)			
			sampleCol0= getPosition(sampleXMin,sampleXCoordinates,nd)
			sampleCol1= getPosition(sampleXMax,sampleXCoordinates,nd)
			sampleRow0, sampleRow1= checkRowPosition(sampleRow0,sampleRow1)
			variableRow0= getPosition(sampleYMin,yCoordinates,nd)
			variableRow1= getPosition(sampleYMax,yCoordinates,nd)
			variableCol0= getPosition(sampleXMin,xCoordinates,nd)
			variableCol1= getPosition(sampleXMax,xCoordinates,nd)
			variableRow0,variableRow1= checkRowPosition(variableRow0,variableRow1)
			#-read sample array
			setclone(fileName)
			sampleArray= pcr2numpy(readmap(fileName),MV)
			
			print(sampleArray)
			
			sampleNrRows, sampleNrCols= sampleArray.shape

			# -create mask
			#~ mask= (variableArray[variableRow0:variableRow1,variableCol0:variableCol1] == MV) &\
				#~ (sampleArray[sampleRow0:sampleRow1,sampleCol0:sampleCol1] != MV)
			mask= (variableArray[variableRow0:variableRow1,variableCol0:variableCol1] != MV) &\
				(sampleArray[sampleRow0:sampleRow1,sampleCol0:sampleCol1] != MV)

			#-add values
			msg = ' adding values in %d, %d rows, columns from (x, y) %.3f, %.3f and %.3f, %.3f to position (row, col) %d, %d and %d, %d' %\
				(sampleNrRows, sampleNrCols,sampleXMin,sampleYMin,sampleXMax,sampleYMax,variableRow0,variableCol0,variableRow1,variableCol1)
			logger.info(msg)	
	
			#~ variableArray[variableRow0:variableRow1,variableCol0:variableCol1][mask]= \
				#~ sampleArray[sampleRow0:sampleRow1,sampleCol0:sampleCol1][mask]
	
			variableArray[variableRow0:variableRow1,variableCol0:variableCol1][mask] += sampleArray[sampleRow0:sampleRow1,sampleCol0:sampleCol1][mask]

		else:

			msg = '%s does not match resolution and is not processed' % fileName
			logger.warning(msg)

	#-report output map
	setclone(cloneFileName)
	report(numpy2pcr(Scalar,variableArray,MV),outputFileName)
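# A sketch of the tuple layout joinMaps expects (all values hypothetical):
# output name, rows, cols, x-min, y-max, cell length, missing value,
# list of input maps, clone map.
inputTuple = ('merged.map', 360, 720, -180.0, 90.0, 0.5, -9999.0,
              ['tile_a.map', 'tile_b.map'], 'clone.map')
joinMaps(inputTuple)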
Example #32
0
def complexreservoir(
    waterlevel,
    ReserVoirLocs,
    LinkedReserVoirLocs,
    ResArea,
    ResThreshold,
    ResStorFunc,
    ResOutflowFunc,
    sh,
    hq,
    res_b,
    res_e,
    inflow,
    precip,
    pet,
    ReservoirComplexAreas,
    JDOY,
    timestepsecs=86400,
):

    mv = -999.0

    inflow = pcr.ifthen(pcr.boolean(ReserVoirLocs), inflow)

    prec_av = pcr.ifthen(pcr.boolean(ReserVoirLocs),
                         pcr.areaaverage(precip, ReservoirComplexAreas))
    pet_av = pcr.ifthen(pcr.boolean(ReserVoirLocs),
                        pcr.areaaverage(pet, ReservoirComplexAreas))

    np_reslocs = pcr.pcr2numpy(ReserVoirLocs, 0.0)
    np_linkedreslocs = pcr.pcr2numpy(LinkedReserVoirLocs, 0.0)

    _outflow = []
    nr_loop = np.max([int(timestepsecs / 21600), 1])
    for n in range(0, nr_loop):
        np_waterlevel = pcr.pcr2numpy(waterlevel, np.nan)
        np_waterlevel_lower = np_waterlevel.copy()

        for val in np.unique(np_linkedreslocs):
            if val > 0:
                np_waterlevel_lower[np_linkedreslocs == val] = np_waterlevel[
                    np.where(np_reslocs == val)]
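        # np_waterlevel_lower now holds, at each reservoir cell, the level of
        # its linked (downstream) reservoir, so diff_wl below is the head
        # difference across each link.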

        diff_wl = np_waterlevel - np_waterlevel_lower
        diff_wl[np.isnan(diff_wl)] = mv
        np_waterlevel_lower[np.isnan(np_waterlevel_lower)] = mv

        pcr_diff_wl = pcr.numpy2pcr(pcr.Scalar, diff_wl, mv)
        pcr_wl_lower = pcr.numpy2pcr(pcr.Scalar, np_waterlevel_lower, mv)

        storage_start = pcr.ifthenelse(
            ResStorFunc == 1,
            ResArea * waterlevel,
            lookupResFunc(ReserVoirLocs, waterlevel, sh, "0-1"),
        )

        outflow = pcr.ifthenelse(
            ResOutflowFunc == 1,
            lookupResRegMatr(ReserVoirLocs, waterlevel, hq, JDOY),
            pcr.ifthenelse(
                pcr_diff_wl >= 0,
                pcr.max(res_b * (waterlevel - ResThreshold)**res_e, 0),
                pcr.min(-1 * res_b * (pcr_wl_lower - ResThreshold)**res_e, 0),
            ),
        )

        np_outflow = pcr.pcr2numpy(outflow, np.nan)
        np_outflow_linked = np_reslocs * 0.0

        with np.errstate(invalid="ignore"):
            if np_outflow[np_outflow < 0] is not None:
                np_outflow_linked[np.in1d(
                    np_reslocs, np_linkedreslocs[np_outflow < 0]).reshape(
                        np_linkedreslocs.shape)] = np_outflow[np_outflow < 0]

        outflow_linked = pcr.numpy2pcr(pcr.Scalar, np_outflow_linked, 0.0)

        fl_nr_loop = float(nr_loop)
        storage = (
            storage_start + (inflow * timestepsecs / fl_nr_loop) +
            (prec_av / fl_nr_loop / 1000.0) * ResArea -
            (pet_av / fl_nr_loop / 1000.0) * ResArea -
            (pcr.cover(outflow, 0.0) * timestepsecs / fl_nr_loop) +
            (pcr.cover(outflow_linked, 0.0) * timestepsecs / fl_nr_loop))

        waterlevel = pcr.ifthenelse(
            ResStorFunc == 1,
            waterlevel + (storage - storage_start) / ResArea,
            lookupResFunc(ReserVoirLocs, storage, sh, "1-0"),
        )

        np_outflow_nz = np_outflow * 0.0
        with np.errstate(invalid="ignore"):
            np_outflow_nz[np_outflow > 0] = np_outflow[np_outflow > 0]
        _outflow.append(np_outflow_nz)

    outflow_av_temp = np.average(_outflow, 0)
    outflow_av_temp[np.isnan(outflow_av_temp)] = mv
    outflow_av = pcr.numpy2pcr(pcr.Scalar, outflow_av_temp, mv)

    return waterlevel, outflow_av, prec_av, pet_av, storage
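# The routine sub-steps the reservoir balance in 6-hour (21600 s) increments;
# for example, a daily model step runs four internal iterations:
timestepsecs = 86400
nr_loop = max(int(timestepsecs / 21600), 1)  # -> 4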
Example #33
0
  def test_numpy2pcr(self):
      nrRows, nrCols, cellSize = 3, 2, 1.0
      west, north = 0.0, 0.0
      pcraster.setclone(nrRows, nrCols, cellSize, west, north)

      # Values in array must fit the value scale of the raster exactly. This
      # will be checked.

      # Valid boolean values are: 0, 1, missing_value.
      # Valid ldd values are: 1, 2, 3, 4, 5, 6, 7, 8, 9, missing_value.
      # Valid nominal values are: [-2^31 + 1, 2^31], missing_value.
      # Valid ordinal values are: [-2^31 + 1, 2^31], missing_value.
      # Valid scalar values are: All 32 bit float values.
      # Valid directional values are: All 32 bit float values.

      # bool_min = 0
      # bool_max = 1
      int8_min = numpy.iinfo(numpy.int8).min
      int8_max = numpy.iinfo(numpy.int8).max
      int32_min = numpy.iinfo(numpy.int32).min
      int32_max = numpy.iinfo(numpy.int32).max
      int64_min = numpy.iinfo(numpy.int64).min
      int64_max = numpy.iinfo(numpy.int64).max

      # bool -> Boolean (uint8)
      raster = pcraster.numpy2pcr(pcraster.Boolean, numpy.array([
          [1,  1],
          [0,  5],
          [1,  1]], numpy.bool_), 5)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (True, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (False, True))
      # It is not possible to create a bool array with other values than
      # 0 and 1. Passing 5 as missing value has no effect.
      self.assertEqual(pcraster.cellvalue(raster, 2, 2), (True, True))

      # int8 -> Boolean (uint8)
      raster = pcraster.numpy2pcr(pcraster.Boolean, numpy.array([
          [1,  1],
          [0,  5],
          [1,  1]], numpy.int8), 5)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (True, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (False, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)

      with self.assertRaises(Exception) as context_manager:
          raster = pcraster.numpy2pcr(pcraster.Boolean, numpy.array([
              [1,  1],
              [9,  5],
              [1,  1]], numpy.int8), 5)
      self.assertEqual(str(context_manager.exception),
          "Incorrect value 9 at input array [1][0] for Boolean map")

      # int8 -> Ldd (uint8)
      raster = pcraster.numpy2pcr(pcraster.Ldd, numpy.array([
          [1,  2],
          [5,  15],
          [8,  9]], numpy.int8), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (1, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)

      with self.assertRaises(Exception) as context_manager:
          raster = pcraster.numpy2pcr(pcraster.Ldd, numpy.array([
              [ 1,  2],
              [10, 15],
              [ 8,  9]], numpy.int8), 15)
      self.assertEqual(str(context_manager.exception),
          "Incorrect value 10 at input array [1][0] for LDD map")

      # int8 -> Nominal (int32)
      # All valid int8 values are valid nominal values.
      raster = pcraster.numpy2pcr(pcraster.Nominal, numpy.array([
          [int8_min,         2],
          [       5,        15],
          [       8,  int8_max]], numpy.int8), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (int8_min, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (       5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)
      self.assertEqual(pcraster.cellvalue(raster, 3, 2), (int8_max, True))

      # int8 -> Ordinal (int32)
      # All valid int8 values are valid ordinal values.
      raster = pcraster.numpy2pcr(pcraster.Ordinal, numpy.array([
          [int8_min,         2],
          [       5,        15],
          [       8,  int8_max]], numpy.int8), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (int8_min, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (       5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)
      self.assertEqual(pcraster.cellvalue(raster, 3, 2), (int8_max, True))

      # int8 -> Scalar (float32)
      # All valid int8 values are valid scalar values.
      raster = pcraster.numpy2pcr(pcraster.Scalar, numpy.array([
          [int8_min,        2],
          [       5,       15],
          [       8, int8_max]], numpy.int8), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (int8_min, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (       5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)
      self.assertEqual(pcraster.cellvalue(raster, 3, 2), (int8_max, True))

      # int8 -> Directional (float32)
      # All valid int8 values are valid directional values.
      raster = pcraster.numpy2pcr(pcraster.Directional, numpy.array([
          [int8_min,        2],
          [       5,       15],
          [       8, int8_max]], numpy.int8), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (int8_min, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (       5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)
      self.assertEqual(pcraster.cellvalue(raster, 3, 2), (int8_max, True))

      # int16 TODO
      # int32 TODO

      # int64 -> Boolean (uint8)
      raster = pcraster.numpy2pcr(pcraster.Boolean, numpy.array([
          [1,  1],
          [0,  5],
          [1,  1]], numpy.int64), 5)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (True, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (False, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)

      with self.assertRaises(Exception) as context_manager:
          raster = pcraster.numpy2pcr(pcraster.Boolean, numpy.array([
              [1,  1],
              [9,  5],
              [1,  1]], numpy.int64), 5)
      self.assertEqual(str(context_manager.exception),
          "Incorrect value 9 at input array [1][0] for Boolean map")

      # int64 -> Ldd (uint8)
      raster = pcraster.numpy2pcr(pcraster.Ldd, numpy.array([
          [1,  2],
          [5,  15],
          [8,  9]], numpy.int64), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (1, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)

      with self.assertRaises(Exception) as context_manager:
          raster = pcraster.numpy2pcr(pcraster.Ldd, numpy.array([
              [ 1,  2],
              [10, 15],
              [ 8,  9]], numpy.int64), 15)
      self.assertEqual(str(context_manager.exception),
          "Incorrect value 10 at input array [1][0] for LDD map")

      # int64 -> Nominal (int32)
      raster = pcraster.numpy2pcr(pcraster.Nominal, numpy.array([
          [int32_min+1,         2],
          [          5,        15],
          [          8, int32_max]], numpy.int64), 15)
      self.assertEqual(pcraster.cellvalue(raster, 1, 1), (int32_min + 1, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 1), (            5, True))
      self.assertEqual(pcraster.cellvalue(raster, 2, 2)[1], False)
      self.assertEqual(pcraster.cellvalue(raster, 3, 2), (    int32_max, True))

      with self.assertRaises(Exception) as context_manager:
          raster = pcraster.numpy2pcr(pcraster.Nominal, numpy.array([
          [int32_min+1,         2],
          [  int32_min,        15],
          [          8, int32_max]], numpy.int64), 15)
      self.assertEqual(str(context_manager.exception),
          "Incorrect value -2147483648 at input array [1][0] for Nominal map")

      # uint8 TODO
      # uint16 TODO
      # uint32 TODO
      # uint64 TODO

      # float16 TODO
      # float32 TODO
      # float64 TODO
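      # A float64 -> Scalar case would follow the same pattern; a sketch (the
      # expected values are assumptions, not validated against PCRaster):
      #   raster = pcraster.numpy2pcr(pcraster.Scalar, numpy.array([
      #       [-1.5,  2.5],
      #       [ 5.0, 15.0],
      #       [ 8.0,  0.5]], numpy.float64), 15)
      #   assert pcraster.cellvalue(raster, 1, 1) == (-1.5, True)
      #   assert pcraster.cellvalue(raster, 2, 2)[1] == False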

      # complex64: Not supported.
      with self.assertRaises(Exception) as context_manager:
          raster = pcraster.numpy2pcr(pcraster.Nominal, numpy.array([
          [-2, -1],
          [ 0, 15],
          [ 1,  2]], numpy.complex64), 15)
      self.assertEqual(str(context_manager.exception),
          "Unsupported array type")
Example #34
def main():
    ### Read input arguments #####
    parser = OptionParser()
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option('-q', '--quiet',
                      dest='verbose', default=True, action='store_false',
                      help='do not print status messages to stdout')
    parser.add_option('-i', '--ini', dest='inifile',
                      default='hand_contour_inun.ini', nargs=1,
                      help='ini configuration file')
    parser.add_option('-f', '--flood_map',
                      nargs=1, dest='flood_map',
                      help='Flood map file (NetCDF point time series file)')
    parser.add_option('-v', '--flood_variable',
                      nargs=1, dest='flood_variable',
                      default='water_level',
                      help='variable name of flood water level')
    parser.add_option('-b', '--bankfull_map',
                      dest='bankfull_map', default='',
                      help='Map containing bank full level (is subtracted from flood map, in NetCDF)')
    parser.add_option('-c', '--catchment',
                      dest='catchment_strahler', default=7, type='int',
                      help='Strahler order threshold >= are selected as catchment boundaries')
    parser.add_option('-s', '--hand_strahler',
                      dest='hand_strahler', default=7, type='int',
                      help='Strahler order threshold >= selected as riverine')
    parser.add_option('-d', '--destination',
                      dest='dest_path', default='inun',
                      help='Destination path')
    (options, args) = parser.parse_args()
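    # Example invocation (hypothetical paths and values):
    #   python hand_contour_inun.py -i hand_contour_inun.ini -f flood.nc \
    #       -b bankfull.nc -c 7 -s 7 -d ./inun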

    if not os.path.exists(options.inifile):
        print 'path to ini file cannot be found'
        sys.exit(1)
    options.dest_path = os.path.abspath(options.dest_path)

    if not(os.path.isdir(options.dest_path)):
        os.makedirs(options.dest_path)

    # set up the logger
    flood_name = os.path.split(options.flood_map)[1].split('.')[0]
    case_name = 'inun_{:s}_hand_{:02d}_catch_{:02d}'.format(flood_name, options.hand_strahler, options.catchment_strahler)
    logfilename = os.path.join(options.dest_path, 'hand_contour_inun.log')
    logger, ch = inun_lib.setlogger(logfilename, 'HAND_INUN', options.verbose)
    logger.info('$Id: $')
    logger.info('Flood map: {:s}'.format(options.flood_map))
    logger.info('Bank full map: {:s}'.format(options.bankfull_map))
    logger.info('Destination path: {:s}'.format(options.dest_path))
    # read out ini file
    ### READ CONFIG FILE
    # open config-file
    config = inun_lib.open_conf(options.inifile)
    
    # read settings
    options.dem_file = inun_lib.configget(config, 'maps',
                                  'dem_file',
                                  True)
    options.ldd_file = inun_lib.configget(config, 'maps',
                                'ldd_file',
                                 True)
    options.stream_file = inun_lib.configget(config, 'maps',
                                'stream_file',
                                 True)
    options.riv_length_file = inun_lib.configget(config, 'maps',
                                'riv_length_file',
                                 True)
    options.riv_width_file = inun_lib.configget(config, 'maps',
                                'riv_width_file',
                                 True)
    options.file_format = inun_lib.configget(config, 'maps',
                                'file_format', 0, datatype='int')
    options.x_tile = inun_lib.configget(config, 'tiling',
                                  'x_tile', 10000, datatype='int')
    options.y_tile = inun_lib.configget(config, 'tiling',
                                  'y_tile', 10000, datatype='int')
    options.x_overlap = inun_lib.configget(config, 'tiling',
                                  'x_overlap', 1000, datatype='int')
    options.y_overlap = inun_lib.configget(config, 'tiling',
                                  'y_overlap', 1000, datatype='int')
    options.iterations = inun_lib.configget(config, 'inundation',
                                  'iterations', 20, datatype='int')
    options.initial_level = inun_lib.configget(config, 'inundation',
                                  'initial_level', 32., datatype='float')
    options.area_multiplier = inun_lib.configget(config, 'inundation',
                                  'area_multiplier', 1., datatype='float')
    logger.info('DEM file: {:s}'.format(options.dem_file))
    logger.info('LDD file: {:s}'.format(options.ldd_file))
    logger.info('Columns per tile: {:d}'.format(options.x_tile))
    logger.info('Rows per tile: {:d}'.format(options.y_tile))
    logger.info('Columns overlap: {:d}'.format(options.x_overlap))
    logger.info('Rows overlap: {:d}'.format(options.y_overlap))
    metadata_global = {}
    # add metadata from the section [metadata]
    meta_keys = config.options('metadata_global')
    for key in meta_keys:
        metadata_global[key] = config.get('metadata_global', key)
    # add a number of metadata variables that are mandatory
    metadata_global['config_file'] = os.path.abspath(options.inifile)
    metadata_var = {}
    metadata_var['units'] = 'm'
    metadata_var['standard_name'] = 'water_surface_height_above_reference_datum'
    metadata_var['long_name'] = 'Coastal flooding'
    metadata_var['comment'] = 'water_surface_reference_datum_altitude is given in file {:s}'.format(options.dem_file)
    if not os.path.exists(options.dem_file):
        logger.error('path to dem file {:s} cannot be found'.format(options.dem_file))
        sys.exit(1)
    if not os.path.exists(options.ldd_file):
        logger.error('path to ldd file {:s} cannot be found'.format(options.ldd_file))
        sys.exit(1)

    # Read extent from a GDAL compatible file
    try:
        extent = inun_lib.get_gdal_extent(options.dem_file)
    except:
        msg = 'Input file {:s} not a gdal compatible file'.format(options.dem_file)
        inun_lib.close_with_error(logger, ch, msg)
        sys.exit(1)

    try:
        x, y = inun_lib.get_gdal_axes(options.dem_file, logging=logger)
        srs = inun_lib.get_gdal_projection(options.dem_file, logging=logger)
    except:
        msg = 'Input file {:s} not a gdal compatible file'.format(options.dem_file)
        inun_lib.close_with_error(logger, ch, msg)
        sys.exit(1)

    # read history from flood file
    if options.file_format == 0:
        a = nc.Dataset(options.flood_map, 'r')
        metadata_global['history'] = 'Created by: $Id: $, boundary conditions from {:s},\nhistory: {:s}'.format(os.path.abspath(options.flood_map), a.history)
        a.close()
    else:
        metadata_global['history'] = 'Created by: $Id: $, boundary conditions from {:s},\nhistory: {:s}'.format(os.path.abspath(options.flood_map), 'PCRaster file, no history')

    # first write subcatch maps and hand maps
    ############### TODO ######
    # setup a HAND file
    dem_name = os.path.split(options.dem_file)[1].split('.')[0]
    hand_file = os.path.join(options.dest_path, '{:s}_hand_strahler_{:02d}.tif'.format(dem_name, options.hand_strahler))
    if not(os.path.isfile(hand_file)):
        # hand file does not exist yet: generate it, otherwise skip
        logger.info('HAND file {:s} setting up...please wait...'.format(hand_file))
        hand_file_tmp = os.path.join(options.dest_path, '{:s}_hand_strahler_{:02d}.tif.tmp'.format(dem_name, options.hand_strahler))
        ds_hand = inun_lib.prepare_gdal(hand_file_tmp, x, y, logging=logger, srs=srs)
        band_hand = ds_hand.GetRasterBand(1)

        # Open terrain data for reading
        ds_dem, rasterband_dem = inun_lib.get_gdal_rasterband(options.dem_file)
        ds_ldd, rasterband_ldd = inun_lib.get_gdal_rasterband(options.ldd_file)
        ds_stream, rasterband_stream = inun_lib.get_gdal_rasterband(options.stream_file)
        n = 0
        for x_loop in range(0, len(x), options.x_tile):
            x_start = np.maximum(x_loop, 0)
            x_end = np.minimum(x_loop + options.x_tile, len(x))
            # determine actual overlap for cutting
            for y_loop in range(0, len(y), options.y_tile):
                x_overlap_min = x_start - np.maximum(x_start - options.x_overlap, 0)
                x_overlap_max = np.minimum(x_end + options.x_overlap, len(x)) - x_end
                n += 1
                # print('tile {:001d}:'.format(n))
                y_start = np.maximum(y_loop, 0)
                y_end = np.minimum(y_loop + options.y_tile, len(y))
                y_overlap_min = y_start - np.maximum(y_start - options.y_overlap, 0)
                y_overlap_max = np.minimum(y_end + options.y_overlap, len(y)) - y_end
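                # x/y_overlap_min/max are the extra columns/rows actually read
                # on each side, clipped at the raster edge; the same amounts are
                # trimmed off again below, so each tile is computed with spatial
                # context but written back without overlap.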
                # cut out DEM
                logger.debug('Computing HAND for xmin: {:d} xmax: {:d} ymin {:d} ymax {:d}'.format(x_start, x_end,y_start, y_end))
                terrain = rasterband_dem.ReadAsArray(x_start - x_overlap_min,
                                                     y_start - y_overlap_min,
                                                     (x_end + x_overlap_max) - (x_start - x_overlap_min),
                                                     (y_end + y_overlap_max) - (y_start - y_overlap_min)
                                                     )

                drainage = rasterband_ldd.ReadAsArray(x_start - x_overlap_min,
                                                     y_start - y_overlap_min,
                                                     (x_end + x_overlap_max) - (x_start - x_overlap_min),
                                                     (y_end + y_overlap_max) - (y_start - y_overlap_min)
                                                     )
                stream = rasterband_stream.ReadAsArray(x_start - x_overlap_min,
                                                       y_start - y_overlap_min,
                                                       (x_end + x_overlap_max) - (x_start - x_overlap_min),
                                                       (y_end + y_overlap_max) - (y_start - y_overlap_min)
                                                       )
                # write to temporary file
                terrain_temp_file = os.path.join(options.dest_path, 'terrain_temp.map')
                drainage_temp_file = os.path.join(options.dest_path, 'drainage_temp.map')
                stream_temp_file = os.path.join(options.dest_path, 'stream_temp.map')
                if rasterband_dem.GetNoDataValue() is not None:
                    inun_lib.gdal_writemap(terrain_temp_file, 'PCRaster',
                                      np.arange(0, terrain.shape[1]),
                                      np.arange(0, terrain.shape[0]),
                                      terrain, rasterband_dem.GetNoDataValue(),
                                      gdal_type=gdal.GDT_Float32,
                                      logging=logger)
                else:
                    # in case no nodata value is found
                    logger.warning('No nodata value found in {:s}. Assuming -9999.'.format(options.dem_file))
                    inun_lib.gdal_writemap(terrain_temp_file, 'PCRaster',
                                      np.arange(0, terrain.shape[1]),
                                      np.arange(0, terrain.shape[0]),
                                      terrain, -9999.,
                                      gdal_type=gdal.GDT_Float32,
                                      logging=logger)

                inun_lib.gdal_writemap(drainage_temp_file, 'PCRaster',
                                  np.arange(0, terrain.shape[1]),
                                  np.arange(0, terrain.shape[0]),
                                  drainage, rasterband_ldd.GetNoDataValue(),
                                  gdal_type=gdal.GDT_Int32,
                                  logging=logger)
                inun_lib.gdal_writemap(stream_temp_file, 'PCRaster',
                                  np.arange(0, terrain.shape[1]),
                                  np.arange(0, terrain.shape[0]),
                                  stream, rasterband_stream.GetNoDataValue(),
                                  gdal_type=gdal.GDT_Int32,
                                  logging=logger)
                # read as pcr objects
                pcr.setclone(terrain_temp_file)
                terrain_pcr = pcr.readmap(terrain_temp_file)
                drainage_pcr = pcr.lddrepair(pcr.ldd(pcr.readmap(drainage_temp_file)))  # convert to ldd type map
                stream_pcr = pcr.scalar(pcr.readmap(stream_temp_file))  # convert to scalar type map

                # compute streams
                stream_ge, subcatch = inun_lib.subcatch_stream(drainage_pcr, stream_pcr, options.hand_strahler) # generate streams

                basin = pcr.boolean(subcatch)
                hand_pcr, dist_pcr = inun_lib.derive_HAND(terrain_pcr, drainage_pcr, 3000, rivers=pcr.boolean(stream_ge), basin=basin)
                # convert to numpy
                hand = pcr.pcr2numpy(hand_pcr, -9999.)
                # cut relevant part
                if y_overlap_max == 0:
                    y_overlap_max = -hand.shape[0]
                if x_overlap_max == 0:
                    x_overlap_max = -hand.shape[1]
                hand_cut = hand[0+y_overlap_min:-y_overlap_max, 0+x_overlap_min:-x_overlap_max]

                band_hand.WriteArray(hand_cut, x_start, y_start)
                os.unlink(terrain_temp_file)
                os.unlink(drainage_temp_file)
                band_hand.FlushCache()
        ds_dem = None
        ds_ldd = None
        ds_stream = None
        band_hand.SetNoDataValue(-9999.)
        ds_hand = None
        logger.info('Finalizing {:s}'.format(hand_file))
        # rename temporary file to final hand file
        os.rename(hand_file_tmp, hand_file)
    else:
        logger.info('HAND file {:s} already exists...skipping...'.format(hand_file))

    #####################################################################################
    #  HAND file has now been prepared, moving to flood mapping part                    #
    #####################################################################################
    # load the static maps needed to estimate volumes across all tiles
    xax, yax, riv_length, fill_value = inun_lib.gdal_readmap(options.riv_length_file, 'GTiff')
    riv_length = np.ma.masked_where(riv_length==fill_value, riv_length)
    xax, yax, riv_width, fill_value = inun_lib.gdal_readmap(options.riv_width_file, 'GTiff')
    riv_width[riv_width == fill_value] = 0

    x_res = np.abs((xax[-1]-xax[0])/(len(xax)-1))
    y_res = np.abs((yax[-1]-yax[0])/(len(yax)-1))

    flood_folder = os.path.join(options.dest_path, case_name)
    flood_vol_map = os.path.join(flood_folder, '{:s}_vol.tif'.format(os.path.split(options.flood_map)[1].split('.')[0]))
    if not(os.path.isdir(flood_folder)):
        os.makedirs(flood_folder)
    inun_file_tmp = os.path.join(flood_folder, '{:s}.tif.tmp'.format(case_name))
    inun_file = os.path.join(flood_folder, '{:s}.tif'.format(case_name))
    hand_temp_file = os.path.join(flood_folder, 'hand_temp.map')
    drainage_temp_file = os.path.join(flood_folder, 'drainage_temp.map')
    stream_temp_file = os.path.join(flood_folder, 'stream_temp.map')
    flood_vol_temp_file = os.path.join(flood_folder, 'flood_warp_temp.tif')
    # load the data with river levels and compute the volumes
    if options.file_format == 0:
        # assume we need the maximum value in a NetCDF time series grid
        a = nc.Dataset(options.flood_map, 'r')
        xax = a.variables['x'][:]
        yax = a.variables['y'][:]

        flood_series = a.variables[options.flood_variable][:]
        flood_data = flood_series.max(axis=0)
        if np.ma.is_masked(flood_data):
            flood = flood_data.data
            flood[flood_data.mask] = 0
        else:
            flood = flood_data
        if yax[-1] > yax[0]:
            yax = np.flipud(yax)
            flood = np.flipud(flood)
        a.close()
    elif options.file_format == 1:
        xax, yax, flood, flood_fill_value = inun_lib.gdal_readmap(options.flood_map, 'PCRaster')
        flood[flood==flood_fill_value] = 0.
    #res_x = x[1]-x[0]
    #res_y = y[1]-y[0]

    # load the bankfull depths
    if options.bankfull_map == '':
        bankfull = np.zeros(flood.shape)
    else:
        if options.file_format == 0:
            a = nc.Dataset(options.bankfull_map, 'r')
            xax = a.variables['x'][:]
            yax = a.variables['y'][:]
            bankfull = a.variables[options.flood_variable][0, :, :]
            if yax[-1] > yax[0]:
                yax = np.flipud(yax)
                bankfull = np.flipud(bankfull)
            a.close()
        elif options.file_format == 1:
            xax, yax, bankfull, bankfull_fill_value = inun_lib.gdal_readmap(options.bankfull_map, 'PCRaster')
#     flood = bankfull*2
    # res_x = 2000
    # res_y = 2000
    # subtract the bankfull water level to get flood levels (above bankfull)
    flood_vol = np.maximum(flood-bankfull, 0)
    flood_vol_m = riv_length*riv_width*flood_vol/(x_res * y_res)  # depth above bankfull scaled by river area / cell area: equivalent water depth [m] over the full grid cell
    flood_vol_m_data = flood_vol_m.data
    flood_vol_m_data[flood_vol_m.mask] = -999.
    print('Saving water layer map to {:s}'.format(flood_vol_map))
    # write to a tiff file
    inun_lib.gdal_writemap(flood_vol_map, 'GTiff', xax, yax, np.maximum(flood_vol_m_data, 0), -999.)
    ds_hand, rasterband_hand = inun_lib.get_gdal_rasterband(hand_file)
    ds_ldd, rasterband_ldd = inun_lib.get_gdal_rasterband(options.ldd_file)
    ds_stream, rasterband_stream = inun_lib.get_gdal_rasterband(options.stream_file)

    logger.info('Preparing flood map in {:s} ...please wait...'.format(inun_file))
    ds_inun = inun_lib.prepare_gdal(inun_file_tmp, x, y, logging=logger, srs=srs)
    band_inun = ds_inun.GetRasterBand(1)

    # loop over all the tiles
    n = 0
    for x_loop in range(0, len(x), options.x_tile):
        x_start = np.maximum(x_loop, 0)
        x_end = np.minimum(x_loop + options.x_tile, len(x))
        # determine actual overlap for cutting
        for y_loop in range(0, len(y), options.y_tile):
            x_overlap_min = x_start - np.maximum(x_start - options.x_overlap, 0)
            x_overlap_max = np.minimum(x_end + options.x_overlap, len(x)) - x_end
            n += 1
            # print('tile {:001d}:'.format(n))
            y_start = np.maximum(y_loop, 0)
            y_end = np.minimum(y_loop + options.y_tile, len(y))
            y_overlap_min = y_start - np.maximum(y_start - options.y_overlap, 0)
            y_overlap_max = np.minimum(y_end + options.y_overlap, len(y)) - y_end
            x_tile_ax = x[x_start - x_overlap_min:x_end + x_overlap_max]
            y_tile_ax = y[y_start - y_overlap_min:y_end + y_overlap_max]

            # cut out DEM
            logger.debug('handling xmin: {:d} xmax: {:d} ymin {:d} ymax {:d}'.format(x_start, x_end, y_start, y_end))
            hand = rasterband_hand.ReadAsArray(x_start - x_overlap_min,
                                                 y_start - y_overlap_min,
                                                 (x_end + x_overlap_max) - (x_start - x_overlap_min),
                                                 (y_end + y_overlap_max) - (y_start - y_overlap_min)
                                                 )

            drainage = rasterband_ldd.ReadAsArray(x_start - x_overlap_min,
                                                 y_start - y_overlap_min,
                                                 (x_end + x_overlap_max) - (x_start - x_overlap_min),
                                                 (y_end + y_overlap_max) - (y_start - y_overlap_min)
                                                 )
            stream = rasterband_stream.ReadAsArray(x_start - x_overlap_min,
                                                   y_start - y_overlap_min,
                                                   (x_end + x_overlap_max) - (x_start - x_overlap_min),
                                                   (y_end + y_overlap_max) - (y_start - y_overlap_min)
                                                   )
            print('len x-ax: {:d} len y-ax {:d} x-shape {:d} y-shape {:d}'.format(len(x_tile_ax), len(y_tile_ax), hand.shape[1], hand.shape[0]))
            inun_lib.gdal_writemap(hand_temp_file, 'PCRaster',
                              x_tile_ax,
                              y_tile_ax,
                              hand, rasterband_hand.GetNoDataValue(),
                              gdal_type=gdal.GDT_Float32,
                              logging=logger)
            inun_lib.gdal_writemap(drainage_temp_file, 'PCRaster',
                              x_tile_ax,
                              y_tile_ax,
                              drainage, rasterband_ldd.GetNoDataValue(),
                              gdal_type=gdal.GDT_Int32,
                              logging=logger)
            inun_lib.gdal_writemap(stream_temp_file, 'PCRaster',
                              x_tile_ax,
                              y_tile_ax,
                              stream, rasterband_stream.GetNoDataValue(),
                              gdal_type=gdal.GDT_Int32,
                              logging=logger)
            # read as pcr objects
            pcr.setclone(hand_temp_file)
            hand_pcr = pcr.readmap(hand_temp_file)
            drainage_pcr = pcr.lddrepair(pcr.ldd(pcr.readmap(drainage_temp_file)))  # convert to ldd type map
            stream_pcr = pcr.scalar(pcr.readmap(stream_temp_file))  # convert to scalar type map
            # prepare a subcatchment map

            stream_ge, subcatch = inun_lib.subcatch_stream(drainage_pcr, stream_pcr, options.catchment_strahler) # generate subcatchments
            drainage_surf = pcr.ifthen(stream_ge > 0, pcr.accuflux(drainage_pcr, 1))  # proxy of drainage surface inaccurate at tile edges
            # compute weights for spreadzone (1/drainage_surf)
            subcatch = pcr.spreadzone(subcatch, 0, 0)

            # TODO check weighting scheme, perhaps not necessary
            # weight = 1./pcr.scalar(pcr.spreadzone(pcr.cover(pcr.ordinal(drainage_surf), 0), 0, 0))
            # subcatch_fill = pcr.scalar(pcr.spreadzone(subcatch, 0, weight))
            # # cover subcatch with subcatch_fill
            # pcr.report(weight, 'weight_{:02d}.map'.format(n))
            # pcr.report(subcatch, 'subcatch_{:02d}.map'.format(n))
            # pcr.report(pcr.nominal(subcatch_fill), 'subcatch_fill_{:02d}.map'.format(n))
            inun_lib.gdal_warp(flood_vol_map, hand_temp_file, flood_vol_temp_file, gdal_interp=gdalconst.GRA_NearestNeighbour)
            x_tile_ax, y_tile_ax, flood_meter, fill_value = inun_lib.gdal_readmap(flood_vol_temp_file, 'GTiff')
            # convert meter depth to volume [m3]
            flood_vol = pcr.numpy2pcr(pcr.Scalar, flood_meter, fill_value)*((x_tile_ax[1] - x_tile_ax[0]) * (y_tile_ax[0] - y_tile_ax[1]))  # multiply depth [m] by the cell area derived from the axis spacing to obtain volume [m3]
            ## now we have some nice volume. Now we need to redistribute!
            inundation_pcr = inun_lib.volume_spread(drainage_pcr, hand_pcr, subcatch, flood_vol,
                                           volume_thres=0., iterations=options.iterations,
                                           area_multiplier=options.area_multiplier) # 1166400000.
            inundation = pcr.pcr2numpy(inundation_pcr, -9999.)
            # cut relevant part
            if y_overlap_max == 0:
                y_overlap_max = -inundation.shape[0]
            if x_overlap_max == 0:
                x_overlap_max = -inundation.shape[1]
            inundation_cut = inundation[0+y_overlap_min:-y_overlap_max, 0+x_overlap_min:-x_overlap_max]
            # inundation_cut
            band_inun.WriteArray(inundation_cut, x_start, y_start)
            band_inun.FlushCache()
            # clean up
            os.unlink(flood_vol_temp_file)
            os.unlink(drainage_temp_file)
            os.unlink(hand_temp_file)

            # if n == 35:
            #     band_inun.SetNoDataValue(-9999.)
            #     ds_inun = None
            #     sys.exit(0)
    os.unlink(flood_vol_map)

    logger.info('Finalizing {:s}'.format(inun_file))
    # add the metadata to the file and band
    band_inun.SetNoDataValue(-9999.)
    ds_inun.SetMetadata(metadata_global)
    band_inun.SetMetadata(metadata_var)
    ds_inun = None
    ds_hand = None
    ds_ldd = None
    # rename temporary file to final inundation file
    if os.path.isfile(inun_file):
        # remove an old result if available
        os.unlink(inun_file)
    os.rename(inun_file_tmp, inun_file)

    logger.info('Done! Thank you for using hand_contour_inun.py')
    logger, ch = inun_lib.closeLogger(logger, ch)
    del logger, ch
    sys.exit(0)
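Both tile loops in this script derive their read windows with the same min/max bookkeeping. A hedged distillation of that logic as a standalone helper (the function name is illustrative, not part of inun_lib):

def tile_windows(n, tile, overlap):
    # Yield (start, end, pad_lo, pad_hi) per tile along one axis:
    # [start, end) is the tile proper; pad_lo/pad_hi are the extra cells
    # actually readable on each side, clipped at the raster edge.
    for lo in range(0, n, tile):
        start, end = max(lo, 0), min(lo + tile, n)
        pad_lo = start - max(start - overlap, 0)
        pad_hi = min(end + overlap, n) - end
        yield start, end, pad_lo, pad_hi

# A 25-cell axis with tile size 10 and overlap 3 yields:
# (0, 10, 0, 3), (10, 20, 3, 3), (20, 25, 3, 0)
for window in tile_windows(25, 10, 3):
    print(window)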
Example #35
def loadsetclone(name):
    """ Load 'MaskMap' and set as clone
        
    :param name: name of the key in Settings.xml containing path and name of mask map as string
    :return: map: mask map (False=include in modelling; True=exclude from modelling) as pcraster
    """
    settings = LisSettings.instance()
    binding = settings.binding
    flags = settings.flags
    filename = os.path.normpath(binding[name])
    if not os.path.exists(filename):
        raise LisfloodError('File not existing: {}'.format(filename))
    coord = filename.split()  # returns a list of all the words in the string
    if len(coord) == 5:
        # note the swapped order of x and y: in setclone the row count (y)
        # comes first, whereas in the Lisflood settings x comes first
        # setclone row col cellsize xupleft yupleft
        try:
            setclone(int(coord[1]), int(coord[0]), float(coord[2]),
                     float(coord[3]), float(coord[4]))  # CM: pcraster
        except:
            rem = "[" + str(coord[0]) + " " + str(coord[1]) + " " + str(
                coord[2]) + " " + str(coord[3]) + " " + str(coord[4]) + "]"
            msg = "Maskmap: " + rem + \
                  " are not valid coordinates (col row cellsize xupleft yupleft)"
            raise LisfloodError(msg)
        mapnp = np.ones((int(coord[1]), int(coord[0])))
        map_out = numpy2pcr(Boolean, mapnp, -9999)
    elif len(coord) == 1:
        # read information on clone map from map (pcraster or netcdf)
        try:
            # try to read a pcraster map
            iterSetClonePCR(filename)
            map_out = pcraster.boolean(iterReadPCRasterMap(filename))
            flagmap = True
            mapnp = pcr2numpy(map_out, np.nan)
        except Exception as e:
            # FIXME manage exceptions and print type of error
            # print(str(e))
            # print(type(e))
            # try to read a netcdf file
            filename = os.path.splitext(binding[name])[0] + '.nc'
            nf1 = iterOpenNetcdf(filename, "", "r")
            value = listitems(
                nf1.variables)[-1][0]  # get the last variable name
            if 'x' in nf1.variables:
                x1 = nf1.variables['x'][0]
                x2 = nf1.variables['x'][1]
                y1 = nf1.variables['y'][0]
            else:
                x1 = nf1.variables['lon'][0]
                x2 = nf1.variables['lon'][1]
                y1 = nf1.variables['lat'][0]

            cell_size = round(np.abs(x2 - x1), 4)
            nr_rows, nr_cols = nf1.variables[
                value].shape  # just use shape to know rows and cols...
            x = x1 - cell_size / 2
            y = y1 + cell_size / 2
            mapnp = np.array(nf1.variables[value][0:nr_rows, 0:nr_cols])
            nf1.close()
            # setclone  row col cellsize xupleft yupleft
            setclone(nr_rows, nr_cols, cell_size, x, y)
            map_out = numpy2pcr(Boolean, mapnp, 0)
            flagmap = True

        if flags['checkfiles']:
            checkmap(name, filename, map_out, flagmap, 0)
    else:
        raise LisfloodError(
            "Maskmap: {} is not a valid mask map nor valid coordinates".format(
                name))
    _ = MaskAttrs(uuid.uuid4())  # init maskattrs
    # put in the ldd map
    # if there is no ldd at a cell, this cell should be excluded from modelling
    ldd = loadmap('Ldd', pcr=True)
    # convert ldd to numpy
    maskldd = pcr2numpy(ldd, np.nan)
    # convert numpy map to 8bit
    maskarea = np.bool8(mapnp)
    # compute mask (pixels in maskldd AND maskarea)
    mask = np.logical_not(np.logical_and(maskldd, maskarea))
    _ = MaskInfo(mask, map_out)  # MaskInfo init here

    if flags['nancheck']:
        nanCheckMap(ldd, binding['Ldd'], 'Ldd')
    return map_out
def main():
	#-initialization
	# MVs
	MV= -999.
	# minimum catchment size to process
	catchmentSizeLimit= 0.0
	# period of interest, start and end year
	startYear= 1961
	endYear= 2010
	# maps
	cloneMapFileName= '/data/hydroworld/PCRGLOBWB20/input30min/global/Global_CloneMap_30min.map'
	lddFileName= '/data/hydroworld/PCRGLOBWB20/input30min/routing/lddsound_30min.map'
	cellAreaFileName= '/data/hydroworld/PCRGLOBWB20/input30min/routing/cellarea30min.map'
	# set clone 
	pcr.setclone(cloneMapFileName)
	# output
	outputPath= '/scratch/rens/reservedrecharge'
	percentileMapFileName= os.path.join(outputPath,'q%03d_cumsec.map')
	textFileName= os.path.join(outputPath,'groundwater_environmentalflow_%d.txt')
	fractionReservedRechargeMapFileName= os.path.join(outputPath,'fraction_reserved_recharge%d.map')
	fractionMinimumReservedRechargeMapFileName= os.path.join(outputPath,'minimum_fraction_reserved_recharge%d.map')
	# input
	inputPath= '/nfsarchive/edwin-emergency-backup-DO-NOT-DELETE/rapid/edwin/05min_runs_results/2015_04_27/non_natural_2015_04_27/global/netcdf/'
	# define data to be read from netCDF files
	ncData= {}
	variableName= 'totalRunoff'
	ncData[variableName]= {}
	ncData[variableName]['fileName']= os.path.join(inputPath,'totalRunoff_monthTot_output.nc')
	ncData[variableName]['fileRoot']= os.path.join(outputPath,'qloc')
	ncData[variableName]['annualAverage']= pcr.scalar(0)	
	variableName= 'gwRecharge'
	ncData[variableName]= {}
	ncData[variableName]['fileName']= os.path.join(inputPath,'gwRecharge_monthTot_output.nc')
	ncData[variableName]['fileRoot']= os.path.join(outputPath,'gwrec')
	ncData[variableName]['annualAverage']= pcr.scalar(0)
	variableName= 'discharge'
	ncData[variableName]= {}
	ncData[variableName]['fileName']= os.path.join(inputPath,'totalRunoff_monthTot_output.nc')
	ncData[variableName]['fileRoot']= os.path.join(outputPath,'qc')
	ncData[variableName]['annualAverage']= pcr.scalar(0)
	ncData[variableName]['mapStack']= np.array([])
	# percents and environmental flow condition set as percentile
	percents= range(10,110,10)
	environmentalFlowPercent= 10
	if environmentalFlowPercent not in percents:
		percents.append(environmentalFlowPercent)
		percents.sort()

	#-start
	# obtain attributes
	pcr.setclone(cloneMapFileName)
	cloneSpatialAttributes= spatialAttributes(cloneMapFileName)
	years= range(startYear,endYear+1)
	# output path
	if not os.path.isdir(outputPath):
		os.makedirs(outputPath)
	os.chdir(outputPath)
	# compute catchments
	ldd= pcr.readmap(lddFileName)
	cellArea= pcr.readmap(cellAreaFileName)
	catchments= pcr.catchment(ldd,pcr.pit(ldd))
	fractionWater= pcr.scalar(0.0) # temporary!
	lakeMask= pcr.boolean(0) # temporary!
	pcr.report(catchments,os.path.join(outputPath,'catchments.map'))
	maximumCatchmentID= int(pcr.cellvalue(pcr.mapmaximum(pcr.scalar(catchments)),1)[0])
	# iterate over years
	weight= float(len(years))**-1
	for year in years:
		#-echo year
		print ' - processing year %d' % year
		#-process data
		startDate= datetime.datetime(year,1,1)
		endDate= datetime.datetime(year,12,31)
		timeSteps= endDate.toordinal()-startDate.toordinal()+1
		dynamicIncrement= 1
		for variableName in ncData.keys():
			print '   extracting %s' % variableName,
			ncFileIn= ncData[variableName]['fileName']
			#-process data
			pcrDataSet= pcrObject(variableName, ncData[variableName]['fileRoot'],\
				ncFileIn,cloneSpatialAttributes, pcrVALUESCALE= pcr.Scalar, resamplingAllowed= True,\
				dynamic= True, dynamicStart= startDate, dynamicEnd= endDate, dynamicIncrement= dynamicIncrement, ncDynamicDimension= 'time')
			pcrDataSet.initializeFileInfo()
			pcrDataSet.processFileInfo()
			for fileInfo in pcrDataSet.fileProcessInfo.values()[0]:
				tempFileName= fileInfo[1]
				variableField= pcr.readmap(tempFileName)
				variableField= pcr.ifthen(pcr.defined(ldd),pcr.cover(variableField,0))
				if variableName == 'discharge':
					dayNumber= int(os.path.splitext(tempFileName)[1].strip('.'))
					date= datetime.date(year,1,1)+datetime.timedelta(dayNumber-1)
					numberDays= calendar.monthrange(year,date.month)[1]
					variableField= pcr.max(0,pcr.catchmenttotal(variableField*cellArea,ldd)/(numberDays*24*3600))
				ncData[variableName]['annualAverage']+= weight*variableField
				if 'mapStack' in ncData[variableName].keys():
					tempArray= pcr2numpy(variableField,MV)
					mask= tempArray != MV
					if ncData[variableName]['mapStack'].size != 0:
						ncData[variableName]['mapStack']= np.vstack((ncData[variableName]['mapStack'],tempArray[mask]))
					else:
						ncData[variableName]['mapStack']= tempArray[mask]
						coordinates= np.zeros((ncData[variableName]['mapStack'].size,2))
						pcr.setglobaloption('unitcell')
						tempArray= pcr2numpy(pcr.ycoordinate(pcr.boolean(1))+0.5,MV)
						coordinates[:,0]= tempArray[mask]
						tempArray= pcr2numpy(pcr.xcoordinate(pcr.boolean(1))+0.5,MV)
						coordinates[:,1]= tempArray[mask]      
				os.remove(tempFileName)				
			# delete object
			pcrDataSet= None
			del pcrDataSet
			# close line on screen
			print
	# report annual averages
	key= 'annualAverage'
	ncData['discharge'][key]/= 12
	for variableName in ncData.keys():
		ncData[variableName][key]= pcr.max(0,ncData[variableName][key])
		pcr.report(ncData[variableName][key],\
			os.path.join(outputPath,'%s_%s.map' % (variableName,key)))
	# remove aux.xml
	for tempFileName in os.listdir(outputPath):
		if 'aux.xml' in tempFileName:
			os.remove(tempFileName)
	# sort data
	print 'sorting discharge data'
	variableName= 'discharge'
	key= 'mapStack'
	indices= np.zeros((ncData[variableName][key].shape),np.uint)
	for iCnt in xrange(ncData[variableName][key].shape[1]):
		indices[:,iCnt]= ncData[variableName][key][:,iCnt].argsort(kind= 'mergesort')
		ncData[variableName][key][:,iCnt]= ncData[variableName][key][:,iCnt][indices[:,iCnt]]
	# extract values for percentiles
	print 'returning maps'
	for percent in percents:
		percentile= 0.01*percent
		index0= min(ncData[variableName][key].shape[0]-1,int(percentile*ncData[variableName][key].shape[0]))
		index1= min(ncData[variableName][key].shape[0]-1,int(percentile*ncData[variableName][key].shape[0])+1)
		x0= float(index0)/ncData[variableName][key].shape[0]
		x1= float(index1)/ncData[variableName][key].shape[0]
		if x0 != x1:
			y= ncData[variableName][key][index0,:]+(percentile-x0)*\
				 (ncData[variableName][key][index1,:]-ncData[variableName][key][index0,:])/(x1-x0)
		else:
			y= ncData[variableName][key][index0,:]
		# convert a slice of the stack into an array
		tempArray= np.ones((cloneSpatialAttributes.numberRows,cloneSpatialAttributes.numberCols))*MV
		for iCnt in xrange(coordinates.shape[0]):
			row= int(coordinates[iCnt,0])-1
			col= int(coordinates[iCnt,1])-1
			tempArray[row,col]= y[iCnt]
		variableField= numpy2pcr(pcr.Scalar,tempArray,MV)
		pcr.report(variableField,percentileMapFileName % percent)
		if percent == environmentalFlowPercent:
			ncData[variableName]['environmentalFlow']= variableField
		tempArray= None; variableField= None
		del tempArray, variableField
	# process environmental flow
	# initialize map of reserved recharge fraction
	fractionReservedRechargeMap= pcr.ifthen(ncData[variableName]['environmentalFlow'] < 0,pcr.scalar(0))
	fractionMinimumReservedRechargeMap= pcr.ifthen(ncData[variableName]['environmentalFlow'] < 0,pcr.scalar(0))
	textFile= open(textFileName % environmentalFlowPercent,'w')
	hStr= 'Environmental flow analysis per basin, resulting in a map of renewable, exploitable recharge, for the %d%s quantile of discharge\n' % (environmentalFlowPercent,'%')
	hStr+= 'Returns Q_%d/R, the fraction of reserved recharge needed to fully sustain the environmental flow requirement defined as the %d percentile,\n' % (environmentalFlowPercent, environmentalFlowPercent)
	hStr+= 'and Q*_%d/R, a reduced fraction that takes the availability of surface water into account\n' % environmentalFlowPercent
	textFile.write(hStr)
	print hStr
	# create header to display on screen and write to file
	# reported are: 1: ID, 2: Area, 3: average discharge, 4: environmental flow, 5: average recharge,
	# 6: Q_%d/Q, 7: Q_%d/R_Avg, 8: R_Avg/Q_Avg, 9: Q*_%d/R_Avg
	hStr= '%6s,%15s,%15s,%15s,%15s,%15s,%15s,%15s,%15s\n' % \
		('ID','Area [km2]','Q_Avg [m3]','Q_%d [m3]' % environmentalFlowPercent ,'R_Avg [m3]','Q_%d/Q_Avg [-]' % environmentalFlowPercent,\
			'Q_%d/R_Avg [-]' % environmentalFlowPercent,'R_Avg/Q_Avg [-]','Q*_%d/R_Avg [-]' % environmentalFlowPercent)
	textFile.write(hStr)
	print hStr
	for catchment in xrange(1,maximumCatchmentID+1):
		# create catchment mask and check whether it does not coincide with a lake
		catchmentMask= catchments == catchment
		catchmentSize= pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,cellArea*1.e-6)),1)[0]
		#~ ##~ if pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,pcr.scalar(lakeMask))),1) <> \
				#~ ##~ pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,pcr.scalar(catchmentMask))),1)[0] and \
				#~ ##~ catchmentSize > catchmentSizeLimit:
		key= 'annualAverage'
		variableName= 'discharge'			
		if bool(pcr.cellvalue(pcr.maptotal(pcr.ifthen((ldd == 5) & catchmentMask,\
				pcr.scalar(ncData[variableName][key] > 0))),1)[0]) and catchmentSize >= catchmentSizeLimit:
			# valid catchment, process
			# all volumes are in m3 per year
			key= 'annualAverage'
			catchmentAverageDischarge= pcr.cellvalue(pcr.mapmaximum(pcr.ifthen(catchmentMask & (ldd == 5),\
				ncData[variableName][key])),1)[0]*365.25*3600*24
			variableName= 'gwRecharge'
			catchmentRecharge= pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,ncData[variableName][key]*\
				(1.-fractionWater)*cellArea)),1)[0]
			variableName= 'totalRunoff'
			catchmentRunoff= pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,ncData[variableName][key]*\
				cellArea)),1)[0]
			key= 'environmentalFlow'
			variableName= 'discharge'			
			catchmentEnvironmentalFlow= pcr.cellvalue(pcr.mapmaximum(pcr.ifthen(catchmentMask & (ldd == 5),\
				ncData[variableName][key])),1)[0]*365.25*3600*24
			catchmentRunoff= max(catchmentRunoff,catchmentEnvironmentalFlow)
			if catchmentAverageDischarge > 0.:
				fractionEnvironmentalFlow= catchmentEnvironmentalFlow/catchmentAverageDischarge
				fractionGroundWaterContribution= catchmentRecharge/catchmentAverageDischarge
			else:
				fractionEnvironmentalFlow= 0.
				fractionGroundWaterContribution= 0.
			if catchmentRecharge > 0:
				fractionReservedRecharge= min(1,catchmentEnvironmentalFlow/catchmentRecharge)
			else:
				fractionReservedRecharge= 1.0
			fractionMinimumReservedRecharge= (fractionReservedRecharge+fractionGroundWaterContribution-\
				fractionReservedRecharge*fractionGroundWaterContribution)*fractionReservedRecharge
			#~ # echo to screen, and write to file and map
			wStr= '%6s,%15.1f,%15.6g,%15.6g,%15.6g,%15.6f,%15.6f,%15.6f,%15.6f\n' % \
				(catchment,catchmentSize,catchmentAverageDischarge,catchmentEnvironmentalFlow,catchmentRecharge,\
					fractionEnvironmentalFlow,fractionReservedRecharge,fractionGroundWaterContribution,fractionMinimumReservedRecharge)
			print wStr
			textFile.write(wStr)
			# update maps
			fractionReservedRechargeMap= pcr.ifthenelse(catchmentMask,\
				pcr.scalar(fractionReservedRecharge),fractionReservedRechargeMap)
			fractionMinimumReservedRechargeMap= pcr.ifthenelse(catchmentMask,\
				pcr.scalar(fractionMinimumReservedRecharge),fractionMinimumReservedRechargeMap)
	#-report map and close text file
	pcr.report(fractionReservedRechargeMap,fractionReservedRechargeMapFileName % environmentalFlowPercent)
	pcr.report(fractionMinimumReservedRechargeMap,fractionMinimumReservedRechargeMapFileName % environmentalFlowPercent)
	# close text file
	textFile.close()
	# finished
	print 'all done!'
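The percentile block above interpolates linearly between the two order statistics that bracket the requested quantile. numpy can do the equivalent in one call; a small sketch on synthetic data (np.percentile uses a slightly different interpolation convention, so expect close but not bit-identical values):

import numpy as np

stack = np.sort(np.random.rand(120, 5), axis=0)  # 120 sorted samples per location
percentile = 0.10
i0 = min(stack.shape[0] - 1, int(percentile * stack.shape[0]))
i1 = min(stack.shape[0] - 1, i0 + 1)
x0, x1 = float(i0) / stack.shape[0], float(i1) / stack.shape[0]
y = stack[i0] + (percentile - x0) * (stack[i1] - stack[i0]) / (x1 - x0)
print(y)
print(np.percentile(stack, 10, axis=0))          # numpy's one-call equivalent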
Example #37
def netcdf2PCRobjClone(ncFile,varName,dateInput,\
                       useDoy = None,
                       cloneMapFileName  = None,\
                       LatitudeLongitude = True,\
                       specificFillValue = None):
    # 
    # EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
    # --- with clone checking
    #     Only works if cells are 'square'.
    #     Only works if cellsizeClone <= cellsizeInput
    # Get netCDF file and variable name:
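    # Typical call (a sketch; the file and variable names are hypothetical):
    #   field = netcdf2PCRobjClone('precipitation.nc', 'precipitation',
    #                              '2000-01-31', cloneMapFileName='clone.map')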
    
    print ncFile
    
    if ncFile in filecache.keys():
        f = filecache[ncFile]
        print "Cached: ", ncFile
    else:
        f = nc.Dataset(ncFile)
        filecache[ncFile] = f
        print "New: ", ncFile
    
    varName = str(varName)
    
    if LatitudeLongitude == True:
        try:
            f.variables['lat'] = f.variables['latitude']
            f.variables['lon'] = f.variables['longitude']
        except:
            pass
    
    if varName == "evapotranspiration":        
        try:
            f.variables['evapotranspiration'] = f.variables['referencePotET']
        except:
            pass

    # date
    date = dateInput
    if useDoy == "Yes": 
        print('Finding the date based on the given climatology doy index (1 to 366, or index 0 to 365)')
        idx = int(dateInput) - 1
    elif useDoy == "month":  # PS: WE NEED THIS ONE FOR NETCDF FILES that contain only 12 monthly values (e.g. cropCoefficientWaterNC).
        print('Finding the date based on the given climatology month index (1 to 12, or index 0 to 11)')
        # make sure that date is in the correct format
        if isinstance(date, str) == True: date = \
                        datetime.datetime.strptime(str(date),'%Y-%m-%d') 
        idx = int(date.month) - 1
    else:
        # make sure that date is in the correct format
        if isinstance(date, str) == True: date = \
                        datetime.datetime.strptime(str(date),'%Y-%m-%d') 
        date = datetime.datetime(date.year,date.month,date.day)
        if useDoy == "yearly":
            date  = datetime.datetime(date.year,int(1),int(1))
        if useDoy == "monthly":
            date = datetime.datetime(date.year,date.month,int(1))
        if useDoy == "yearly" or useDoy == "monthly" or useDoy == "daily_seasonal":
            # if the desired year is not available, use the first year or the last year that is available
            first_year_in_nc_file = findFirstYearInNCTime(f.variables['time'])
            last_year_in_nc_file  =  findLastYearInNCTime(f.variables['time'])
            #
            if date.year < first_year_in_nc_file:  
                date = datetime.datetime(first_year_in_nc_file,date.month,date.day)
                msg  = "\n"
                msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
                msg += "The date "+str(dateInput)+" is NOT available. "
                msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
                msg += "\n"
                print(msg)
            if date.year > last_year_in_nc_file:  
                date = datetime.datetime(last_year_in_nc_file,date.month,date.day)
                msg  = "\n"
                msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
                msg += "The date "+str(dateInput)+" is NOT available. "
                msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
                msg += "\n"
                print(msg)
        try:
            idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
                                select ='exact')
            msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is available. The 'exact' option is used while selecting netcdf time."
            print(msg)
        except:
            msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'exact' option CANNOT be used while selecting netcdf time."
            print(msg)
            try:                                  
                idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
                                    select = 'before')
                msg  = "\n"
                msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
                msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'before' option is used while selecting netcdf time."
                msg += "\n"
            except:
                idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
                                    select = 'after')
                msg  = "\n"
                msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
                msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'after' option is used while selecting netcdf time."
                msg += "\n"
            print(msg)
                                                  
    idx = int(idx)                                                  

    sameClone = True
    # check whether clone and input maps have the same attributes:
    if cloneMapFileName != None:
        # get the attributes of cloneMap
        attributeClone = getMapAttributesALL(cloneMapFileName)
        cellsizeClone = attributeClone['cellsize']
        rowsClone = attributeClone['rows']
        colsClone = attributeClone['cols']
        xULClone = attributeClone['xUL']
        yULClone = attributeClone['yUL']
        # get the attributes of input (netCDF) 
        cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
        cellsizeInput = float(cellsizeInput)
        rowsInput = len(f.variables['lat'])
        colsInput = len(f.variables['lon'])
        xULInput = f.variables['lon'][0]-0.5*cellsizeInput
        yULInput = f.variables['lat'][0]+0.5*cellsizeInput
        # check whether both maps have the same attributes 
        if cellsizeClone != cellsizeInput: sameClone = False
        if rowsClone != rowsInput: sameClone = False
        if colsClone != colsInput: sameClone = False
        if xULClone != xULInput: sameClone = False
        if yULClone != yULInput: sameClone = False

    cropData = f.variables[varName][int(idx),:,:]       # still original data
    factor = 1                          # needed in regridData2FinerGrid

    if sameClone == False:
        
        print('Crop to the clone map with lower left corner (x,y): '+str(xULClone)+' , '+str(yULClone))
        # crop to cloneMap:
        #~ xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
        minX    = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
        xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
        xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
        #~ yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
        minY    = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
        yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
        yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
        cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
        factor = int(float(cellsizeInput)/float(cellsizeClone))
    
    # convert to PCR object and close f
    if specificFillValue != None:
        outPCR = pcr.numpy2pcr(pcr.Scalar, \
                  regridData2FinerGrid(factor,cropData,MV), \
                  float(specificFillValue))
    else:
        outPCR = pcr.numpy2pcr(pcr.Scalar, \
                  regridData2FinerGrid(factor,cropData,MV), \
                  float(f.variables[varName]._FillValue))
                  
    #f.close();
    f = None ; cropData = None 
    # PCRaster object
    return (outPCR)
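regridData2FinerGrid itself is not shown; from the way it is called, it presumably refines the cropped array by the integer factor so it matches the finer clone grid. A minimal nearest-neighbour stand-in (an assumption about its behaviour, not the actual helper; the MV argument of the original is omitted since replication leaves missing values untouched):

import numpy as np

def regrid_to_finer(factor, coarse):
    # Replicate each coarse cell into a factor x factor block of fine cells;
    # missing values are replicated like any other value.
    if factor == 1:
        return coarse
    return np.kron(coarse, np.ones((factor, factor), coarse.dtype))

coarse = np.array([[1.0, 2.0], [3.0, 4.0]])
print(regrid_to_finer(2, coarse))
# [[1. 1. 2. 2.]
#  [1. 1. 2. 2.]
#  [3. 3. 4. 4.]
#  [3. 3. 4. 4.]]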
Example #38
def loadmap(name,
            pcr=False,
            lddflag=False,
            timestampflag='exact',
            averageyearflag=False):
    """ Load a static map either value or pcraster map or netcdf (single or stack)
    
    Load a static map either value or pcraster map or netcdf (single or stack)
    If a netCDF stack is loaded, map is read according to timestepInit date (i.e. model time step). If timestepInit is a
    step number, step number is converted to date (referred to CalendarDayStart in settings.xml). Then date is used to
    read time step from netCDF file.
    if timestampflag = 'closest' and loadmap is reading a NetCDF stack, the timestep with the closest timestamp will be
    loaded if the exact one is not available.
    
    :param name: name of key in Settings.xml input file containing path and name of the map file (as string)
    :param pcr: flag for output maps in pcraster format 
    :param lddflag: flag for local drain direction map (CM??)
    :param timestampflag: look for exact time stamp in netcdf file ('exact') or for the closest (left) time stamp available ('closest')
    :param averageyearflag: if True, use "average year" netcdf file over the entire model simulation period
    :return: map or mapC
    :except: pcr: maps must have the same size of clone.map
             netCDF: time step timestepInit must be included into the stack 
    """
    # name of the key in Settings.xml file containing path and name of the map file
    settings = LisSettings.instance()
    binding = settings.binding
    flags = settings.flags
    value = binding[name]
    # path and name of the map file
    filename = value
    load = False
    pcrmap = False
    # try reading in PCRaster map format
    try:
        # try reading constant value
        mapC = float(value)
        flagmap = False
        load = True
        if pcr: map = mapC
    except ValueError:
        try:
            # try reading pcraster map exploiting the iterAccess class
            map = iterReadPCRasterMap(value)
            flagmap = True
            load = True
            pcrmap = True
        except:
            load = False

    if load and pcrmap:
        #map is loaded and it is in pcraster format
        try:
            # test if map is same size as clone map, if not it will make an error
            test = pcraster.scalar(map) + pcraster.scalar(map)
        except:
            raise LisfloodError(
                "{} might be of a different size than clone size".format(
                    value))
    # if failed before try reading from netCDF map format
    if not load:
        # read a netCDF file (either a single map or a stack)
        filename = os.path.splitext(value)[0] + '.nc'
        # get mapextend of netcdf map and calculate the cutting
        cut0, cut1, cut2, cut3 = mapattrNetCDF(filename)
        # load netcdf map but only the rectangle needed
        nf1 = iterOpenNetcdf(filename, "", 'r')
        # get the last variable name (it must be the variable to be read by Lisflood)
        value = listitems(nf1.variables)[-1][0]
        if not settings.timestep_init:
            # if timestep_init is missing, read netcdf as single static map
            mapnp = nf1.variables[value][cut2:cut3, cut0:cut1]
        else:
            if 'time' in nf1.variables:
                # read a netcdf  (stack) - state files
                # get information from netCDF stack
                # get values for time steps (e.g. [0., 24., 48., 72., 96.])
                t_steps = nf1.variables['time'][:]
                # get the time unit (e.g. u'hours since 2015-01-01 06:00:00')
                t_unit = nf1.variables['time'].units
                t_cal = get_calendar_type(nf1)
                # get year from time unit in case average year is used
                if averageyearflag:
                    # get date of the first step in netCDF file containing average year values
                    first_date = num2date(t_steps[0], t_unit, t_cal)
                    # get year of the first step in netCDF file containing average year values
                    t_ref_year = first_date.year

                # select timestep to use for reading from netCDF stack based on timestep_init (state file time step)
                timestepI = calendar(settings.timestep_init,
                                     binding['calendar_type'])
                if isinstance(timestepI, datetime.datetime):
                    #reading dates in XML settings file
                    # get step id number in netCDF stack for timestepInit date
                    if averageyearflag:
                        #if using an average year don't care about the year in timestepIDate and change it to the netCDF first time step year
                        try:
                            timestepI = timestepI.replace(year=t_ref_year)
                        except ValueError:
                            # simulation date is 29/2 but the average year is not leap: use 28/2
                            timestepI = timestepI.replace(day=28)
                            timestepI = timestepI.replace(year=t_ref_year)
                    timestepI = date2num(timestepI,
                                         nf1.variables['time'].units)
                else:
                    # reading step numbers in XML file
                    # timestepI = int(timestepI) -1
                    begin = calendar(binding['CalendarDayStart'])
                    DtSec = float(binding['DtSec'])
                    DtDay = DtSec / 86400.
                    # Time step, expressed as fraction of day (same as self.var.DtSec and self.var.DtDay)
                    # get date for step number timestepI (referred to CalendarDayStart)
                    timestepIDate = begin + datetime.timedelta(
                        days=(timestepI - 1) * DtDay)
                    # get step id number in netCDF stack for step timestepInit
                    # timestepInit refers to CalendarDayStart
                    # timestepI now refers to the first date in the netCDF stack
                    if averageyearflag:
                        #using an average year, don't care about the year in timestepIDate and change it to the netCDF time unit year
                        try:
                            timestepIDate = timestepIDate.replace(
                                year=t_ref_year)
                        except ValueError:
                            #if simulation year is leap and average year is not, switch 29/2 with 28/2
                            timestepIDate = timestepIDate.replace(day=28)
                            timestepIDate = timestepIDate.replace(
                                year=t_ref_year)
                    timestepI = date2num(timestepIDate,
                                         units=t_unit,
                                         calendar=t_cal)

                if timestepI not in nf1.variables['time'][:]:
                    if timestampflag == 'exact':
                        # look for the exact time stamp when loading data
                        msg = "time step " + str(int(timestepI) + 1) + " is not stored in " + filename
                        raise LisfloodError(msg)
                    elif timestampflag == 'closest':
                        # fall back to the closest available time step in the netCDF file
                        timestepI = takeClosest(t_steps, timestepI)

                itime = np.where(nf1.variables['time'][:] == timestepI)[0][0]
                mapnp = nf1.variables[value][itime, cut2:cut3, cut0:cut1]
            else:
                # read a netcdf (single one)
                mapnp = nf1.variables[value][cut2:cut3, cut0:cut1]

        # masking
        try:
            maskinfo = MaskInfo.instance()
            mapnp.mask = maskinfo.info.mask
        except KeyError as e:
            pass
        nf1.close()

        # if a map should be pcraster
        if pcr:
            # check if integer map (like outlets, lakes etc.)
            checkint = str(mapnp.dtype)
            if checkint in ("int16", "int32"):
                mapnp[mapnp.mask] = -9999
                map = numpy2pcr(Nominal, mapnp, -9999)
            elif checkint == "int8":
                mapnp[mapnp < 0] = -9999
                map = numpy2pcr(Nominal, mapnp, -9999)
            else:
                mapnp[np.isnan(mapnp)] = -9999
                map = numpy2pcr(Scalar, mapnp, -9999)
            # if the map is a ldd
            if lddflag:
                map = pcraster.ldd(pcraster.nominal(map))
        else:
            mapC = compressArray(mapnp, pcr=False, name=filename)
        flagmap = True

    # pcraster map but it has to be an array
    if pcrmap and not pcr:
        mapC = compressArray(map, name=filename)

    if flags['checkfiles']:
        print(name, filename)
        if not flagmap:
            checkmap(name, filename, mapC, flagmap, 0)
        elif pcr:
            checkmap(name, filename, map, flagmap, 0)
        else:
            print(name, mapC.size)
            if mapC.size > 0:
                map = decompress(mapC)
                checkmap(name, filename, map, flagmap, 0)
    if pcr:
        if flags['nancheck'] and name != 'Ldd':
            nanCheckMap(map, filename, name)
        return map
    elif isinstance(mapC, np.ndarray):
        return mapC.astype(float)
    else:
        if flags['nancheck'] and name != 'Ldd':
            nanCheckMap(mapC, filename, name)
        return mapC
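# A minimal usage sketch for loadmap (hedged: the key names 'Elevation' and
# 'Ldd' are hypothetical and must exist in the Settings.xml of an already
# initialised LisSettings instance):
def loadmap_usage_sketch():
    # a scalar map returned as a compressed 1D numpy array over the model mask
    elevation = loadmap('Elevation')
    # a local drain direction map returned as a PCRaster ldd object
    ldd = loadmap('Ldd', pcr=True, lddflag=True)
    return elevation, ldd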
예제 #39
0
def netcdf2PCRobjClone(ncFile, varName, dateInput,
                       useDoy=None,
                       cloneMapFileName=None,
                       LatitudeLongitude=True,
                       specificFillValue=None):
    # 
    # EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
    # --- with clone checking
    #     Only works if cells are 'square'.
    #     Only works if cellsizeClone <= cellsizeInput
    # Get netCDF file and variable name:
    
    #~ print ncFile
    
    if ncFile in filecache:
        f = filecache[ncFile]
        #~ print "Cached: ", ncFile
    else:
        f = nc.Dataset(ncFile)
        filecache[ncFile] = f
        #~ print "New: ", ncFile
    
    varName = str(varName)
    if varName == "Automatic":
        for key in f.variables.keys():
            if key not in ['latitude', 'longitude', 'lat', 'lon', 'time']:
				varName = key  
				print varName                                                                                                                                                          

    if LatitudeLongitude:
        try:
            # alias the long coordinate names to the short ones used below
            f.variables['lat'] = f.variables['latitude']
            f.variables['lon'] = f.variables['longitude']
        except:
            pass
    
    if varName == "evapotranspiration":        
        try:
            f.variables['evapotranspiration'] = f.variables['referencePotET']
        except:
            pass

    # date
    date = dateInput
    if useDoy == "Yes": 
        idx = dateInput - 1
    else:
        if isinstance(date, str):
            date = datetime.datetime.strptime(str(date), '%Y-%m-%d')
        date = datetime.datetime(date.year,date.month,date.day)
        # time index (in the netCDF file)
        if useDoy == "month":
            idx = int(date.month) - 1
        else:
            if useDoy == "yearly":\
                date = datetime.datetime(date.year,int(1),int(1))
            if useDoy == "monthly":\
                date = datetime.datetime(date.year,date.month,int(1))
            nctime = f.variables['time']  # A netCDF time variable object.
            idx = nc.date2index(date, nctime, calendar=nctime.calendar, \
                                                select='exact')
    idx = int(idx)                                                  

    sameClone = True
    # check whether clone and input maps have the same attributes:
    if cloneMapFileName is not None:
        # get the attributes of cloneMap
        attributeClone = getMapAttributesALL(cloneMapFileName)
        cellsizeClone = attributeClone['cellsize']
        rowsClone = attributeClone['rows']
        colsClone = attributeClone['cols']
        xULClone = attributeClone['xUL']
        yULClone = attributeClone['yUL']
        # get the attributes of input (netCDF) 
        cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
        cellsizeInput = float(cellsizeInput)
        rowsInput = len(f.variables['lat'])
        colsInput = len(f.variables['lon'])
        xULInput = f.variables['lon'][0]-0.5*cellsizeInput
        yULInput = f.variables['lat'][0]+0.5*cellsizeInput
        # check whether both maps have the same attributes 
        if cellsizeClone != cellsizeInput: sameClone = False
        if rowsClone != rowsInput: sameClone = False
        if colsClone != colsInput: sameClone = False
        if xULClone != xULInput: sameClone = False
        if yULClone != yULInput: sameClone = False

    cropData = f.variables[varName][int(idx),:,:]       # still original data
    factor = 1                          # needed in regridData2FinerGrid

    if sameClone == False:
        # crop to cloneMap:
        #~ xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
        minX    = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
        xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
        xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
        #~ yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
        minY    = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
        yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
        yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
        cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]

        factor = int(round(float(cellsizeInput)/float(cellsizeClone)))

    # convert to a PCRaster object; f is not closed here because the dataset is cached
    if specificFillValue is not None:
        outPCR = pcr.numpy2pcr(pcr.Scalar, \
                  regridData2FinerGrid(factor,cropData,MV), \
                  float(specificFillValue))
    else:
        outPCR = pcr.numpy2pcr(pcr.Scalar, \
                  regridData2FinerGrid(factor,cropData,MV), \
                  float(f.variables[varName]._FillValue))

    # drop the local references; the cached Dataset stays open in filecache
    f = None ; cropData = None
    # PCRaster object
    return outPCR
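# A minimal usage sketch for netcdf2PCRobjClone (hedged: the file names, the
# variable name, the date and the fill value are hypothetical):
def netcdf2PCRobjClone_usage_sketch():
    # read the 'precipitation' field for 2000-01-31, cropped and regridded to
    # the clone map; -999.9 is treated as the fill value instead of _FillValue
    return netcdf2PCRobjClone('forcing/precipitation.nc', 'precipitation',
                              '2000-01-31',
                              cloneMapFileName='clone.map',
                              specificFillValue=-999.9)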
예제 #40
0
def netcdf2PCRobjClone(ncFile, varName, dateInput,
                       useDoy=None,
                       cloneMapFileName=None,
                       LatitudeLongitude=False,
                       specificFillValue=None):
    #
    # EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
    # --- with clone checking
    #     Only works if cells are 'square'.
    #     Only works if cellsizeClone <= cellsizeInput
    # Get netCDF file and variable name:

    if ncFile in filecache:
        f = filecache[ncFile]
        print "Cached: ", ncFile
    else:
        f = nc.Dataset(ncFile)
        filecache[ncFile] = f
        print "New: ", ncFile

    varName = str(varName)

    if LatitudeLongitude:
        try:
            # alias the long coordinate names to the short ones used below
            f.variables['lat'] = f.variables['latitude']
            f.variables['lon'] = f.variables['longitude']
        except:
            pass

    if varName == "evapotranspiration":
        try:
            f.variables['evapotranspiration'] = f.variables['referencePotET']
        except:
            pass

    # date
    date = dateInput
    if useDoy == "Yes":
        idx = dateInput - 1
    else:
        if isinstance(date, str):
            date = datetime.datetime.strptime(str(date), '%Y-%m-%d')
        date = datetime.datetime(date.year, date.month, date.day)
        # time index (in the netCDF file)
        if useDoy == "month":
            idx = int(date.month) - 1
        else:
            if useDoy == "yearly":                \
                                date = datetime.datetime(date.year,int(1),int(1))
            if useDoy == "monthly":                \
                                date = datetime.datetime(date.year,date.month,int(1))
            nctime = f.variables['time']  # A netCDF time variable object.
            idx = nc.date2index(date, nctime, calendar=nctime.calendar, \
                                                select='exact')
    idx = int(idx)

    sameClone = True
    # check whether clone and input maps have the same attributes:
    if cloneMapFileName is not None:
        # get the attributes of cloneMap
        attributeClone = getMapAttributesALL(cloneMapFileName)
        cellsizeClone = attributeClone['cellsize']
        rowsClone = attributeClone['rows']
        colsClone = attributeClone['cols']
        xULClone = attributeClone['xUL']
        yULClone = attributeClone['yUL']
        # get the attributes of input (netCDF)
        cellsizeInput = f.variables['lat'][0] - f.variables['lat'][1]
        cellsizeInput = float(cellsizeInput)
        rowsInput = len(f.variables['lat'])
        colsInput = len(f.variables['lon'])
        xULInput = f.variables['lon'][0] - 0.5 * cellsizeInput
        yULInput = f.variables['lat'][0] + 0.5 * cellsizeInput
        # check whether both maps have the same attributes
        if cellsizeClone != cellsizeInput: sameClone = False
        if rowsClone != rowsInput: sameClone = False
        if colsClone != colsInput: sameClone = False
        if xULClone != xULInput: sameClone = False
        if yULClone != yULInput: sameClone = False

    cropData = f.variables[varName][int(idx), :, :]  # still original data
    factor = 1  # needed in regridData2FinerGrid
    if sameClone == False:
        # crop to cloneMap:
        #~ xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
        minX = min(
            abs(f.variables['lon'][:] -
                (xULClone + 0.5 * cellsizeInput)))  # ; print(minX)
        xIdxSta = int(
            np.where(
                abs(f.variables['lon'][:] -
                    (xULClone + 0.5 * cellsizeInput)) == minX)[0])
        xIdxEnd = int(
            math.ceil(xIdxSta + colsClone / (cellsizeInput / cellsizeClone)))
        #~ yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
        minY = min(
            abs(f.variables['lat'][:] -
                (yULClone - 0.5 * cellsizeInput)))  # ; print(minY)
        yIdxSta = int(
            np.where(
                abs(f.variables['lat'][:] -
                    (yULClone - 0.5 * cellsizeInput)) == minY)[0])
        yIdxEnd = int(
            math.ceil(yIdxSta + rowsClone / (cellsizeInput / cellsizeClone)))
        cropData = f.variables[varName][idx, yIdxSta:yIdxEnd, xIdxSta:xIdxEnd]
        factor = int(round(float(cellsizeInput) / float(cellsizeClone)))

    # convert to a PCRaster object; f is not closed here because the dataset is cached
    if specificFillValue is not None:
        outPCR = pcr.numpy2pcr(pcr.Scalar, \
                  regridData2FinerGrid(factor,cropData,MV), \
                  float(specificFillValue))
    else:
        outPCR = pcr.numpy2pcr(pcr.Scalar, \
                  regridData2FinerGrid(factor,cropData,MV), \
                  float(f.variables[varName]._FillValue))

    # drop the local references; the cached Dataset stays open in filecache
    f = None
    cropData = None
    # PCRaster object
    return outPCR
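# The helper getMapAttributesALL is referenced throughout these examples but not
# shown. A minimal sketch of one way to obtain the same attributes with the
# PCRaster clone API (hedged: the dictionary keys follow the usage above, the
# implementation itself is an assumption):
def getMapAttributesALL_sketch(cloneMapFileName):
    # make the map the current clone, then query its geometry
    pcr.setclone(cloneMapFileName)
    clone = pcr.clone()
    return {'cellsize': clone.cellSize(),
            'rows': clone.nrRows(),
            'cols': clone.nrCols(),
            'xUL': clone.west(),
            'yUL': clone.north()}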
예제 #41
0
def netcdf2PCRobjCloneWind(ncFile, varName, dateInput, useDoy=None,
                           cloneMapFileName=None):
    # EHS (02 SEP 2013): This is a special function made by Niko Wanders (for his DA framework).
    # EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
    # --- with clone checking
    #     Only works if cells are 'square'.
    #     Only works if cellsizeClone <= cellsizeInput
    
    # Get netCDF file and variable name:
    f = nc.Dataset(ncFile)
    varName = str(varName)
    
    # date
    date = dateInput
    if useDoy == "Yes": 
        idx = dateInput - 1
    else:
        if isinstance(date, str):
            date = datetime.datetime.strptime(str(date), '%Y-%m-%d')
        date = datetime.datetime(date.year,date.month,date.day, 0, 0)
        # time index (in the netCDF file)
        nctime = f.variables['time']  # A netCDF time variable object.
        idx = nc.date2index(date, nctime, select="exact")
    idx = int(idx)                                                  

    sameClone = True
    # check whether clone and input maps have the same attributes:
    if cloneMapFileName is not None:
        # get the attributes of cloneMap
        attributeClone = getMapAttributesALL(cloneMapFileName)
        cellsizeClone = attributeClone['cellsize']
        rowsClone = attributeClone['rows']
        colsClone = attributeClone['cols']
        xULClone = attributeClone['xUL']
        yULClone = attributeClone['yUL']
        # get the attributes of input (netCDF) 
        cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
        cellsizeInput = float(cellsizeInput)
        rowsInput = len(f.variables['lat'])
        colsInput = len(f.variables['lon'])
        xULInput = f.variables['lon'][0]-0.5*cellsizeInput
        yULInput = f.variables['lat'][0]+0.5*cellsizeInput
        # check whether both maps have the same attributes 
        if cellsizeClone != cellsizeInput: sameClone = False
        if rowsClone != rowsInput: sameClone = False
        if colsClone != colsInput: sameClone = False
        if xULClone != xULInput: sameClone = False
        if yULClone != yULInput: sameClone = False

    cropData = f.variables[varName][int(idx),:,:]       # still original data
    factor = 1                          # needed in regridData2FinerGrid
    if sameClone == False:
        # crop to cloneMap:
        xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
        xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
        yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
        yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
        cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
        factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
    
    # convert to PCR object and close f
    outPCR = pcr.numpy2pcr(pcr.Scalar, \
               regridData2FinerGrid(factor,cropData,MV), \
                  float(f.variables[varName]._FillValue))
    f.close()
    f = None ; cropData = None
    # PCRaster object
    return outPCR
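# Note: the Wind variant above locates the crop window with an exact '==' match
# on the coordinate axes, which is fragile for floating-point values; the other
# clone functions in these examples use a nearest-value match instead. A sketch
# of that idea with np.argmin (the function name is an assumption):
def nearest_index_sketch(coords, target):
    import numpy as np
    # index of the axis value closest to target; robust to float rounding
    return int(np.argmin(np.abs(np.asarray(coords) - target)))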
예제 #42
0
def netcdf2PCRobjCloneWithoutTime(ncFile,varName,
                                  cloneMapFileName  = None,\
                                  LatitudeLongitude = False,\
                                  specificFillValue = None):
    #
    # EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
    # --- with clone checking
    #     Only works if cells are 'square'.
    #     Only works if cellsizeClone <= cellsizeInput
    # Get netCDF file and variable name:
    if ncFile in filecache:
        f = filecache[ncFile]
        print "Cached: ", ncFile
    else:
        f = nc.Dataset(ncFile)
        filecache[ncFile] = f
        print "New: ", ncFile

    #print ncFile
    #f = nc.Dataset(ncFile)
    varName = str(varName)

    if LatitudeLongitude:
        try:
            # alias the long coordinate names to the short ones used below
            f.variables['lat'] = f.variables['latitude']
            f.variables['lon'] = f.variables['longitude']
        except:
            pass

    sameClone = True
    # check whether clone and input maps have the same attributes:
    if cloneMapFileName is not None:
        # get the attributes of cloneMap
        attributeClone = getMapAttributesALL(cloneMapFileName)
        cellsizeClone = attributeClone['cellsize']
        rowsClone = attributeClone['rows']
        colsClone = attributeClone['cols']
        xULClone = attributeClone['xUL']
        yULClone = attributeClone['yUL']
        # get the attributes of input (netCDF)
        cellsizeInput = f.variables['lat'][0] - f.variables['lat'][1]
        cellsizeInput = float(cellsizeInput)
        rowsInput = len(f.variables['lat'])
        colsInput = len(f.variables['lon'])
        xULInput = f.variables['lon'][0] - 0.5 * cellsizeInput
        yULInput = f.variables['lat'][0] + 0.5 * cellsizeInput
        # check whether both maps have the same attributes
        if cellsizeClone != cellsizeInput: sameClone = False
        if rowsClone != rowsInput: sameClone = False
        if colsClone != colsInput: sameClone = False
        if xULClone != xULInput: sameClone = False
        if yULClone != yULInput: sameClone = False

    cropData = f.variables[varName][:, :]  # still original data
    factor = 1  # needed in regridData2FinerGrid
    if sameClone == False:
        # crop to cloneMap:
        minX = min(
            abs(f.variables['lon'][:] -
                (xULClone + 0.5 * cellsizeInput)))  # ; print(minX)
        xIdxSta = int(
            np.where(
                abs(f.variables['lon'][:] -
                    (xULClone + 0.5 * cellsizeInput)) == minX)[0])
        xIdxEnd = int(
            math.ceil(xIdxSta + colsClone / (cellsizeInput / cellsizeClone)))
        minY = min(
            abs(f.variables['lat'][:] -
                (yULClone - 0.5 * cellsizeInput)))  # ; print(minY)
        yIdxSta = int(
            np.where(
                abs(f.variables['lat'][:] -
                    (yULClone - 0.5 * cellsizeInput)) == minY)[0])
        yIdxEnd = int(
            math.ceil(yIdxSta + rowsClone / (cellsizeInput / cellsizeClone)))
        cropData = f.variables[varName][yIdxSta:yIdxEnd, xIdxSta:xIdxEnd]
        factor = int(round(float(cellsizeInput) / float(cellsizeClone)))

    # convert to a PCRaster object; f is not closed here because the dataset is cached
    if specificFillValue is not None:
        outPCR = pcr.numpy2pcr(pcr.Scalar, \
                  regridData2FinerGrid(factor,cropData,MV), \
                  float(specificFillValue))
    else:
        outPCR = pcr.numpy2pcr(pcr.Scalar, \
                  regridData2FinerGrid(factor,cropData,MV), \
                  float(f.variables[varName]._FillValue))

    #~ # debug:
    #~ pcr.report(outPCR,"tmp.map")
    #~ print(varName)
    #~ os.system('aguila tmp.map')

    # drop the local references; the cached Dataset stays open in filecache
    f = None
    cropData = None
    # PCRaster object
    return outPCR
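# A minimal usage sketch for netcdf2PCRobjCloneWithoutTime (hedged: the file
# name and the variable name are hypothetical):
def netcdf2PCRobjCloneWithoutTime_usage_sketch():
    # read a time-invariant field onto the clone grid
    return netcdf2PCRobjCloneWithoutTime('soil_parameters.nc', 'ksat',
                                         cloneMapFileName='clone.map',
                                         LatitudeLongitude=True)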
예제 #43
0
def netcdf2PCRobjCloneWind(ncFile,
                           varName,
                           dateInput,
                           useDoy=None,
                           cloneMapFileName=None):
    # EHS (02 SEP 2013): This is a special function made by Niko Wanders (for his DA framework).
    # EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
    # --- with clone checking
    #     Only works if cells are 'square'.
    #     Only works if cellsizeClone <= cellsizeInput

    # Get netCDF file and variable name:
    f = nc.Dataset(ncFile)
    varName = str(varName)

    # date
    date = dateInput
    if useDoy == "Yes":
        idx = dateInput - 1
    else:
        if isinstance(date, str):
            date = datetime.datetime.strptime(str(date), '%Y-%m-%d')
        date = datetime.datetime(date.year, date.month, date.day, 0, 0)
        # time index (in the netCDF file)
        nctime = f.variables['time']  # A netCDF time variable object.
        idx = nc.date2index(date, nctime, select="exact")
    idx = int(idx)

    sameClone = True
    # check whether clone and input maps have the same attributes:
    if cloneMapFileName is not None:
        # get the attributes of cloneMap
        attributeClone = getMapAttributesALL(cloneMapFileName)
        cellsizeClone = attributeClone['cellsize']
        rowsClone = attributeClone['rows']
        colsClone = attributeClone['cols']
        xULClone = attributeClone['xUL']
        yULClone = attributeClone['yUL']
        # get the attributes of input (netCDF)
        cellsizeInput = f.variables['lat'][0] - f.variables['lat'][1]
        cellsizeInput = float(cellsizeInput)
        rowsInput = len(f.variables['lat'])
        colsInput = len(f.variables['lon'])
        xULInput = f.variables['lon'][0] - 0.5 * cellsizeInput
        yULInput = f.variables['lat'][0] + 0.5 * cellsizeInput
        # check whether both maps have the same attributes
        if cellsizeClone != cellsizeInput: sameClone = False
        if rowsClone != rowsInput: sameClone = False
        if colsClone != colsInput: sameClone = False
        if xULClone != xULInput: sameClone = False
        if yULClone != yULInput: sameClone = False

    cropData = f.variables[varName][int(idx), :, :]  # still original data
    factor = 1  # needed in regridData2FinerGrid
    if sameClone == False:
        # crop to cloneMap:
        xIdxSta = int(
            np.where(f.variables['lon'][:] == xULClone +
                     0.5 * cellsizeInput)[0])
        xIdxEnd = int(
            math.ceil(xIdxSta + colsClone / (cellsizeInput / cellsizeClone)))
        yIdxSta = int(
            np.where(f.variables['lat'][:] == yULClone -
                     0.5 * cellsizeInput)[0])
        yIdxEnd = int(
            math.ceil(yIdxSta + rowsClone / (cellsizeInput / cellsizeClone)))
        cropData = f.variables[varName][idx, yIdxSta:yIdxEnd, xIdxSta:xIdxEnd]
        factor = int(round(float(cellsizeInput) / float(cellsizeClone)))

    # convert to PCR object and close f
    outPCR = pcr.numpy2pcr(pcr.Scalar, \
               regridData2FinerGrid(factor,cropData,MV), \
                  float(f.variables[varName]._FillValue))
    f.close()
    f = None
    cropData = None
    # PCRaster object
    return outPCR
예제 #44
0
def regridMapFile2FinerGrid(rescaleFac, coarse):
    if rescaleFac == 1:
        return coarse
    return pcr.numpy2pcr(
        pcr.Scalar,
        regridData2FinerGrid(rescaleFac, pcr.pcr2numpy(coarse, MV), MV), MV)
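# A minimal usage sketch for regridMapFile2FinerGrid (hedged: assumes the
# current clone is the fine grid and coarse_map was read on a grid exactly
# 'rescaleFac' times coarser):
def regridMapFile2FinerGrid_usage_sketch(coarse_map):
    # refine the coarse PCRaster map by a factor of 4 in each direction;
    # a factor of 1 would return the input unchanged
    return regridMapFile2FinerGrid(4, coarse_map)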
예제 #45
0
def get_return_period_gumbel(p_zero_in_pcraster, loc_in_pcraster, scale_in_pcraster,
                             flvol_in_pcraster, max_return_period=np.longdouble(1e9),
                             max_return_period_that_can_be_assigned=1000.):
    """
    Transforms a unique, or array of flood volumes into the belonging return
    periods, according to gumbel parameters (belonging to non-zero part of the
    distribution) and a zero probability
    Inputs:
        p_zero:        probability that flood volume is zero
        loc:           Gumbel location parameter (of non-zero part of distribution)
        scale:         Gumbel scale parameter (of non-zero part of distribution)
        flvol:         Flood volume that will be transformed to return period
        max_return_period: maximum return period considered. This maximum is needed to prevent that floating point
                        precision becomes a problem (default: 1e9)
    This function is copied from: https://repos.deltares.nl/repos/Hydrology/trunk/GLOFRIS/src/rp_bias_corr.py
    """
    
    np.seterr(divide='ignore')
    np.seterr(invalid='ignore')

    # convert all pcraster maps to numpy arrays
    p_zero  = np.longdouble(pcr.pcr2numpy(p_zero_in_pcraster, vos.MV))
    loc     = np.longdouble(pcr.pcr2numpy(loc_in_pcraster   , vos.MV))
    scale   = np.longdouble(pcr.pcr2numpy(scale_in_pcraster , vos.MV))
    flvol   = np.longdouble(pcr.pcr2numpy(flvol_in_pcraster , vos.MV))
    
    # maximum values for the given max_return_period
    max_p = 1.0-1.0/max_return_period
    max_p_residual = np.minimum(np.maximum((max_p-p_zero)/(1.0-p_zero), 0.0), 1.0)
    max_p_residual[p_zero >= max_p] = 0.0 
    max_reduced_variate = -np.log(-np.log((max_p_residual)))

    #~ print np.nanmin(max_p_residual)
    #~ print np.nanmax(max_p_residual)
    #~ print np.amin(max_p_residual)
    #~ print np.amax(max_p_residual)
#~ 
    #~ print np.nanmin(max_reduced_variate)
    #~ print np.nanmax(max_reduced_variate)
    #~ print np.amin(max_reduced_variate)
    #~ print np.amax(max_reduced_variate)

    # compute the Gumbel reduced variate belonging to the Gumbel distribution (excluding any zero values): reduced_variate = (flvol-loc)/scale
    # make sure that the reduced variate does not exceed the one belonging to max_return_period
    reduced_variate = np.longdouble(np.minimum((flvol-loc)/scale, max_reduced_variate))

    #~ print np.nanmin(reduced_variate)
    #~ print np.nanmax(reduced_variate)
    #~ print np.amin(reduced_variate)
    #~ print np.amax(reduced_variate)
    
    # transform the reduced variate into a probability (residual after removing the zero volume probability)
    p_residual = np.minimum(np.maximum(np.exp(-np.exp(-np.longdouble(reduced_variate))), np.longdouble(0.0)), np.longdouble(1.0))
    #~ p_residual = np.minimum(np.maximum(np.exp(-np.exp(-np.longdouble(reduced_variate))), 0.0), 1.0)

    #~ print np.nanmin(p_residual)
    #~ print np.nanmax(p_residual)
    #~ print np.amin(p_residual)
    #~ print np.amax(p_residual)

    # transform from non-zero only distribution to zero-included distribution
    p = np.minimum(np.maximum(p_residual*(1.0 - p_zero) + p_zero, p_zero), max_p)  # never larger than max_p
    p = np.maximum(0.0, p)
    
    #~ print ""
    #~ print "p"
    #~ print np.nanmin(p)
    #~ print np.nanmax(p)
#~ 
    #~ print np.amin(p)
    #~ print np.amax(p)
    #~ print "p"
    #~ print ""

    # transform into a return period    
    return_period = 1.0/(1.0-p)
    
    # assign maximum return period for p_zero = 1.0 (value is always zero)
    return_period[p_zero == 1.0000] = max_return_period

    # limit return period to maximum return period that can be assigned
    return_period[return_period > max_return_period_that_can_be_assigned] = max_return_period_that_can_be_assigned

    # cells with missing values stay missing
    return_period[p_zero == vos.MV] = vos.MV
    
    #~ # test values (calculated in the original Hessel's script, not needed)
    #~ test_p = p == 1    
    #~ diff_p = 1.0 - p
    
    # debug output: range of the computed return periods
    print np.nanmin(return_period)
    print np.nanmax(return_period)
    print np.amin(return_period)
    print np.amax(return_period)

    print np.nanmin(return_period[p_zero != vos.MV])
    print np.nanmax(return_period[p_zero != vos.MV])
    print np.amin(return_period[p_zero != vos.MV])
    print np.amax(return_period[p_zero != vos.MV])

    #~ pcr.report(pcr.numpy2pcr(pcr.Scalar, np.float64(return_period), vos.MV), "return_period.map")
    #~ cmd = "aguila " + "return_period.map"
    #~ os.system(cmd)

    return pcr.numpy2pcr(pcr.Scalar, np.float64(return_period), vos.MV)
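# A worked numeric sketch of the Gumbel transform above, on plain scalars
# (the parameter values are made up for illustration only):
def gumbel_return_period_sketch():
    import numpy as np
    p_zero, loc, scale, flvol = 0.2, 100.0, 50.0, 250.0
    # Gumbel reduced variate of the non-zero part of the distribution
    z = (flvol - loc) / scale                 # = 3.0
    p_residual = np.exp(-np.exp(-z))          # ~ 0.9514
    # fold the zero-volume probability back into the full distribution
    p = p_residual * (1.0 - p_zero) + p_zero  # ~ 0.9611
    return 1.0 / (1.0 - p)                    # return period ~ 25.7 years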
예제 #46
0
def joinMaps(inputTuple):
    '''Merges maps starting from an input tuple that specifies the output map name, the number of rows
 and columns, the ULL X and Y coordinates, the cell length, the missing value identifier, a list of
 input maps and the clone file name'''
    outputFileName = inputTuple[0]
    nrRows = inputTuple[1]
    nrCols = inputTuple[2]
    xMin = inputTuple[3]
    yMax = inputTuple[4]
    cellLength = inputTuple[5]
    MV = inputTuple[6]
    fileNames = inputTuple[7]
    cloneFileName = inputTuple[8]
    #-echo to screen
    print 'combining files for %s' % outputFileName,
    #-get extent
    xMax = xMin + nrCols * cellLength
    yMin = yMax - nrRows * cellLength
    xCoordinates = xMin + np.arange(nrCols + 1) * cellLength
    yCoordinates = yMin + np.arange(nrRows + 1) * cellLength
    yCoordinates = np.flipud(yCoordinates)
    print 'between %.2f, %.2f and %.2f, %.2f' % (xMin, yMin, xMax, yMax)
    #-set output array
    variableArray = np.ones((nrRows, nrCols)) * MV
    #-iterate over maps
    for fileName in fileNames:
        print fileName
        attributeClone = getMapAttributesALL(fileName)
        cellLengthClone = attributeClone['cellsize']
        rowsClone = attributeClone['rows']
        colsClone = attributeClone['cols']
        xULClone = attributeClone['xUL']
        yULClone = attributeClone['yUL']
        # check whether both maps have the same attributes and process
        process, nd = checkResolution(cellLength, cellLengthClone)
        if process:
            #-get coordinates and locations
            sampleXMin = xULClone
            sampleXMax = xULClone + colsClone * cellLengthClone
            sampleYMin = yULClone - rowsClone * cellLengthClone
            sampleYMax = yULClone
            sampleXCoordinates = sampleXMin + np.arange(colsClone +
                                                        1) * cellLengthClone
            sampleYCoordinates = sampleYMin + np.arange(rowsClone +
                                                        1) * cellLengthClone
            sampleYCoordinates = np.flipud(sampleYCoordinates)
            sampleXMin = getMax(xMin, sampleXMin)
            sampleXMax = getMin(xMax, sampleXMax)
            sampleYMin = getMax(yMin, sampleYMin)
            sampleYMax = getMin(yMax, sampleYMax)
            sampleRow0 = getPosition(sampleYMin, sampleYCoordinates, nd)
            sampleRow1 = getPosition(sampleYMax, sampleYCoordinates, nd)
            sampleCol0 = getPosition(sampleXMin, sampleXCoordinates, nd)
            sampleCol1 = getPosition(sampleXMax, sampleXCoordinates, nd)
            sampleRow0, sampleRow1 = checkRowPosition(sampleRow0, sampleRow1)
            variableRow0 = getPosition(sampleYMin, yCoordinates, nd)
            variableRow1 = getPosition(sampleYMax, yCoordinates, nd)
            variableCol0 = getPosition(sampleXMin, xCoordinates, nd)
            variableCol1 = getPosition(sampleXMax, xCoordinates, nd)
            variableRow0, variableRow1 = checkRowPosition(
                variableRow0, variableRow1)
            #-read sample array
            setclone(fileName)
            sampleArray = pcr2numpy(readmap(fileName), MV)
            sampleNrRows, sampleNrCols = sampleArray.shape
            #-create mask
            mask = (variableArray[variableRow0:variableRow1,variableCol0:variableCol1] == MV) &\
             (sampleArray[sampleRow0:sampleRow1,sampleCol0:sampleCol1] != MV)
            #-add values
            print ' adding values in %d, %d rows, columns from (x, y) %.3f, %.3f and %.3f, %.3f to position (row, col) %d, %d and %d, %d' %\
             (sampleNrRows, sampleNrCols,sampleXMin,sampleYMin,sampleXMax,sampleYMax,variableRow0,variableCol0,variableRow1,variableCol1)
            variableArray[variableRow0:variableRow1,variableCol0:variableCol1][mask]= \
             sampleArray[sampleRow0:sampleRow1,sampleCol0:sampleCol1][mask]
        else:
            print '%s does not match resolution and is not processed' % fileName
    #-report output map
    setclone(cloneFileName)
    report(numpy2pcr(Scalar, variableArray, MV), outputFileName)
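# A minimal usage sketch for joinMaps (hedged: all values and file names are
# made up; the tuple layout follows the docstring above):
def joinMaps_usage_sketch():
    inputTuple = ('merged.map',                    # output map name
                  3600, 7200,                      # number of rows, columns
                  -180.0, 90.0,                    # ULL X (xMin), ULL Y (yMax)
                  0.05,                            # cell length
                  -9999.0,                         # missing value identifier
                  ['tile_a.map', 'tile_b.map'],    # input maps to merge
                  'clone.map')                     # clone file name
    joinMaps(inputTuple)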