Example #1
def lddcreate_save(
    lddname,
    dem,
    force,
    corevolume=1e35,
    catchmentprecipitation=1e35,
    corearea=1e35,
    outflowdepth=1e35,
):
    """
    Creates an ldd if the file does not exist or if the force flag is used

    input:
        - lddname (name of the ldd to create)
        - dem (actual dem)
        - force (boolean to force recreation of the ldd)
        - outflowdepth (set to 10.0E35 normally but smaller if needed)

    Output:
        - the LDD

    """
    if os.path.exists(lddname) and not force:
        if Verbose:
            print("Returning existing ldd", lddname)
        return pcr.readmap(lddname)
    else:
        if Verbose:
            print("Creating ldd", lddname)
        LDD = pcr.lddcreate(dem, 10.0e35, outflowdepth, 10.0e35, 10.0e35)
        pcr.report(LDD, lddname)
        return LDD
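A minimal usage sketch (hypothetical file names; assumes `pcr` is the pcraster module and the module-level Verbose flag used inside the function):

import os
import pcraster as pcr

Verbose = True
dem = pcr.readmap("wflow_dem.map")  # hypothetical DEM map
ldd = lddcreate_save("wflow_ldd.map", dem, force=False)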
Example #2
    def reportState(self, variable, variableName):
        """
        Report a map into the state variable directory.
        """
        sample = str(self.currentSampleNumber())
        if re.search(r"\.map", variableName):  # raw string so the dot is matched literally
            filename = variableName
        else:
            filename = frameworkBase.generateNameT(variableName, self.currentTimeStep())
        name = os.path.join(sample, "stateVar", filename)
        pcraster.report(variable, name)
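For reference, a sketch of the path this method builds (sample number, timestep and variable name are hypothetical; generateNameT yields PCRaster's 8.3-style timestep names):

import os
from pcraster.framework import frameworkBase

filename = frameworkBase.generateNameT("sm", 10)  # 'sm000000.010'
name = os.path.join("3", "stateVar", filename)    # '3/stateVar/sm000000.010'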
Example #3
def volume_spread(ldd, hand, subcatch, volume, volume_thres=0., area_multiplier=1., iterations=15):
    """
    Estimate 2D flooding from a 1D simulation per subcatchment reach
    Input:
        ldd -- pcraster object direction, local drain directions
        hand -- pcraster object float32, elevation data normalised to nearest drain
        subcatch -- pcraster object ordinal, subcatchments with IDs
        volume -- pcraster object float32, scalar flood volume (i.e. m3 volume outside the river bank within subcatchment)
        volume_thres=0. -- scalar threshold, at least this amount of m3 of volume should be present in a catchment
        area_multiplier=1. -- in case the maps are not in m2, set a multiplier other than 1. to convert
        iterations=15 -- number of iterations to use
    Output:
        inundation -- pcraster object float32, scalar inundation estimate
    """
    #initial values
    pcr.setglobaloption("unittrue")
    dem_min = pcr.areaminimum(hand, subcatch)  # minimum elevation in subcatchments
    # pcr.report(dem_min, 'dem_min.map')
    dem_norm = hand - dem_min
    # pcr.report(dem_norm, 'dem_norm.map')
    # surface of each subcatchment
    surface = pcr.areaarea(subcatch)*area_multiplier
    pcr.report(surface, 'surface.map')

    error_abs = pcr.scalar(1e10)  # initial error (very high)
    volume_catch = pcr.areatotal(volume, subcatch)
    # pcr.report(volume_catch, 'volume_catch.map')

    depth_catch = volume_catch/surface
    pcr.report(depth_catch, 'depth_catch.map')

    dem_max = pcr.ifthenelse(volume_catch > volume_thres, pcr.scalar(32.),
                             pcr.scalar(0))  # bizarre high inundation depth
    dem_min = pcr.scalar(0.)
    for n in range(iterations):
        print('Iteration: {:02d}'.format(n + 1))
        #####while np.logical_and(error_abs > error_thres, dem_min < dem_max):
        dem_av = (dem_min + dem_max)/2
        # pcr.report(dem_av, 'dem_av00.{:03d}'.format(n + 1))
        # compute value at dem_av
        average_depth_catch = pcr.areaaverage(pcr.max(dem_av - dem_norm, 0), subcatch)
        # pcr.report(average_depth_catch, 'depth_c0.{:03d}'.format(n + 1))
        error = pcr.cover((depth_catch-average_depth_catch)/depth_catch, depth_catch*0)
        # pcr.report(error, 'error000.{:03d}'.format(n + 1))
        dem_min = pcr.ifthenelse(error > 0, dem_av, dem_min)
        dem_max = pcr.ifthenelse(error <= 0, dem_av, dem_max)
    # error_abs = np.abs(error)  # TODO: not needed probably, remove
    inundation = pcr.max(dem_av - dem_norm, 0)
    return inundation
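The loop is a bisection search on the water level per subcatchment: the bracket starts at [0, 32] m and halves every iteration, so the default 15 iterations pin the level down to roughly 32 / 2**15 m, i.e. about a millimetre. A minimal invocation sketch (hypothetical map names; assumes all maps share one clone and metre units):

import pcraster as pcr

pcr.setclone("subcatch.map")
inundation = volume_spread(pcr.readmap("ldd.map"),
                           pcr.readmap("hand.map"),
                           pcr.readmap("subcatch.map"),
                           pcr.readmap("flood_volume.map"),
                           iterations=15)
pcr.report(inundation, "inundation.map")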
Example #4
  def testReportNonSpatial(self):
    raster = pcraster.readmap("abs_Expr.map")
    max1 = pcraster.mapmaximum(raster)
    value, isValid = pcraster.cellvalue(max1, 1)
    self.assertTrue(isinstance(value, float))
    self.assertEqual(isValid, True)
    self.assertEqual(value, 14.0)
    pcraster.report(max1, "maximum.map")
    max2 = pcraster.readmap("maximum.map")

    for i in range(1, 8):
      value, isValid = pcraster.cellvalue(max2, i)
      self.assertEqual(isValid, True)
      self.assertTrue(isinstance(value, float))
      self.assertEqual(value, 14.0)
Example #5
	def initial(self):
		#####################
		# * initial section #
		#####################
		#-constants
		# betaQ [-]: constant of kinematic wave momentum equation
		self.betaQ= 0.6
		#-channel LDD
		self.channelLDD= pcr.ifthenelse(self.waterBodies.distribution != 0,\
			pcr.ldd(5),self.LDD)
		#-channel area and storage
		self.channelArea= self.channelWidth*self.channelLength
		self.channelStorageCapacity= pcr.ifthenelse(self.waterBodies.distribution == 0,\
			self.channelArea*self.channelDepth,pcr.scalar(0.))
		#-basin outlets
		self.basinOutlet= pcr.pit(self.LDD) != 0
		#-read initial conditions
		self.Q= clippedRead.get(self.QIniMap)
		self.actualStorage= clippedRead.get(self.actualStorageIniMap)
		self.actualStorage= pcr.ifthenelse(self.waterBodies.distribution != 0,\
			pcr.ifthenelse(self.waterBodies.location != 0,\
				pcr.areatotal(self.actualStorage,self.waterBodies.distribution),0),\
					self.actualStorage)   
		self.waterBodies.actualStorage= self.waterBodies.retrieveMapValue(self.actualStorage)
		#-update targets of average and bankful discharge
		self.waterBodies.averageQ= self.waterBodies.retrieveMapValue(self.averageQ)
		self.waterBodies.bankfulQ= self.waterBodies.retrieveMapValue(self.bankfulQ)
		#-return the parameters for the kinematic wave,
		# including alpha, wetted area, flood fraction, flood volume and depth
		# and the corresponding land area
		floodedFraction,floodedDepth,\
			self.wettedArea,self.alphaQ= self.kinAlphaComposite(self.actualStorage,self.floodplainMask)
		self.wettedArea= self.waterBodies.returnMapValue(self.wettedArea,\
			self.waterBodies.channelWidth+2.*self.waterBodies.updateWaterHeight())
		self.waterFraction= pcr.ifthenelse(self.waterBodies.distribution == 0,\
			pcr.max(self.waterFractionMask,floodedFraction),self.waterFractionMask)
		self.landFraction= pcr.max(0.,1.-self.waterFraction)
		#-update on velocity and check on Q - NOTE: does not work in case of reservoirs!
		self.flowVelocity= pcr.ifthenelse(self.wettedArea > 0,self.Q/self.wettedArea,0.)
		pcr.report(self.flowVelocity,pcrm.generateNameT(flowVelocityFileName,0).replace('.000','.ini'))
		#-setting initial values for specific runoff and surface water extraction
		self.landSurfaceQ= pcr.scalar(0.)
		self.potWaterSurfaceQ= pcr.scalar(0.)
		self.surfaceWaterExtraction= pcr.scalar(0.)
		#-budget check: setting initial values for cumulative discharge and 
		# net cumulative input, including initial storage [m3]   
		self.totalDischarge= pcr.scalar(0.)
		self.cumulativeDeltaStorage= pcr.catchmenttotal(self.actualStorage,self.LDD)
Example #6
    def set_value(self, long_var_name, src):
        """
        Set the value(s) in a map using a numpy array as source

        :var long_var_name: identifier of a variable in the model.
        :var src: all values to set for the given variable. If only one value
                  is present a uniform map will be set in the wflow model.
        """
        # first part should be the component name
        self.bmilogger.debug("set_value: " + long_var_name)
        cname = long_var_name.split(self.comp_sep)
        if cname[0] in self.bmimodels:
            self.bmimodels[cname[0]].set_value(cname[1], src)
            if self.wrtodisk:
                pcr.report(
                    pcr.numpy2pcr(pcr.Scalar, src, -999),
                    long_var_name + "_set_" + str(self.get_current_time()) + ".map",
                )
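A hedged call sketch (`model` stands for an instance of this combined-BMI class; the component name, variable name and the '@' separator are assumptions about the configuration):

import numpy as np

precip = np.array([0.002])              # a single value becomes a uniform map, per the docstring
model.set_value("wflow_sbm@P", precip)  # 'wflow_sbm' = component, 'P' = variable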
Example #7
    def parse_arguments(self,
            arguments):
        argument_list = []
        for argument in arguments:
            assert isinstance(argument, (str, list, pcraster.Field)), \
                "{} has type {}".format(argument, type(argument))

            if isinstance(argument, str):
                argument_list.append(argument)
            elif isinstance(argument, list):
                argument_list.append(" + ".join(self.parse_arguments(argument)))
            elif isinstance(argument, pcraster.Field):
                temp_name = tempfile.NamedTemporaryFile(dir=os.getcwd()).name
                pcraster.report(argument, temp_name)
                self.created_files_names.append(temp_name)
                argument_list.append(temp_name)

        return argument_list
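A usage sketch under stated assumptions (`runner` stands for an instance of the class that defines parse_arguments and owns created_files_names; a clone has been set): strings pass through, nested lists become ' + ' expressions, and in-memory fields are written to temporary .map files whose names are returned:

field = pcraster.spatial(pcraster.scalar(1))
args = runner.parse_arguments(["dem.map", [field, "gradient.map"]])
# e.g. ['dem.map', '<tempname> + gradient.map']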
Example #8
    def dynamic(self):
        # Write state, using unique names.
        names = [pcraster.framework.generateNameST("dummy{}x".format(i),
            self.currentSampleNumber(), self.currentTimeStep()) for i in
                range(1, self.nr_state_variables + 1)]

        # Write state.
        for name in names:
            pcraster.report(self.dummy, name)

        # Read state.
        for name in names:
            pcraster.readmap(name)

        # Remove state.
        for name in names:
            os.remove(name)

        self.print_memory_used()
Example #9
    def set_value_at_indices(self, long_var_name, inds, src):
        """
        Set the values in a variable using a numpy array of the values given indices

        :var long_var_name: identifier of a variable in the model:
        :var inds: List of Lists of integers inds each nested List contains one index for each dimension of the given variable,
                                        i.e. each nested List indicates one element in the multi-dimensional variable array,
                                        e.g. [[0, 0], [0, 1], [15, 19], [15, 20], [15, 21]] indicates 5 elements in a 2D grid.:
        :var src: Numpy array of values. one value to set for each of the indicated elements:
        """
        cname = long_var_name.split(self.comp_sep)

        if cname[0] in self.bmimodels:
            self.bmimodels[cname[0]].set_value_at_indices(cname[1], inds, src)
            if self.wrtodisk:
                npmap = self.bmimodels[cname[0]].getnumpy(cname[1], inds, src)
                pcr.report(
                    self.bmimodels[cname[0]].get_value(cname[1]),
                    long_var_name + "_set_" + str(self.get_current_time()) + ".map",
                )
Example #10
    def get_value_at_indices(self, long_var_name, inds):
        """
        Get a numpy array of the values at the given indices

        :var long_var_name: identifier of a variable in the model:
        :var inds: List of list each tuple contains one index for each dimension of the given variable, i.e. each tuple indicates one element in the multi-dimensional variable array:

        :return: numpy array of values in the data type returned by the function get_var_type.
        """
        cname = long_var_name.split(self.comp_sep)

        if cname[0] in self.bmimodels:
            tmp = self.bmimodels[cname[0]].get_value(cname[1])
            if self.wrtodisk:
                pcr.report(
                    pcr.numpy2pcr(pcr.Scalar, tmp, -999),
                    long_var_name + "_get_" + str(self.get_current_time()) + ".map",
                )
            return self.bmimodels[cname[0]].get_value_at_indices(cname[1], inds)
        # else:
        return None
Example #11
    def get_value(self, long_var_name):
        """
        Get the value(s) of a variable as a numpy array

        :var long_var_name: name of the variable
        :return: a np array of long_var_name
        """
        # first part should be the component name
        self.bmilogger.debug("get_value: " + long_var_name)
        cname = long_var_name.split(self.comp_sep)
        if cname[0] in self.bmimodels:
            tmp = self.bmimodels[cname[0]].get_value(cname[1])
            if self.wrtodisk:
                pcr.report(
                    pcr.numpy2pcr(pcr.Scalar, tmp, -999),
                    long_var_name + "_get_" + str(self.get_current_time()) + ".map",
                )
            return tmp
        else:
            self.bmilogger.error("get_value: " + long_var_name + " returning None!!!!")
            return None
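A round-trip sketch of the numpy/pcraster conversion used for the debug dumps above (clone dimensions and file name are hypothetical; -999 marks missing values):

import numpy as np
import pcraster as pcr

pcr.setclone(3, 3, 1.0, 0.0, 0.0)             # rows, cols, cell size, west, north
arr = np.arange(9, dtype=np.float32).reshape(3, 3)
field = pcr.numpy2pcr(pcr.Scalar, arr, -999)  # numpy -> map
back = pcr.pcr2numpy(field, -999)             # map -> numpy
pcr.report(field, "debug_dump.map")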
Example #12
  def _reportNew(self,
    variable,
    name,
    style=1):
    """

    .. todo::

      `style` argument is not used.
    """
    head, tail = os.path.split(name)

    if re.search("\.", tail):
      msg = "File extension given in '" + name + "' not allowed, provide filename without extension"
      raise FrameworkError(msg)

    directoryPrefix = ""
    nameSuffix = ".map"
    newName = ""

    if hasattr(self._userModel(), "_inStochastic"):
      if self._userModel()._inStochastic():
        if self._userModel()._inPremc():
          newName = name + nameSuffix
        elif self._userModel()._inPostmc():
          newName = name + nameSuffix
        else:
          directoryPrefix = str(self._userModel().currentSampleNumber())

    if self._userModel()._inInitial():
      newName = name + nameSuffix

    if hasattr(self._userModel(), "_inDynamic"):
      if self._userModel()._inDynamic() or self._inUpdateWeight():
        newName = generateNameT(name, self._userModel().currentTimeStep())

    path = os.path.join(directoryPrefix, newName)
    import pcraster
    pcraster.report(variable, path)
Example #13
    def parse_arguments(self,
            arguments):
        argument_list = []
        for argument in arguments:
            assert isinstance(argument, (str, list, pcraster.Field)), \
                "{} has type {}".format(argument, type(argument))

            if isinstance(argument, str):
                argument_list.append(argument)
            elif isinstance(argument, list):
                argument_list.append(" + ".join(self.parse_arguments(argument)))
            elif isinstance(argument, pcraster.Field):
                with warnings.catch_warnings():
                    # RuntimeWarning: tempnam is a potential security risk to
                    # your program.
                    warnings.simplefilter("ignore")
                    # NOTE: os.tempnam only exists on Python 2; on Python 3 use
                    # tempfile.NamedTemporaryFile as in the variant of this
                    # function shown earlier.
                    temp_name = os.tempnam(os.getcwd())
                pcraster.report(argument, temp_name)
                self.created_files_names.append(temp_name)
                argument_list.append(temp_name)

        return argument_list
Example #14
 def test_5(self):
     """ unpickle scalar """
     field_pkl = pickle.load(open("pickle_scalar.pkl", "rb"))  # pickles must be read in binary mode
     pcraster.report(field_pkl, "bla_scalar.map")
     self.assertTrue(self.mapEqualsValidated(field_pkl, "sin_Result.map"))
Example #15
 def test_2(self):
     """ unpickle boolean """
     field_pkl = pickle.load(open("pickle_boolean.pkl", "rb"))  # binary mode for pickle
     pcraster.report(field_pkl, "bla.map")
     self.assertTrue(self.mapEqualsValidated(field_pkl, "boolean_Result.map"))
Example #16
mldd.setStream(
    # TODO Create overload which takes strings, or handle internally.
    pcraster.readmap(os.path.join(dataPath, "ELddF000.out")),
    pcraster.readmap(os.path.join(dataPath, "NELddF00.out")),
    pcraster.readmap(os.path.join(dataPath, "NLddF000.out")),
    pcraster.readmap(os.path.join(dataPath, "NWLddF00.out")),
    pcraster.readmap(os.path.join(dataPath, "SELddF00.out")),
    pcraster.readmap(os.path.join(dataPath, "SLddF000.out")),
    pcraster.readmap(os.path.join(dataPath, "SWLddF00.out")),
    pcraster.readmap(os.path.join(dataPath, "WLddF000.out")))
mldd.addStream(pcraster.readmap(os.path.join(dataPath, "ELddF000.out")))
mldd.setDem(pcraster.spatial(pcraster.scalar(1)))

upstream = mldd.upstream(pcraster.spatial(pcraster.scalar(1)))
pcraster.report(upstream, "upstream.map")

accuflux = mldd.accuflux(
    pcraster.ifthen(pcraster.defined(upstream),
                    pcraster.spatial(pcraster.scalar(1))))
pcraster.report(accuflux, "accuflux.map")

dem = mldd.getDem()
pcraster.report(dem, "dem.map")

streamN, streamNE, streamE, streamSE, streamS, streamSW, streamW, streamNW = \
         mldd.getStream()

pcraster.report(streamN, "streamN.map")
pcraster.report(streamNE, "streamNE.map")
pcraster.report(streamE, "streamE.map")
Example #17
    def dynamic(self):

        # re-calculate current model time using current pcraster timestep value
        self.modelTime.update(self.currentTimeStep())
        msg = "\n\n\n Processing the date " + self.modelTime.fulldate + "\n\n\n"
        logger.info(msg)

        # read netcdf file
        logger.info("Reading netcdf file.")
        # - set the clone to the netcdf file extent
        pcr.setclone(self.inputClone)
        # - read netcdf file
        input_pcr = vos.netcdf2PCRobjClone(ncFile = self.netcdf_input_file, \
                                           varName = "automatic", \
                                           dateInput = self.modelTime.fulldate, \
                                           useDoy = None, \
                                           cloneMapFileName  = self.inputClone, \
                                           LatitudeLongitude = True, \
                                           specificFillValue = None)

        # reprojection
        logger.info("Reprojection.")
        #
        # - save it to a pcraster file in the temporary folder
        tmp_input_pcr_file = self.tmpDir + "/" + "tmp_input_pcr.map"
        pcr.report(input_pcr, tmp_input_pcr_file)
        # - convert it to tif
        tmp_input_tif_file = self.tmpDir + "/" + "tmp_input_pcr.tif"
        cmd = 'gdal_translate ' + tmp_input_pcr_file + " " + tmp_input_tif_file
        logger.debug(cmd)
        os.system(cmd)
        # - re-projection to the outputProjection
        tmp_reprj_tif_file = self.tmpDir + "/" + "tmp_reprj_tif.tif"
        bound_box = self.x_min_output + " " + self.y_min_output + " " + self.x_max_output + " " + self.y_max_output
        cell_size = self.cell_length + " " + self.cell_length
        cmd = 'gdalwarp '+\
              '-s_srs ' + '"' + self.inputProjection  +'" '+\
              '-t_srs ' + '"' + self.outputProjection +'" '+\
              '-te ' + bound_box + " " +\
              '-tr ' + cell_size + " " +\
              '-r '+ self.resample_method + " " +\
              '-srcnodata -3.4028234663852886e+38 -dstnodata -3.4028234663852886e+38 '+\
              tmp_input_tif_file + " "+\
              tmp_reprj_tif_file
        logger.debug(cmd)
        os.system(cmd)
        # - convert it back to pcraster map
        tmp_reprj_map_file = self.tmpDir + "/" + "tmp_reprj_map.map"
        cmd = 'gdal_translate -of PCRaster ' + tmp_reprj_tif_file + " " + tmp_reprj_map_file
        logger.debug(cmd)
        os.system(cmd)
        # - make sure that it has a valid mapattr
        cmd = 'mapattr -c ' + self.outputClone + " " + tmp_reprj_map_file
        logger.debug(cmd)
        os.system(cmd)

        # read the re-projected file
        logger.info(
            "Read the re-projected file, including unit conversion/correction."
        )
        # - set the clone to the output clone
        pcr.setclone(self.outputClone)
        output_pcr = pcr.readmap(tmp_reprj_map_file)
        # - unit conversion
        output_pcr = output_pcr * self.unit_conversion_factor + self.unit_conversion_offset
        #~ pcr.aguila(output_pcr)
        #~ raw_input("Press Enter to continue...")

        # perform area operation
        logger.info("Performing area operation.")
        output_area_pcr = pcr.areaaverage(output_pcr, self.area_class)
        #~ pcr.aguila(output_area_pcr)
        #~ raw_input("Press Enter to continue...")

        # save it to a daily tss file
        logger.info("Saving daily value to a tss file.")
        self.tss_daily_reporting.sample(output_area_pcr)

        # calculate 10 day average
        # - initiate/reset counter and accumulator
        if self.modelTime.day == 1 or self.modelTime.day == 11 or self.modelTime.day == 21:
            self.day_counter = pcr.scalar(0.0)
            self.cummulative_per_ten_days = pcr.scalar(0.0)
            self.average_per_ten_days = pcr.scalar(0.0)
        # - accumulating
        self.day_counter = self.day_counter + 1.0
        self.cummulative_per_ten_days = self.cummulative_per_ten_days + output_area_pcr
        # - calculate 10 day average and reporting
        if self.modelTime.day == 10 or self.modelTime.day == 20 or self.modelTime.isLastDayOfMonth(
        ):
            logger.info(
                'Calculating/saving 10 day average value to a tss file.')
            average_per_ten_days = self.cummulative_per_ten_days / pcr.ifthen(
                self.landmask, self.day_counter)
            #~ pcr.aguila(average_per_ten_days)
            #~ raw_input("Press Enter to continue...")
            if self.report_10day_pcr_files:
                logger.info('Saving 10 day average value to pcraster file.')
                cwd = os.getcwd()
                os.chdir(self.mapDir)
                self.report(average_per_ten_days, "dcd")
                os.chdir(cwd)
        else:
            average_per_ten_days = pcr.scalar(-9999.99)
        self.tss_10day_reporting.sample(average_per_ten_days)

        # clean the temporary folder
        cmd = 'rm -r ' + self.tmpDir + "/*"
        print(cmd)
        os.system(cmd)

        # change directory to the output folder so that the tss file will be stored there
        os.chdir(self.output_folder)
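The dekad bookkeeping above resets its counter on days 1, 11 and 21 and reports on days 10, 20 and the last day of the month; a pure-Python sketch of that logic (a 30-day month is assumed for illustration):

counter, total = 0, 0.0
for day in range(1, 31):
    if day in (1, 11, 21):
        counter, total = 0, 0.0
    counter += 1
    total += 1.0                     # stand-in for the daily area average
    if day in (10, 20, 30):
        print(day, total / counter)  # the 10-day mean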
Example #18
def joinMaps(inputTuple):
    '''Merges maps starting from an input tuple that specifies the output map name, the number of rows\
 and columns, the upper-left X and Y coordinates, the cell length, the missing value identifier, a list\
 of input maps and the clone file name'''
    outputFileName = inputTuple[0]
    nrRows = inputTuple[1]
    nrCols = inputTuple[2]
    xMin = inputTuple[3]
    yMax = inputTuple[4]
    cellLength = inputTuple[5]
    MV = inputTuple[6]
    fileNames = inputTuple[7]
    cloneFileName = inputTuple[8]
    #-echo to screen
    print('combining files for %s' % outputFileName, end=' ')
    #-get extent
    xMax = xMin + nrCols * cellLength
    yMin = yMax - nrRows * cellLength
    xCoordinates = xMin + np.arange(nrCols + 1) * cellLength
    yCoordinates = yMin + np.arange(nrRows + 1) * cellLength
    yCoordinates = np.flipud(yCoordinates)
    print('between %.2f, %.2f and %.2f, %.2f' % (xMin, yMin, xMax, yMax))
    #-set output array
    variableArray = np.ones((nrRows, nrCols)) * MV
    #-iterate over maps
    for fileName in fileNames:
        print(fileName)
        attributeClone = getMapAttributesALL(fileName)
        cellLengthClone = attributeClone['cellsize']
        rowsClone = attributeClone['rows']
        colsClone = attributeClone['cols']
        xULClone = attributeClone['xUL']
        yULClone = attributeClone['yUL']

        # check whether both maps have the same attributes and process
        process, nd = checkResolution(cellLength, cellLengthClone)

        process = True  # NOTE: forces processing regardless of the resolution check above

        if process:
            #-get coordinates and locations
            sampleXMin = xULClone
            sampleXMax = xULClone + colsClone * cellLengthClone
            sampleYMin = yULClone - rowsClone * cellLengthClone
            sampleYMax = yULClone
            sampleXCoordinates = sampleXMin + np.arange(colsClone +
                                                        1) * cellLengthClone
            sampleYCoordinates = sampleYMin + np.arange(rowsClone +
                                                        1) * cellLengthClone
            sampleYCoordinates = np.flipud(sampleYCoordinates)
            sampleXMin = getMax(xMin, sampleXMin)
            sampleXMax = getMin(xMax, sampleXMax)
            sampleYMin = getMax(yMin, sampleYMin)
            sampleYMax = getMin(yMax, sampleYMax)
            sampleRow0 = getPosition(sampleYMin, sampleYCoordinates, nd)
            sampleRow1 = getPosition(sampleYMax, sampleYCoordinates, nd)
            sampleCol0 = getPosition(sampleXMin, sampleXCoordinates, nd)
            sampleCol1 = getPosition(sampleXMax, sampleXCoordinates, nd)
            sampleRow0, sampleRow1 = checkRowPosition(sampleRow0, sampleRow1)
            variableRow0 = getPosition(sampleYMin, yCoordinates, nd)
            variableRow1 = getPosition(sampleYMax, yCoordinates, nd)
            variableCol0 = getPosition(sampleXMin, xCoordinates, nd)
            variableCol1 = getPosition(sampleXMax, xCoordinates, nd)
            variableRow0, variableRow1 = checkRowPosition(
                variableRow0, variableRow1)
            #-read sample array
            setclone(fileName)
            sampleArray = pcr2numpy(readmap(fileName), MV)
            sampleNrRows, sampleNrCols = sampleArray.shape
            #-create mask
            mask= (variableArray[variableRow0:variableRow1,variableCol0:variableCol1] == MV) &\
                (sampleArray[sampleRow0:sampleRow1,sampleCol0:sampleCol1] != MV)
            #-add values
            print(' adding values in %d, %d rows, columns from (x, y) %.3f, %.3f and %.3f, %.3f to position (row, col) %d, %d and %d, %d' %\
                (sampleNrRows, sampleNrCols,sampleXMin,sampleYMin,sampleXMax,sampleYMax,variableRow0,variableCol0,variableRow1,variableCol1))
            variableArray[variableRow0:variableRow1,variableCol0:variableCol1][mask]= \
                sampleArray[sampleRow0:sampleRow1,sampleCol0:sampleCol1][mask]
        else:
            print('%s does not match resolution and is not processed' %
                  fileName)
    #-report output map
    setclone(cloneFileName)
    report(numpy2pcr(Scalar, variableArray, MV), outputFileName)
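A sketch of the nine-element input tuple (all values hypothetical; the order matches the unpacking at the top of joinMaps):

inputTuple = ("merged.map",                  # output map name
              2160, 4320,                    # nrRows, nrCols
              -180.0, 90.0,                  # xMin (west), yMax (north)
              1.0 / 12.0,                    # cell length: 5 arc minutes in degrees
              -999.9,                        # missing value
              ["tile1.map", "tile2.map"],    # input maps
              "global_clone.map")            # clone file name
joinMaps(inputTuple)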
Example #19
 def test_06(self):
     """ unpickle directional """
     field_pkl = pickle.load(open("pickle_direction.pkl", "rb"))
     pcraster.report(field_pkl, "pickle_direction.map")
     self.assertTrue(
         self.mapEqualsValidated(field_pkl, "directional_Result1.map"))
Example #20
    # variable name
    variable_name = str(return_period) + "_of_" + varDict.netcdf_short_name[var_name]

    msg = "Writing " + str(variable_name)
    logger.info(msg)
    
    # read from pcraster files
    #
    inundation_file_name = output_directory + "/global/maps/" + "inun_" + str(return_period) + "_of_flood_inundation_volume_catch_" + strahler_order_option + ".tif.map"
    if map_type_name == "channel_storage.map": inundation_file_name = output_directory + "/global/maps/" + "inun_" + str(return_period) + "_of_channel_storage_catch_" + strahler_order_option + ".tif.map"
    #
    inundation_map = pcr.readmap(inundation_file_name)
    inundation_map = pcr.cover(inundation_map, 0.0)
    #
    # - make sure that we have positive extreme values - this is not necessary, but to make sure
    inundation_map = pcr.max(inundation_map, 0.0)
    #
    # - make sure that extreme value maps increasing over return period - this is not necessary, but to make sure
    if i_return_period >  0: inundation_map = pcr.max(previous_return_period_map, inundation_map) 
    previous_return_period_map = inundation_map
    
    # using values in the landmask only and masking out permanent water bodies
    inundation_map = pcr.ifthen(landmask_used, inundation_map)
    inundation_map = pcr.ifthen(non_permanent_water_bodies, inundation_map)
    
    # report in pcraster maps
    pcr.report(inundation_map, inundation_file_name + ".masked_out.map")
    
    # write to netcdf files
    netcdf_report.data_to_netcdf(file_name, variable_name, pcr.pcr2numpy(inundation_map, vos.MV), timeBounds, timeStamp = None, posCnt = 0)
Example #21
num_of_cols_30sec = str(vos.getMapAttributesALL(output_file)["cols"] / 10.)
x_coordinate = str(vos.getMapAttributesALL(output_file)["xUL"])
y_coordinate = str(vos.getMapAttributesALL(output_file)["yUL"])
cellsize_30sec = "0.00833333333333333333333333333333333333333333333333333333333333333333333333333333"
output_file = output_folder + "/" + tile_code + ".30sec.clo.map"
cmd = "mapattr -s -R " + num_of_rows_30sec + " -C " + num_of_cols_30sec + " -B -P yb2t -x " + x_coordinate + " -y " + y_coordinate + " -l " + cellsize_30sec + " " + output_file
print(cmd)
os.system(cmd)

# give the ids for every 30 arc sec cell (e.g. pcrcalc dem_tif_n60w180_30sec.ids.map  = "nominal(uniqueid(dem_tif_n60w180_30sec.clo.map))")
input_file = output_file
output_file = output_folder + "/" + tile_code + ".30sec.ids.map"
pcr.setclone(input_file)
print("Making the clone map at 30 arcsec resolution.")
unique_ids_30sec = pcr.nominal(pcr.uniqueid(input_file))
pcr.report(unique_ids_30sec, output_file)
# - Note that this map has 30 arc sec resolution.

# resample the ids map to 3 arc sec resolution (e.g. gdalwarp -tr 0.00083333333333333333333333333333333333333 0.00083333333333333333333333333333333333333 dem_tif_n60w180_30sec.ids.map dem_tif_n60w180_30sec.ids.3sec.tif
input_file = output_file
output_file = output_folder + "/" + tile_code + ".30sec.ids.3sec.tif"
cellsize_3sec = "0.000833333333333333333333333333333333333333333333333333333333333333333333333333333"
cmd = "gdalwarp -tr " + cellsize_3sec + " " + cellsize_3sec + " " + input_file + " " + output_file
print(cmd)
os.system(cmd)
# - This still a tif file.

# convert the tif file to PCRaster map
input_file = output_file
output_file = output_folder + "/" + tile_code + ".30sec.ids.3sec.map"
cmd = 'gdal_translate -of PCRaster ' + input_file + " " + output_file
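For the cell sizes used here: 30 arc sec is 30/3600 degree and 3 arc sec is 3/3600 degree, a factor-10 refinement, which is why the 3 arc sec column count is divided by 10 above:

print(30 / 3600.0)  # 0.008333... degrees (30 arc sec)
print(3 / 3600.0)   # 0.000833... degrees (3 arc sec)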
Example #22
def dumpPCRaster(name, var, num):
    path1 = os.path.join(str(num), 'stateVar', name)
    pcraster.report(var, path1)
Example #23
    def modflow_simulation(self,\
                           simulation_type,\
                           initial_head,\
                           currTimeStep = None,\
                           NSTP   = 1, \
                           HCLOSE = 0.05,\
                           RCLOSE = 100.* 400.*400.,\
                           MXITER = 300,\
                           ITERI = 100,\
                           NPCOND = 1,\
                           RELAX = 1.00,\
                           NBPOL = 2,\
                           DAMP = 1,\
                           ITMUNI = 4, LENUNI = 2, PERLEN = 1.0, TSMULT = 1.0):
        # initiate pcraster modflow object
        self.initiate_modflow()

        if simulation_type == "transient":
            logger.info("Preparing MODFLOW input for a transient simulation.")
            SSTR = 0
        if simulation_type == "steady-state":
            logger.info("Preparing MODFLOW input for a steady-state simulation.")
            SSTR = 1

        # waterBody class to define the extent of lakes and reservoirs
        #
        if simulation_type == "steady-state":
            self.WaterBodies = waterBodies.WaterBodies(self.iniItems,\
                                                       self.landmask,\
                                                       self.onlyNaturalWaterBodies)
            self.WaterBodies.getParameterFiles(date_given = self.iniItems.globalOptions['startTime'],\
                                               cellArea = self.cellAreaMap, \
                                               ldd = self.lddMap)        
        #
        if simulation_type == "transient":
            if currTimeStep.timeStepPCR == 1:
               self.WaterBodies = waterBodies.WaterBodies(self.iniItems,\
                                                          self.landmask,\
                                                          self.onlyNaturalWaterBodies)
            if currTimeStep.timeStepPCR == 1 or currTimeStep.doy == 1:
               self.WaterBodies.getParameterFiles(date_given = str(currTimeStep.fulldate),\
                                                  cellArea = self.cellAreaMap, \
                                                  ldd = self.lddMap)        

        # using dem_average as the initial groundwater head value 
        self.pcr_modflow.setInitialHead(initial_head, 1)
        
        # set parameter values for the DIS package and PCG solver
        self.pcr_modflow.setDISParameter(ITMUNI, LENUNI, PERLEN, NSTP, TSMULT, SSTR)
        self.pcr_modflow.setPCG(MXITER, ITERI, NPCOND, HCLOSE, RCLOSE, RELAX, NBPOL, DAMP)
        #
        # Some notes about the values  
        #
        # ITMUNI = 4     # indicates the time unit (0: undefined, 1: seconds, 2: minutes, 3: hours, 4: days, 5: years)
        # LENUNI = 2     # indicates the length unit (0: undefined, 1: feet, 2: meters, 3: centimeters)
        # PERLEN = 1.0   # duration of a stress period
        # NSTP   = 1     # number of time steps in a stress period
        # TSMULT = 1.0   # multiplier for the lengths of successive time steps within a stress period
        # SSTR   = 1     # 0 - transient, 1 - steady state
        #
        # MXITER = 100                # maximum number of outer iterations
        # ITERI  = 30                 # number of inner iterations
        # NPCOND = 1                  # 1 - Modified Incomplete Cholesky, 2 - Polynomial matrix conditioning method;
        # HCLOSE = 0.01               # HCLOSE (unit: m) # 0.05 is working
        # RCLOSE = 10.* 400.*400.     # RCLOSE (unit: m3) ; Deltares people uses 100 m3 for their 25 m resolution modflow model  
        # RELAX  = 1.00               # relaxation parameter used with NPCOND = 1
        # NBPOL  = 2                  # indicates whether the estimate of the upper bound on the maximum eigenvalue is 2.0 (but we do not use it, since NPCOND = 1) 
        # DAMP   = 1                  # no damping (DAMP introduced in MODFLOW 2000)
        
        # read input files (for the steady-state condition, we use pcraster maps):
        if simulation_type == "steady-state":
            # - discharge (m3/s) from PCR-GLOBWB
            discharge = vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions['avgDischargeInputMap'],\
                                                self.cloneMap, self.tmpDir, self.inputDir)
            # - recharge/capillary rise (unit: m/day) from PCR-GLOBWB 
            gwRecharge = vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions['avgGroundwaterRechargeInputMap'],\
                                                self.cloneMap, self.tmpDir, self.inputDir)
            #
            # - for a steady state condition that will be used as the initial condition 
            #   ignore any withdrawal from groundwater
            gwRecharge = pcr.max(0.0, gwRecharge) 
            gwAbstraction = pcr.spatial(pcr.scalar(0.0))

        # read input files (for the transient, input files are given in netcdf files):
        if simulation_type == "transient":
            # - discharge (m3/s) from PCR-GLOBWB
            discharge = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['dischargeInputNC'],
                                               "discharge",str(currTimeStep.fulldate),None,self.cloneMap)
            # - recharge/capillary rise (unit: m/day) from PCR-GLOBWB 
            gwRecharge = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['groundwaterRechargeInputNC'],\
                                               "groundwater_recharge",str(currTimeStep.fulldate),None,self.cloneMap)
            # - groundwater abstraction (unit: m/day) from PCR-GLOBWB 
            gwAbstraction = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['groundwaterAbstractionInputNC'],\
                                               "total_groundwater_abstraction",str(currTimeStep.fulldate),None,self.cloneMap)

        # set recharge and river packages
        self.set_river_package(discharge)
        self.set_recharge_package(gwRecharge, gwAbstraction)
        
        # execute MODFLOW 
        logger.info("Executing MODFLOW.")
        self.pcr_modflow.run()
        
        # TODO: Add the mechanism to check whether a run has converged or not.

        # obtaining the results from modflow simulation
        self.groundwaterHead = None
        self.groundwaterHead = self.pcr_modflow.getHeads(1)  

        # calculate groundwater depth only in the landmask region
        self.groundwaterDepth = pcr.ifthen(self.landmask, self.dem_average - self.groundwaterHead)
        
        # for debugging only
        pcr.report(self.groundwaterHead , "gw_head.map")
        pcr.report(self.groundwaterDepth, "gw_depth.map")
        pcr.report(self.surface_water_elevation, "surface_water_elevation.map")
Example #24
 def __init__(self, distributionMap, outletMap, typeMap, channelBreadthMap,
              averageQMap, bankfulQMap, LDDMap, parameterTBL, deltaTime,
              clippedRead):
     #-clippedRead
     self.clippedRead = clippedRead
     #-constants
     self.deltaTime = deltaTime
     self.MV = -999.9
     self.cLake = 1.7
     self.minLimit = 0.0
     #-spatial field of nominal IDs delineating the extent of the respective waterbodies
     self.distribution = self.clippedRead.get(distributionMap, 'nominal')
     self.outlet = self.clippedRead.get(outletMap, 'nominal')
     waterBodiesType = self.clippedRead.get(typeMap, 'nominal')
     LDD = self.clippedRead.get(LDDMap, 'ldd')
     endorheicLakes= pcr.ifthen((pcr.areatotal(pcr.scalar(self.outlet != 0),self.distribution) == 0) & (self.distribution != 0),\
      self.distribution)
     self.location= pcr.cover(pcr.ifthen(self.outlet != 0,self.outlet),\
      pcr.ifthenelse((self.distribution != 0) & (LDD == 5),self.distribution,0))
     pcr.report(self.location, 'maps/waterbodies_reportlocations.map')
     #-extract ID and location as row and column number for outlets; these are subsequently used to extract type from other maps
     tempIDArray = pcr2numpy(self.location, self.MV)
     nrRows, nrCols = tempIDArray.shape
     iCnt = 0
     self.ID = np.ones((1)) * self.MV
     self.coordinates = np.ones((2)) * self.MV
     for row in range(nrRows):
         for col in xrange(nrCols):
             if tempIDArray[row, col] > 0:
                 iCnt += 1
                 if iCnt > 1:
                     self.ID = np.append(self.ID, tempIDArray[row, col])
                     self.coordinates= np.vstack((self.coordinates,\
                      np.array([row,col])))
                 else:
                     self.ID = np.array([tempIDArray[row, col]])
                     self.coordinates = np.array([row, col])
     #-process valid entries
     if self.ID[0] != self.MV:
         #-reset nrRows to nr of rowwise entries in ID
         self.nrEntries = self.ID.shape[0]
         #-sort on ID
         indices = self.ID.argsort()
         self.ID = self.ID[indices]
         self.coordinates = self.coordinates[indices]
     else:
         self.nrEntries = None
         self.coordinates = np.array([])
     #-read from maps: type, channel breadth and average discharge
     self.type = self.retrieveMapValue(
         self.clippedRead.get(typeMap, 'nominal'))
     self.channelWidth = self.retrieveMapValue(
         self.clippedRead.get(channelBreadthMap))
     self.averageQ = self.retrieveMapValue(
         self.clippedRead.get(averageQMap))
     self.bankfulQ = self.retrieveMapValue(
         self.clippedRead.get(bankfulQMap))
     self.endorheic = self.retrieveMapValue(pcr.cover(endorheicLakes, 0))
     #-set to zero, to be updated in script
     self.demand = np.zeros((self.nrEntries))
     self.actualStorage = np.zeros((self.nrEntries))
     self.actualArea = np.zeros((self.nrEntries))
     self.actualQ = np.zeros((self.nrEntries))
     #-read from table
     self.capacity = np.ones((self.nrEntries)) * self.MV
     self.maxLimit = np.ones((self.nrEntries)) * self.MV
     self.avParameter = np.ones((self.nrEntries)) * self.MV
     tempIDArray = np.loadtxt(parameterTBL)
     if self.nrEntries is not None:
         for iCnt in range(self.nrEntries):
             ID = self.ID[iCnt]
             mask = tempIDArray[:, 0] == ID
             if np.any(mask):
                 #-entry in table with reservoir properties
                 #-set capacity, fractional maximum storage limit and area-volume parameter
                 self.capacity[iCnt] = tempIDArray[:, 1][mask]
                 self.maxLimit[iCnt] = tempIDArray[:, 2][mask]
                 self.avParameter[iCnt] = tempIDArray[:, 3][mask]
             else:
                 #-lake or wetland: set capacity and upper limit to zero and the area-volume parameter
                 # to default values
                 self.capacity[iCnt] = 0.
                 self.maxLimit[iCnt] = 0.
                 if self.type[iCnt] == 1:
                     #-lake
                     self.avParameter[iCnt] = 210.5
                 else:
                     #-wetland
                     self.avParameter[iCnt] = 1407.2
Example #25
                                          varUnit    = variable_unit, \
                                          longName   = var_long_name, \
                                          comment    = varDict.comment[var_name]
                                          )
        
        # store the variables to pcraster map and netcdf files:
        data_dictionary = {}
        for return_period in return_periods:
            
            # variable name
            variable_name = str(return_period) + "_of_" + varDict.netcdf_short_name[var_name]
            
            # report to a pcraster map
            #~ print bias_type
            #~ print return_period
            pcr.report(pcr.ifthen(landmask, extreme_values[bias_type][return_period]), bias_type + "_" + variable_name + ".map")
            #~ if "above_reference_at_the_same_return_period" in bias_type: pcr.aguila(pcr.ifthen(landmask, extreme_values[bias_type][return_period]))
        
            # put it into a dictionary
            data_dictionary[variable_name] = pcr.pcr2numpy(extreme_values[bias_type][return_period], vos.MV)
        
        # save the variables to a netcdf file
        netcdf_report.dictionary_of_data_to_netcdf(netcdf_file[bias_type][var_name]['file_name'], \
                                                   data_dictionary, \
                                                   timeBounds)

    # saving "return_period_historical" and "problematic_mult_with_zero_historical_gcm"
    # - to pcraster files only
    for return_period in return_periods:

        # report to pcraster maps
Example #26
startTime        = time.time()
scratchDir       = '/home/straa005/masks/scratch'
cellSizeMinutes  = 5  # minutes; will be converted to decimal degrees
os.chdir(scratchDir)
cloneMap         = makeGlobalMap('clone%smin.map'%cellSizeMinutes, cellSize=cellSizeMinutes/60., dataType='S', output='map')
ldd              = pcr.readmap('/data/hydroworld/PCRGLOBWB20/input5min/routing/lddsound_05min.map')
mask48           = pcr.readmap('/home/straa005/masks/areas48/mask48.map')
areas            = range(1,47,1)

pitsDict = {13 : [13,3153],
            28 : [28,3515]}

lddOld = pcr.readmap('/home/straa005/LDD/lddOutput/ldd_HydroSHEDS_Hydro1k_5min.map')

lddDiff = pcr.scalar(ldd) - pcr.scalar(lddOld)
pcr.report(pcr.boolean(lddDiff), 'lddDiff.map')


############################### main ###################################
ldd = pcr.nominal(ldd)
ldd = fillMVBoundingBox(ldd, 1, 57, 76, 26, 47)
ldd = fillMVBoundingBox(ldd, 1, 58, 62, 51, 60)
ldd = fillMVBoundingBox(ldd, 1, -108, -101, 31, 40)
ldd = pcr.lddrepair(pcr.ldd(ldd))
pits = pcr.pit(ldd)

pcr.setglobaloption('unitcell')

continentMasks = pcr.cover(mask48, pcr.windowmajority(mask48,5))
Example #27
        var_long_name = str(
            return_period) + "_of_" + varDict.netcdf_long_name[var_name]
        #
        netcdf_report.create_variable(\
                                      ncFileName = netcdf_file[var_name]['file_name'], \
                                      varName    = variable_name, \
                                      varUnit    = variable_unit, \
                                      longName   = var_long_name, \
                                      comment    = varDict.comment[var_name]
                                      )

    # store the variables to pcraster map and netcdf files:
    data_dictionary = {}
    for return_period in return_periods:

        # variable name
        variable_name = str(
            return_period) + "_of_" + varDict.netcdf_short_name[var_name]

        # report to a pcraster map
        pcr.report(extreme_values[return_period], variable_name + ".map")

        # put it into a dictionary
        data_dictionary[variable_name] = pcr.pcr2numpy(
            extreme_values[return_period], vos.MV)

    # save the variables to a netcdf file
    netcdf_report.dictionary_of_data_to_netcdf(netcdf_file[var_name]['file_name'], \
                                               data_dictionary, \
                                               timeBounds)
Example #28
msg = "Set the landmask to : " + str(landmask_map_file)
logger.info(msg)
landmask = pcr.readmap(landmask_map_file)

# resampling low resolution ldd map
msg = "Resample the low resolution ldd map."
logger.info(msg)
ldd_map_low_resolution_file_name = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/lddsound_05min.map"
ldd_map_low_resolution = vos.readPCRmapClone(ldd_map_low_resolution_file_name, \
                                             clone_map_file, \
                                             tmp_folder, \
                                             None, True, None, False)
ldd_map_low_resolution = pcr.ifthen(landmask, ldd_map_low_resolution)    # NOTE THAT YOU MAY NOT HAVE TO MASK-OUT THE LDD.
ldd_map_low_resolution = pcr.lddrepair(pcr.ldd(ldd_map_low_resolution))
ldd_map_low_resolution = pcr.lddrepair(ldd_map_low_resolution)
pcr.report(ldd_map_low_resolution, "resampled_low_resolution_ldd.map")


# permanent water bodies files (at 5 arc-minute resolution)
reservoir_capacity_file = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/reservoirs/waterBodiesFinal_version15Sept2013/maps/reservoircapacity_2010.map"
fracwat_file            = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/reservoirs/waterBodiesFinal_version15Sept2013/maps/fracwat_2010.map"
water_body_id_file      = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/reservoirs/waterBodiesFinal_version15Sept2013/maps/waterbodyid_2010.map"


# cell_area_file
cell_area_file = "/projects/0/dfguu/data/hydroworld/PCRGLOBWB20/input5min/routing/cellsize05min.correct.map"


# bankfull capacity (5 arcmin, volume: m3)
#~ surface_water_bankfull_capacity_file_name = None
#~ surface_water_bankfull_capacity_file_name = "/projects/0/aqueduct/users/edwinsut/aqueduct_flood_analyzer_results/version_2016_12_11/flood_analyzer_analysis/historical/extreme_values/watch_1960-1999/2-year_of_channel_storage.map"
Example #29
def main():

    ### Read input arguments #####
    logfilename = 'wtools_static_maps.log'
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option('-q', '--quiet',
                      dest='verbose', default=True, action='store_false',
                      help='do not print status messages to stdout')
    parser.add_option('-i', '--ini', dest='inifile', default=None,
                      help='ini file with settings for static_maps.exe')
    parser.add_option('-s', '--source',
                      dest='source', default='wflow',
                      help='Source folder containing clone (default=./wflow)')
    parser.add_option('-d', '--destination',
                      dest='destination', default='staticmaps',
                      help='Destination folder (default=./staticmaps)')
    parser.add_option('-r', '--river',
                      dest='rivshp', default=None,
                      help='river network polyline layer (ESRI Shapefile)')
    parser.add_option('-c', '--catchment',
                      dest='catchshp', default=None,
                      help='catchment polygon layer (ESRI Shapefile)')
    parser.add_option('-g', '--gauges',
                      dest='gaugeshp', default=None,
                      help='gauge point layer (ESRI Shapefile)')
    parser.add_option('-D', '--dem',
                      dest='dem_in', default=None,
                      help='digital elevation model (GeoTiff)')
    parser.add_option('-L', '--landuse',
                      dest='landuse', default=None,
                      help='land use / land cover layer (GeoTiff)')
    parser.add_option('-S', '--soiltype',
                      dest='soil', default=None,
                      help='soil type layer (GeoTiff)')
    parser.add_option('-V', '--vegetation',
                      dest='lai', default=None,
                      help='vegetation LAI layer location (containing 12 GeoTiffs <LAI00000.XXX.tif>)')
    parser.add_option('-O', '--other_maps',
                      dest='other_maps', default=None,
                      help='bracketed [] comma-separated list of paths to other maps that should be reprojected')
    parser.add_option('-C', '--clean',
                      dest='clean', default=False, action='store_true',
                      help='Clean the .xml files from static maps folder when finished')
    parser.add_option('-A', '--alltouch',
                      dest='alltouch', default=False, action='store_true',
                      help='option to burn catchments "all touching".\nUseful when catchment-size is small compared to cellsize')
    (options, args) = parser.parse_args()
    # parse other maps into an array (the default is None, so guard against it)
    if options.other_maps is not None:
        options.other_maps = options.other_maps.replace(' ', '').replace('[', '').replace(']', '').split(',')
    else:
        options.other_maps = []

    options.source = os.path.abspath(options.source)
    clone_map = os.path.join(options.source, 'mask.map')
    clone_shp = os.path.join(options.source, 'mask.shp')
    clone_prj = os.path.join(options.source, 'mask.prj')
    
    if None in (options.inifile,
                options.rivshp,
                options.catchshp,
                options.dem_in):
        msg = """The following files are compulsory:
        - ini file
        - DEM (raster)
        - river (shape)
        - catchment (shape)
        """
        print(msg)
        parser.print_help()
        sys.exit(1)
    if not os.path.exists(options.inifile):
        print('path to ini file cannot be found')
        sys.exit(1)
    if not os.path.exists(options.rivshp):
        print('path to river shape cannot be found')
        sys.exit(1)
    if not os.path.exists(options.catchshp):
        print('path to catchment shape cannot be found')
        sys.exit(1)
    if not os.path.exists(options.dem_in):
        print('path to DEM cannot be found')
        sys.exit(1)
        
    
    # open a logger, dependent on verbose print to screen or not
    logger, ch = wtools_lib.setlogger(logfilename, 'WTOOLS', options.verbose)

    # create directories # TODO: check if workdir is still necessary, try to keep in memory as much as possible

    # delete old files (when the source and destination folder are different)
    if os.path.isdir(options.destination) and options.destination != options.source:
        shutil.rmtree(options.destination)
    if options.destination != options.source:
        os.makedirs(options.destination)

    # Read mask
    if not(os.path.exists(clone_map)):
        logger.error('Clone file {:s} not found. Please run create_grid first.'.format(clone_map))
        sys.exit(1)
    else:
        # set clone
        pcr.setclone(clone_map)
        # get the extent from clone.tif
        xax, yax, clone, fill_value = gis.gdal_readmap(clone_map, 'GTiff')
        trans = wtools_lib.get_geotransform(clone_map)
        extent = wtools_lib.get_extent(clone_map)
        xmin, ymin, xmax, ymax = extent
        zeros = np.zeros(clone.shape)
        ones = pcr.numpy2pcr(pcr.Scalar, np.ones(clone.shape), -9999)
        # get the projection from clone.tif
        srs = wtools_lib.get_projection(clone_map)
        unit_clone = srs.GetAttrValue('UNIT').lower()

    ### READ CONFIG FILE
    # open config-file
    config=wtools_lib.OpenConf(options.inifile)
    
    # read settings
    snapgaugestoriver = wtools_lib.configget(config, 'settings',
                                             'snapgaugestoriver',
                                              True, datatype='boolean')
    burnalltouching = wtools_lib.configget(config, 'settings',
                                           'burncatchalltouching',
                                            True, datatype='boolean')
    burninorder = wtools_lib.configget(config, 'settings',
                                       'burncatchalltouching',
                                       False, datatype='boolean')
    verticetollerance = wtools_lib.configget(config, 'settings',
                                             'vertice_tollerance',
                                             0.0001, datatype='float')
    
    ''' read parameters '''
    burn_outlets = wtools_lib.configget(config, 'parameters',
                                        'burn_outlets', 10000,
                                        datatype='int')
    burn_rivers = wtools_lib.configget(config, 'parameters',
                                       'burn_rivers', 200,
                                       datatype='int')
    burn_connections = wtools_lib.configget(config, 'parameters',
                                            'burn_connections', 100,
                                            datatype='int')
    burn_gauges = wtools_lib.configget(config, 'parameters',
                                       'burn_gauges', 100,
                                       datatype='int')
    minorder = wtools_lib.configget(config, 'parameters',
                                    'riverorder_min', 3,
                                    datatype='int')
    percentiles = np.array(
        config.get('parameters', 'statisticmaps', '0, 100').replace(
            ' ', '').split(','), dtype='float')
            
    # read the parameters for generating a temporary very high resolution grid
    if unit_clone == 'degree':
       cellsize_hr = wtools_lib.configget(config, 'parameters',
                                          'highres_degree', 0.0005,
                                          datatype='float')
    elif (unit_clone == 'metre') or (unit_clone == 'meter'):
       cellsize_hr = wtools_lib.configget(config, 'parameters',
                                          'highres_metre', 50,
                                          datatype='float') 
    
    cols_hr = int((float(xmax)-float(xmin))/cellsize_hr + 2)
    rows_hr = int((float(ymax)-float(ymin))/cellsize_hr + 2)
    hr_trans = (float(xmin), cellsize_hr, float(0),
                float(ymax), 0, -cellsize_hr)
    clone_hr = os.path.join(options.destination, 'clone_highres.tif')
    # make a highres clone as well!
    wtools_lib.CreateTif(clone_hr, rows_hr, cols_hr, hr_trans, srs, 0)

    # read staticmap locations
    catchment_map = wtools_lib.configget(config, 'staticmaps',
                                         'catchment', 'wflow_catchment.map')
    dem_map = wtools_lib.configget(config, 'staticmaps',
                                   'dem', 'wflow_dem.map')
    demmax_map = wtools_lib.configget(config, 'staticmaps',
                                      'demmax', 'wflow_demmax.map')
    demmin_map = wtools_lib.configget(config, 'staticmaps',
                                      'demmin', 'wflow_demmin.map')
    gauges_map = wtools_lib.configget(config, 'staticmaps',
                                      'gauges', 'wflow_gauges.map')
    landuse_map = wtools_lib.configget(config, 'staticmaps',
                                       'landuse', 'wflow_landuse.map')
    ldd_map = wtools_lib.configget(config, 'staticmaps',
                                   'ldd', 'wflow_ldd.map')
    river_map = wtools_lib.configget(config, 'staticmaps',
                                     'river', 'wflow_river.map')
    outlet_map = wtools_lib.configget(config, 'staticmaps',
                                      'outlet', 'wflow_outlet.map')
    riverlength_fact_map = wtools_lib.configget(config, 'staticmaps',
                                                'riverlength_fact',
                                                'wflow_riverlength_fact.map')
    soil_map = wtools_lib.configget(config, 'staticmaps',
                                    'soil', 'wflow_soil.map')
    streamorder_map = wtools_lib.configget(config, 'staticmaps',
                                           'streamorder',
                                           'wflow_streamorder.map')
    subcatch_map = wtools_lib.configget(config, 'staticmaps',
                                        'subcatch', 'wflow_subcatch.map')

    # read mask location (optional)
    masklayer = wtools_lib.configget(config, 'mask', 'masklayer', options.catchshp)


    # ???? empty = pcr.ifthen(ones == 0, pcr.scalar(0))

    # TODO: check if extents are correct this way
    # TODO: check what the role of missing values is in zeros and ones (l. 123 in old code)

    # first add a missing value to dem_in
    ds = gdal.Open(options.dem_in, gdal.GA_Update)
    RasterBand = ds.GetRasterBand(1)
    fill_val = RasterBand.GetNoDataValue()

    if fill_val is None:
        RasterBand.SetNoDataValue(-9999)
    ds = None
    
    # reproject to clone map: see http://stackoverflow.com/questions/10454316/how-to-project-and-resample-a-grid-to-match-another-grid-with-gdal-python
    # resample DEM
    logger.info('Resampling dem from {:s} to {:s}'.format(os.path.abspath(options.dem_in), os.path.join(options.destination, dem_map)))
    gis.gdal_warp(options.dem_in, clone_map, os.path.join(options.destination, dem_map), format='PCRaster', gdal_interp=gdalconst.GRA_Average)
    # retrieve amount of rows and columns from clone
    # TODO: make windowstats applicable to source/target with different projections. This does not work yet.
    # retrieve srs from DEM
    try:
        srs_dem = wtools_lib.get_projection(options.dem_in)
    except:
        logger.warning('No projection found in DEM, assuming WGS 1984 lat long')
        srs_dem = osr.SpatialReference()
        srs_dem.ImportFromEPSG(4326)
    clone2dem_transform = osr.CoordinateTransformation(srs,srs_dem)
    #if srs.ExportToProj4() == srs_dem.ExportToProj4():
    for percentile in percentiles:
        if percentile >= 100:
            logger.info('computing window maximum')
            percentile_dem = os.path.join(options.destination, 'wflow_dem_max.map')
        elif percentile <= 0:
            logger.info('computing window minimum')
            percentile_dem = os.path.join(options.destination, 'wflow_dem_min.map')
        else:
            logger.info('computing window {:d} percentile'.format(int(percentile)))
            percentile_dem = os.path.join(options.destination, 'wflow_dem_{:03d}.map'.format(int(percentile)))

        stats = wtools_lib.windowstats(options.dem_in, len(yax), len(xax),
                                       trans, srs, percentile_dem, percentile,
                                       transform=clone2dem_transform, logger=logger)
#    else:
#        logger.warning('Projections of DEM and clone are different. DEM statistics for different projections is not yet implemented')

    """

    # burn in rivers
    # first convert and clip the river shapefile
    # retrieve river shape projection, if not available assume EPSG:4326
    file_att = os.path.splitext(os.path.basename(options.rivshp))[0]
    ds = ogr.Open(options.rivshp)
    lyr = ds.GetLayerByName(file_att)
    extent = lyr.GetExtent()
    extent_in = [extent[0], extent[2], extent[1], extent[3]]
    try:
        # get spatial reference from shapefile
        srs_rivshp = lyr.GetSpatialRef()
        logger.info('Projection in river shapefile is {:s}'.format(srs_rivshp.ExportToProj4()))
    except:
        logger.warning('No projection found in {:s}, assuming WGS 1984 lat-lon'.format(options.rivshp))
        srs_rivshp = osr.SpatialReference()
        srs_rivshp.ImportFromEPSG(4326)
    rivprojshp = os.path.join(options.destination, 'rivshp_proj.shp')
    logger.info('Projecting and clipping {:s} to {:s}'.format(options.rivshp, rivprojshp))
    # TODO: Line below takes a very long time to process, the bigger the shapefile, the more time. How do we deal with this?
    call(('ogr2ogr','-s_srs', srs_rivshp.ExportToProj4(),'-t_srs', srs.ExportToProj4(), '-clipsrc', '{:f}'.format(xmin), '{:f}'.format(ymin), '{:f}'.format(xmax), '{:f}'.format(ymax), rivprojshp, options.rivshp))
    """

    # TODO: BURNING!!


    # project catchment layer to projection of clone
    file_att = os.path.splitext(os.path.basename(options.catchshp))[0]
    print(options.catchshp)
    ds = ogr.Open(options.catchshp)
    lyr = ds.GetLayerByName(file_att)
    extent = lyr.GetExtent()
    extent_in = [extent[0], extent[2], extent[1], extent[3]]
    try:
        # get spatial reference from shapefile
        srs_catchshp = lyr.GetSpatialRef()
        logger.info('Projection in catchment shapefile is {:s}'.format(srs_catchshp.ExportToProj4()))
    except:
        logger.warning('No projection found in {:s}, assuming WGS 1984 lat-lon'.format(options.catchshp))
        srs_catchshp = osr.SpatialReference()
        srs_catchshp.ImportFromEPSG(4326)
    catchprojshp = os.path.join(options.destination, 'catchshp_proj.shp')
    logger.info('Projecting {:s} to {:s}'.format(options.catchshp, catchprojshp))
    call(('ogr2ogr','-s_srs', srs_catchshp.ExportToProj4(),'-t_srs', srs.ExportToProj4(), '-clipsrc', '{:f}'.format(xmin), '{:f}'.format(ymin), '{:f}'.format(xmax), '{:f}'.format(ymax), catchprojshp, options.catchshp))

    #
    logger.info('Calculating ldd')
    ldddem = pcr.readmap(os.path.join(options.destination, dem_map))
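    # note: the four 1e35 arguments are the lddcreate pit-removal thresholds
    # (outflow depth, core volume, core area and catchment precipitation);
    # values this large remove virtually all inland pits, so the resulting
    # ldd drains to the map edge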
    ldd_select=pcr.lddcreate(ldddem, 1e35, 1e35, 1e35, 1e35)
    pcr.report(ldd_select, os.path.join(options.destination, 'wflow_ldd.map'))

    # compute stream order, identify river cells
    streamorder = pcr.ordinal(pcr.streamorder(ldd_select))
    river = pcr.ifthen(streamorder >= pcr.ordinal(minorder), pcr.boolean(1))
    # find the minimum value in the DEM and cover missing values with a river at this value; this had no effect, so it is left out (kept for reference):
    # mindem = int(np.min(pcr.pcr2numpy(pcr.ordinal(os.path.join(options.destination, dem_map)),9999999)))
    # dem_resample_map = pcr.cover(os.path.join(options.destination, dem_map), pcr.scalar(river)*0+mindem)
    # pcr.report(dem_resample_map, os.path.join(options.destination, dem_map))
    pcr.report(streamorder, os.path.join(options.destination, streamorder_map))
    pcr.report(river, os.path.join(options.destination, river_map))

    # deal with your catchments
    if options.gaugeshp is None:
        logger.info('No gauges defined, using outlets instead')
        gauges = pcr.ordinal(
            pcr.uniqueid(
                pcr.boolean(
                    pcr.ifthen(pcr.scalar(ldd_select)==5,
                               pcr.boolean(1)
                               )
                )
            )
        )
        pcr.report(gauges, os.path.join(options.destination, gauges_map))
    # TODO: Add the gauge shape code from StaticMaps.py (line 454-489)
    # TODO: add river length map (see StaticMaps.py, line 492-499)

    # report river length
    # make a high resolution empty map
    dem_hr_file = os.path.join(options.destination, 'dem_highres.tif')
    burn_hr_file = os.path.join(options.destination, 'burn_highres.tif')
    demburn_hr_file = os.path.join(options.destination, 'demburn_highres.map')
    riv_hr_file = os.path.join(options.destination, 'riv_highres.map')
    gis.gdal_warp(options.dem_in, clone_hr, dem_hr_file)
    # wtools_lib.CreateTif(riv_hr, rows_hr, cols_hr, hr_trans, srs, 0)
    file_att = os.path.splitext(os.path.basename(options.rivshp))[0]
    # open the shape layer
    ds = ogr.Open(options.rivshp)
    lyr = ds.GetLayerByName(file_att)
    gis.ogr_burn(lyr, clone_hr, -100, file_out=burn_hr_file,
                  format='GTiff', gdal_type=gdal.GDT_Float32, fill_value=0)
    # read dem and burn values and add
    xax_hr, yax_hr, burn_hr, fill = gis.gdal_readmap(burn_hr_file, 'GTiff')
    burn_hr[burn_hr==fill] = 0
    xax_hr, yax_hr, dem_hr, fill = gis.gdal_readmap(dem_hr_file, 'GTiff')
    dem_hr[dem_hr==fill] = np.nan
    demburn_hr = dem_hr + burn_hr
    demburn_hr[np.isnan(demburn_hr)] = -9999
    gis.gdal_writemap(demburn_hr_file, 'PCRaster', xax_hr, yax_hr, demburn_hr, -9999.)
    pcr.setclone(demburn_hr_file)
    demburn_hr = pcr.readmap(demburn_hr_file)
    ldd_hr = pcr.lddcreate(demburn_hr, 1e35, 1e35, 1e35, 1e35)
    pcr.report(ldd_hr, os.path.join(options.destination, 'ldd_hr.map'))
    pcr.setglobaloption('unitcell')
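    # under the 'unitcell' global option distances are expressed in cell
    # lengths, so downstreamdist below returns values in cells; 'unittrue'
    # restores true map units afterwards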
    riv_hr = pcr.scalar(pcr.streamorder(ldd_hr) >= minorder)*pcr.downstreamdist(ldd_hr)
    pcr.report(riv_hr, riv_hr_file)
    pcr.setglobaloption('unittrue')
    pcr.setclone(clone_map)
    logger.info('Computing river length')
    #riverlength = wt.windowstats(riv_hr,clone_rows,clone_columns,clone_trans,srs_clone,resultdir,'frac',clone2dem_transform)
    riverlength = wtools_lib.windowstats(riv_hr_file, len(yax), len(xax),
                                 trans, srs, os.path.join(options.destination, riverlength_fact_map), stat='fact', logger=logger)
    # TODO: nothing happens with the river lengths yet. Need to decide how to use these

    # report outlet map
    pcr.report(pcr.ifthen(pcr.ordinal(ldd_select)==5, pcr.ordinal(1)), os.path.join(options.destination, outlet_map))

    # report subcatchment map
    subcatchment = pcr.subcatchment(ldd_select, gauges)
    pcr.report(pcr.ordinal(subcatchment), os.path.join(options.destination, subcatch_map))

    # Report land use map
    if options.landuse is None:
        logger.info('No land use map used. Preparing {:s} with only ones.'.
                    format(os.path.join(options.destination, landuse_map)))
        pcr.report(pcr.nominal(ones), os.path.join(options.destination, landuse_map))
    else:
        logger.info('Resampling land use from {:s} to {:s}'.
                    format(os.path.abspath(options.landuse),
                           os.path.join(options.destination, os.path.abspath(landuse_map))))
        gis.gdal_warp(options.landuse,
                      clone_map,
                      os.path.join(options.destination, landuse_map),
                      format='PCRaster',
                      gdal_interp=gdalconst.GRA_Mode,
                      gdal_type=gdalconst.GDT_Int32)

    # report soil map
    if options.soil is None:
        logger.info('No soil map used. Preparing {:s} with only ones.'.
                    format(os.path.join(options.destination, soil_map)))
        pcr.report(pcr.nominal(ones), os.path.join(options.destination, soil_map))
    else:
        logger.info('Resampling soil from {:s} to {:s}'.
                    format(os.path.abspath(options.soil),
                           os.path.join(options.destination, os.path.abspath(soil_map))))
        gis.gdal_warp(options.soil,
                      clone_map,
                      os.path.join(options.destination, soil_map),
                      format='PCRaster',
                      gdal_interp=gdalconst.GRA_Mode,
                      gdal_type=gdalconst.GDT_Int32)

    if options.lai is None:
        # NOTE: this branch reuses soil_map and looks like a copy-paste from the
        # soil block above; it does not actually produce default LAI maps
        logger.info('No vegetation LAI maps used. Preparing default map {:s} with only ones.'.
                    format(os.path.join(options.destination, soil_map)))
        pcr.report(pcr.nominal(ones), os.path.join(options.destination, soil_map))
    else:
        dest_lai = os.path.join(options.destination, 'clim')
        os.makedirs(dest_lai)
        for month in range(12):
            lai_in = os.path.join(options.lai, 'LAI00000.{:03d}'.format(month + 1))
            lai_out = os.path.join(dest_lai, 'LAI00000.{:03d}'.format(month + 1))
            logger.info('Resampling vegetation LAI from {:s} to {:s}'.
                        format(os.path.abspath(lai_in),
                               os.path.abspath(lai_out)))
            gis.gdal_warp(lai_in,
                          clone_map,
                          lai_out,
                          format='PCRaster',
                          gdal_interp=gdalconst.GRA_Bilinear,
                          gdal_type=gdalconst.GDT_Float32)

    # report other maps
    if options.other_maps is None:
        logger.info('No other maps used. Skipping other maps.')
    else:
        logger.info('Resampling list of other maps...')
        for map_file in options.other_maps:
            map_name = os.path.split(map_file)[1]
            logger.info('Resampling a map from {:s} to {:s}'.
                        format(os.path.abspath(map_file),
                               os.path.join(options.destination, map_name)))
            gis.gdal_warp(map_file,
                          clone_map,
                          os.path.join(options.destination, map_name),
                          format='PCRaster',
                          gdal_interp=gdalconst.GRA_Mode,
                          gdal_type=gdalconst.GDT_Float32)


    if options.clean:
        wtools_lib.DeleteList(glob.glob(os.path.join(options.destination, '*.xml')),
                              logger=logger)
        wtools_lib.DeleteList(glob.glob(os.path.join(options.destination, 'clim', '*.xml')),
                              logger=logger)
        wtools_lib.DeleteList(glob.glob(os.path.join(options.destination, '*highres*')),
                              logger=logger)
Example #30
0
def main():

    # output folder (and tmp folder)
    clean_out_folder = True
    if os.path.exists(out_folder):
        if clean_out_folder:
            shutil.rmtree(out_folder)
            os.makedirs(out_folder)
    else:
        os.makedirs(out_folder)
    os.chdir(out_folder)
    os.system("pwd")

    # set the clone map
    print("set the clone")
    pcr.setclone(global_ldd_30min_inp_file)

    # define the landmask
    print("define the landmask")
    # - based on the 30min input
    landmask_30min = define_landmask(input_file = global_landmask_30min_file,\
                                      clone_map_file = global_ldd_30min_inp_file,\
                                      output_map_file = "landmask_30min_only.map")
    # - based on the 05min input
    landmask_05min = define_landmask(input_file = global_landmask_05min_file,\
                                      clone_map_file = global_ldd_30min_inp_file,\
                                      output_map_file = "landmask_05min_only.map")
    # - based on the 30sec input
    landmask_30sec = define_landmask(input_file = global_landmask_30sec_file,\
                                      clone_map_file = global_ldd_30min_inp_file,\
                                      output_map_file = "landmask_30sec_only.map")
    # - based on the 30sec input
    landmask_03sec = define_landmask(input_file = global_landmask_03sec_file,\
                                      clone_map_file = global_ldd_30min_inp_file,\
                                      output_map_file = "landmask_03sec_only.map")
    #
    # - merge all landmasks
    landmask = pcr.cover(landmask_30min, landmask_05min, landmask_30sec,
                         landmask_03sec)
    pcr.report(landmask, "global_landmask_extended_30min.map")
    # ~ pcr.aguila(landmask)

    # extend ldd
    print("extend/define the ldd")
    ldd_map = pcr.readmap(global_ldd_30min_inp_file)
    ldd_map = pcr.ifthen(landmask, pcr.cover(ldd_map, pcr.ldd(5)))
    pcr.report(ldd_map, "global_ldd_extended_30min.map")
    # ~ pcr.aguila(ldd_map)

    # catchment map and size
    catchment_map = pcr.catchment(ldd_map, pcr.pit(ldd_map))
    catchment_size = pcr.areatotal(pcr.spatial(pcr.scalar(1.0)), catchment_map)
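    # areatotal over a constant 1.0 simply counts cells, so catchment_size is
    # the number of 30 arcmin cells per catchment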
    # ~ pcr.aguila(catchment_size)

    # identify small islands
    print("identify small islands")
    # - maps of islands smaller than 15000 cells (at half arc degree resolution)
    island_map = pcr.ifthen(landmask, pcr.clump(pcr.defined(ldd_map)))
    island_size = pcr.areatotal(pcr.spatial(pcr.scalar(1.0)), island_map)
    island_map = pcr.ifthen(island_size < 15000., island_map)
    # ~ # - use catchments (instead of islands)
    # ~ island_map  = catchment_map
    # ~ island_size = catchment_size
    # ~ island_map  = pcr.ifthen(island_size < 10000., island_map)
    # - sort from the largest island
    # -- take one cell per island as a representative
    island_map_rep_size = pcr.ifthen(
        pcr.areaorder(island_size, island_map) == 1.0, island_size)
    # -- sort from the largest island
    island_map_rep_ids = pcr.areaorder(
        island_map_rep_size * -1.00,
        pcr.ifthen(pcr.defined(island_map_rep_size), pcr.nominal(1.0)))
    # -- map of smaller islands, sorted from the largest one
    island_map = pcr.areamajority(pcr.nominal(island_map_rep_ids), island_map)
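    # the three steps above renumber the islands so that id 1 is the largest:
    # one representative cell per island carries the island size, areaorder
    # ranks the (negated) sizes so the biggest gets rank 1, and areamajority
    # broadcasts that rank back to every cell of its island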

    # identify the biggest island for every group of small islands within a 15 arc-degree window
    print("the biggest island for every group of small islands")
    large_island_map = pcr.ifthen(
        pcr.scalar(island_map) == pcr.windowminimum(pcr.scalar(island_map),
                                                    15.), island_map)
    # ~ pcr.aguila(large_island_map)

    # identify big catchments
    print("identify large catchments")
    catchment_map = pcr.catchment(ldd_map, pcr.pit(ldd_map))
    catchment_size = pcr.areatotal(pcr.spatial(pcr.scalar(1.0)), catchment_map)
    # - identify all large catchments with size >= 50 cells (at the resolution of 30 arcmin) = 50 x (50^2) km2 = 125000 km2
    large_catchment_map = pcr.ifthen(catchment_size >= 50, catchment_map)
    # - offset the catchment codes so they do not collide with the island codes
    large_catchment_map = pcr.nominal(
        pcr.scalar(large_catchment_map) +
        10. * vos.getMinMaxMean(pcr.scalar(large_island_map))[1])

    # merge biggest islands and big catchments
    print("merge large catchments and islands")
    large_catchment_and_island_map = pcr.cover(large_catchment_map,
                                               large_island_map)
    # ~ large_catchment_and_island_map = pcr.cover(large_island_map, large_catchment_map)
    large_catchment_and_island_map_size = pcr.areatotal(
        pcr.spatial(pcr.scalar(1.0)), large_catchment_and_island_map)

    # - sort from the largest one
    # -- take one cell per island as a representative
    large_catchment_and_island_map_rep_size = pcr.ifthen(
        pcr.areaorder(large_catchment_and_island_map_size,
                      large_catchment_and_island_map) == 1.0,
        large_catchment_and_island_map_size)
    # -- sort from the largest
    large_catchment_and_island_map_rep_ids = pcr.areaorder(
        large_catchment_and_island_map_rep_size * -1.00,
        pcr.ifthen(pcr.defined(large_catchment_and_island_map_rep_size),
                   pcr.nominal(1.0)))
    # -- map of largest catchments and islands, sorted from the largest one
    large_catchment_and_island_map = pcr.areamajority(
        pcr.nominal(large_catchment_and_island_map_rep_ids),
        large_catchment_and_island_map)
    # ~ pcr.report(large_catchment_and_island_map, "large_catchments_and_islands.map")

    # ~ # perform cdo fillmiss2 in order to merge the small catchments to the nearest large catchments
    # ~ print("spatial interpolation/extrapolation using cdo fillmiss2 to get initial subdomains")
    # ~ cmd = "gdal_translate -of NETCDF large_catchments_and_islands.map large_catchments_and_islands.nc"
    # ~ print(cmd); os.system(cmd)
    # ~ cmd = "cdo fillmiss2 large_catchments_and_islands.nc large_catchments_and_islands_filled.nc"
    # ~ print(cmd); os.system(cmd)
    # ~ cmd = "gdal_translate -of PCRaster large_catchments_and_islands_filled.nc large_catchments_and_islands_filled.map"
    # ~ print(cmd); os.system(cmd)
    # ~ cmd = "mapattr -c " + global_ldd_30min_inp_file + " " + "large_catchments_and_islands_filled.map"
    # ~ print(cmd); os.system(cmd)
    # ~ # - initial subdomains
    # ~ subdomains_initial = pcr.nominal(pcr.readmap("large_catchments_and_islands_filled.map"))
    # ~ subdomains_initial = pcr.areamajority(subdomains_initial, catchment_map)
    # ~ pcr.aguila(subdomains_initial)

    # spatial interpolation/extrapolation in order to merge the small catchments to the nearest large catchments
    print("spatial interpolation/extrapolation to get initial subdomains")
    field = large_catchment_and_island_map
    cellID = pcr.nominal(pcr.uniqueid(pcr.defined(field)))
    zoneID = pcr.spreadzone(cellID, 0, 1)
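    # spreadzone grows every uniquely-labelled defined cell outward at uniform
    # cost, so zoneID assigns each location the id of its nearest defined cell;
    # areamajority then translates those ids back into subdomain codes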
    field = pcr.areamajority(field, zoneID)
    subdomains_initial = field
    subdomains_initial = pcr.areamajority(subdomains_initial, catchment_map)
    pcr.aguila(subdomains_initial)

    pcr.report(subdomains_initial, "global_subdomains_30min_initial.map")

    print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[0])))
    print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[1])))

    # ~ print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial_clump))[0])))
    # ~ print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial_clump))[1])))

    print("Checking all subdomains, avoid too large subdomains")

    num_of_masks = int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[1])

    # clone code that will be assigned
    assigned_number = 0

    subdomains_final = pcr.ifthen(
        pcr.scalar(subdomains_initial) < -7777, pcr.nominal(0))

    for nr in range(1, num_of_masks + 1, 1):

        msg = "Processing the landmask %s" % (str(nr))
        print(msg)

        mask_selected_boolean = pcr.ifthen(subdomains_initial == nr,
                                           pcr.boolean(1.0))

        # ~ if nr == 1: pcr.aguila(mask_selected_boolean)

        xmin, ymin, xmax, ymax = boundingBox(mask_selected_boolean)
        area_in_degree2 = (xmax - xmin) * (ymax - ymin)

        # ~ print(str(area_in_degree2))

        # check whether the size of bounding box is ok
        # - initial check value
        check_ok = True

        reference_area_in_degree2 = 2500.
        if area_in_degree2 > 1.50 * reference_area_in_degree2: check_ok = False
        if (xmax - xmin) > 10 * (ymax - ymin): check_ok = False

        if check_ok == True:

            msg = "Clump is not needed."
            msg = "\n\n" + str(msg) + "\n\n"
            print(msg)

            # assign the clone code
            assigned_number = assigned_number + 1

            # update global landmask for river and land
            mask_selected_nominal = pcr.ifthen(mask_selected_boolean,
                                               pcr.nominal(assigned_number))
            subdomains_final = pcr.cover(subdomains_final,
                                         mask_selected_nominal)

        if check_ok == False:

            msg = "Clump is needed."
            msg = "\n\n" + str(msg) + "\n\n"
            print(msg)

            # make clump
            clump_ids = pcr.nominal(pcr.clump(mask_selected_boolean))

            # merge clumps that are close together
            clump_ids_window_majority = pcr.windowmajority(clump_ids, 10.0)
            clump_ids = pcr.areamajority(clump_ids_window_majority, clump_ids)
            # ~ pcr.aguila(clump_ids)

            # minimum and maximum values
            min_clump_id = int(
                pcr.cellvalue(pcr.mapminimum(pcr.scalar(clump_ids)), 1)[0])
            max_clump_id = int(
                pcr.cellvalue(pcr.mapmaximum(pcr.scalar(clump_ids)), 1)[0])

            for clump_id in range(min_clump_id, max_clump_id + 1, 1):

                msg = "Processing the clump %s of %s from the landmask %s" % (
                    str(clump_id), str(max_clump_id), str(nr))
                msg = "\n\n" + str(msg) + "\n\n"
                print(msg)

                # identify mask based on the clump
                mask_selected_boolean_from_clump = pcr.ifthen(
                    clump_ids == pcr.nominal(clump_id), mask_selected_boolean)
                mask_selected_boolean_from_clump = pcr.ifthen(
                    mask_selected_boolean_from_clump,
                    mask_selected_boolean_from_clump)

                # check whether the clump is empty
                check_mask_selected_boolean_from_clump = pcr.ifthen(
                    mask_selected_boolean, mask_selected_boolean_from_clump)
                check_if_empty = float(
                    pcr.cellvalue(
                        pcr.mapmaximum(
                            pcr.scalar(
                                pcr.defined(
                                    check_mask_selected_boolean_from_clump))),
                        1)[0])

                if check_if_empty == 0.0:

                    msg = "Map is empty !"
                    msg = "\n\n" + str(msg) + "\n\n"
                    print(msg)

                else:

                    msg = "Map is NOT empty !"
                    msg = "\n\n" + str(msg) + "\n\n"
                    print(msg)

                    # assign the clone code
                    assigned_number = assigned_number + 1

                    # update global landmask for river and land
                    mask_selected_nominal = pcr.ifthen(
                        mask_selected_boolean_from_clump,
                        pcr.nominal(assigned_number))
                    subdomains_final = pcr.cover(subdomains_final,
                                                 mask_selected_nominal)

    # ~ # kill all aguila processes if exist
    # ~ os.system('killall aguila')

    pcr.aguila(subdomains_final)

    print("")
    print("")
    print("")

    print("The subdomain map is READY.")

    pcr.report(subdomains_final, "global_subdomains_30min_final.map")

    num_of_masks = int(vos.getMinMaxMean(pcr.scalar(subdomains_final))[1])
    print(num_of_masks)

    print("")
    print("")
    print("")

    for nr in range(1, num_of_masks + 1, 1):

        mask_selected_boolean = pcr.ifthen(subdomains_final == nr,
                                           pcr.boolean(1.0))

        xmin, ymin, xmax, ymax = boundingBox(mask_selected_boolean)
        area_in_degree2 = (xmax - xmin) * (ymax - ymin)

        print(
            str(nr) + " ; " + str(area_in_degree2) + " ; " +
            str((xmax - xmin)) + " ; " + str((ymax - ymin)))

    print("")
    print("")
    print("")

    print(num_of_masks)
def joinMaps(inputTuple):
	'''Merges maps starting from an input tuple that specifies the output map name, the number of rows\
 and columns, the upper-left X and Y coordinates, the cell length, the missing value identifier, a list of input map names and the clone file name'''
	outputFileName= inputTuple[0]
	nrRows= inputTuple[1]
	nrCols= inputTuple[2]
	xMin= inputTuple[3]
	yMax= inputTuple[4]
	cellLength= inputTuple[5]
	MV= inputTuple[6]
	fileNames= inputTuple[7]
	cloneFileName= inputTuple[8]
	#-echo to screen/logger
	msg = 'combining files for %s' % outputFileName
	logger.info(msg)
	#-get extent
	xMax= xMin+nrCols*cellLength
	yMin= yMax-nrRows*cellLength
	xCoordinates= xMin+np.arange(nrCols+1)*cellLength
	yCoordinates= yMin+np.arange(nrRows+1)*cellLength
	yCoordinates= np.flipud(yCoordinates)
	msg = 'between %.2f, %.2f and %.2f, %.2f' % (xMin,yMin,xMax,yMax)
	logger.info(msg)

	#~ #-set output array
	#~ variableArray= np.ones((nrRows,nrCols))*MV
	#-set initial output array to zero
	variableArray= np.zeros((nrRows,nrCols))*MV

	#-iterate over maps
	for fileName in fileNames:
		
		print(fileName)
		attributeClone= getMapAttributesALL(fileName)
		cellLengthClone= attributeClone['cellsize']
		rowsClone= attributeClone['rows']
		colsClone= attributeClone['cols']
		xULClone= attributeClone['xUL']
		yULClone= attributeClone['yUL']
		# check whether both maps have the same attributes and process
		process, nd= checkResolution(cellLength,cellLengthClone)
		
		if process:
			#-get coordinates and locations
			sampleXMin= xULClone
			sampleXMax= xULClone+colsClone*cellLengthClone
			sampleYMin= yULClone-rowsClone*cellLengthClone
			sampleYMax= yULClone
			sampleXCoordinates= sampleXMin+np.arange(colsClone+1)*cellLengthClone
			sampleYCoordinates= sampleYMin+np.arange(rowsClone+1)*cellLengthClone
			sampleYCoordinates= np.flipud(sampleYCoordinates)
			sampleXMin= getMax(xMin,sampleXMin)
			sampleXMax= getMin(xMax,sampleXMax)
			sampleYMin= getMax(yMin,sampleYMin)
			sampleYMax= getMin(yMax,sampleYMax)
			sampleRow0= getPosition(sampleYMin,sampleYCoordinates,nd)
			sampleRow1= getPosition(sampleYMax,sampleYCoordinates,nd)			
			sampleCol0= getPosition(sampleXMin,sampleXCoordinates,nd)
			sampleCol1= getPosition(sampleXMax,sampleXCoordinates,nd)
			sampleRow0, sampleRow1= checkRowPosition(sampleRow0,sampleRow1)
			variableRow0= getPosition(sampleYMin,yCoordinates,nd)
			variableRow1= getPosition(sampleYMax,yCoordinates,nd)
			variableCol0= getPosition(sampleXMin,xCoordinates,nd)
			variableCol1= getPosition(sampleXMax,xCoordinates,nd)
			variableRow0,variableRow1= checkRowPosition(variableRow0,variableRow1)
			#-read sample array
			setclone(fileName)
			sampleArray= pcr2numpy(readmap(fileName),MV)
			
			print(sampleArray)
			
			sampleNrRows, sampleNrCols= sampleArray.shape

			# -create mask
			#~ mask= (variableArray[variableRow0:variableRow1,variableCol0:variableCol1] == MV) &\
				#~ (sampleArray[sampleRow0:sampleRow1,sampleCol0:sampleCol1] <> MV)
			mask= (variableArray[variableRow0:variableRow1,variableCol0:variableCol1] != MV) &\
				(sampleArray[sampleRow0:sampleRow1,sampleCol0:sampleCol1] != MV)

			#-add values
			msg = ' adding values in %d, %d rows, columns from (x, y) %.3f, %.3f and %.3f, %.3f to position (row, col) %d, %d and %d, %d' %\
				(sampleNrRows, sampleNrCols,sampleXMin,sampleYMin,sampleXMax,sampleYMax,variableRow0,variableCol0,variableRow1,variableCol1)
			logger.info(msg)	
	
			#~ variableArray[variableRow0:variableRow1,variableCol0:variableCol1][mask]= \
				#~ sampleArray[sampleRow0:sampleRow1,sampleCol0:sampleCol1][mask]
	
			variableArray[variableRow0:variableRow1,variableCol0:variableCol1][mask] += sampleArray[sampleRow0:sampleRow1,sampleCol0:sampleCol1][mask]

		else:

			msg = '%s does not match resolution and is not processed' % fileName
			logger.warning(msg)

	#-report output map
	setclone(cloneFileName)
	report(numpy2pcr(Scalar,variableArray,MV),outputFileName)
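
A minimal usage sketch for joinMaps, assuming a hypothetical half-degree global grid; the file names and attributes below are illustrative only:

# hypothetical example of packing the nine-element argument tuple
inputTuple = ('merged.map',                    # outputFileName
              360,                             # nrRows
              720,                             # nrCols
              -180.0,                          # xMin, upper-left X
              90.0,                            # yMax, upper-left Y
              0.5,                             # cellLength
              -9999.0,                         # MV, missing value identifier
              ['tile_a.map', 'tile_b.map'],    # fileNames to combine
              'clone.map')                     # cloneFileName
joinMaps(inputTuple)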
Example #32
0
 def test_2(self):
     """ unpickle boolean """
     field_pkl = pickle.load(open("pickle_boolean.pkl", "rb"))
     pcraster.report(field_pkl, "bla.map")
     self.failUnless(
         self.mapEqualsValidated(field_pkl, "boolean_Result.map"))
def pcr_cover_nodata(map_filename, nullmask_map):
    #Cover nodata
    input_map = pcr.readmap(map_filename)
    map_no_data_covered = pcr.cover(input_map, pcr.scalar(nullmask_map))
    pcr.report(map_no_data_covered, map_filename)
    return
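
A short usage sketch for pcr_cover_nodata (file names hypothetical); note that the covered result overwrites the input file in place:

# fill nodata cells of a hypothetical dem.map with zeros
pcr.setclone("dem.map")
nullmask = pcr.spatial(pcr.scalar(0.0))
pcr_cover_nodata("dem.map", nullmask)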
Example #34
0
 def test_4(self):
     """ unpickle ordinal """
     field_pkl = pickle.load(open("pickle_ordinal.pkl", "rb"))
     pcraster.report(field_pkl, "bla_ordinal.map")
     self.failUnless(
         self.mapEqualsValidated(field_pkl, "ordinal_Result.map"))
Example #35
0
 def test_5(self):
     """ unpickle scalar """
     field_pkl = pickle.load(open("pickle_scalar.pkl", "rb"))
     pcraster.report(field_pkl, "bla_scalar.map")
     self.failUnless(self.mapEqualsValidated(field_pkl, "sin_Result.map"))
Example #36
0
 def test_6(self):
     """ unpickle directional """
     field_pkl = pickle.load(open("pickle_direction.pkl", "rb"))
     pcraster.report(field_pkl, "bla_direction.map")
     self.failUnless(
         self.mapEqualsValidated(field_pkl, "directional_Result1.map"))
Example #37
0
                return_period) + "_of_" + varDict.netcdf_long_name[var_name]
            #
            netcdf_report.create_variable(\
                                          ncFileName = netcdf_file[bias_type][var_name]['file_name'], \
                                          varName    = variable_name, \
                                          varUnit    = variable_unit, \
                                          longName   = var_long_name, \
                                          comment    = varDict.comment[var_name]
                                          )

        # store the variables to pcraster map and netcdf files:
        data_dictionary = {}
        for return_period in return_periods:

            # variable name
            variable_name = str(
                return_period) + "_of_" + varDict.netcdf_short_name[var_name]

            # report to a pcraster map
            pcr.report(extreme_values[bias_type][return_period],
                       bias_type + "_" + variable_name + ".map")

            # put it into a dictionary
            data_dictionary[variable_name] = pcr.pcr2numpy(
                extreme_values[bias_type][return_period], vos.MV)

        # save the variables to a netcdf file
        netcdf_report.dictionary_of_data_to_netcdf(netcdf_file[bias_type][var_name]['file_name'], \
                                                   data_dictionary, \
                                                   timeBounds)
Example #38
0
# -*- coding: utf-8 -*-
"""
Created on Sun Nov  4 21:18:27 2018

@author: Chinmay
"""
import os
import pcraster as pcr

dem = 'D:/Chinmay/Hydro_modeling_erin_fall_2018/sp_model/dem.map'
output_path = 'D:/Chinmay/Hydro_modeling_erin_fall_2018/sp_model/'

pcr.setclone(dem)

slope = pcr.slope(dem)
pcr.report(slope, output_path + "gradient.map")

ldd = pcr.lddcreate(dem, 1e31, 1e31, 1e31, 1e31)
pcr.report(ldd, output_path + "ldd.map")

os.system('aguila ' + output_path + 'ldd.map')
Example #39
0
    def adusting_parameters(self, configuration, system_argument):

        # it is also possible to define prefactors via the ini/configuration file:
        # - this will overwrite any previously given pre-multipliers
        if 'prefactorOptions' in configuration.allSections:

            logger.info(
                "Adjusting some model parameters based on given values in the ini/configuration file."
            )

            # linear scale  # Note that this one does NOT work for the changing WMIN or Joyce land cover options.
            self.multiplier_for_refPotET = float(
                configuration.
                prefactorOptions['linear_multiplier_for_refPotET'])
            multiplier_for_degreeDayFactor = float(
                configuration.prefactorOptions[
                    'linear_multiplier_for_degreeDayFactor'])  # linear scale
            multiplier_for_minSoilDepthFrac = float(
                configuration.prefactorOptions[
                    'linear_multiplier_for_minSoilDepthFrac'])  # linear scale
            multiplier_for_kSat = float(
                configuration.prefactorOptions['log_10_multiplier_for_kSat']
            )  # log scale
            multiplier_for_storCap = float(
                configuration.prefactorOptions['linear_multiplier_for_storCap']
            )  # linear scale
            multiplier_for_recessionCoeff = float(
                configuration.prefactorOptions[
                    'log_10_multiplier_for_recessionCoeff'])  # log scale

        # saving global pre-multipliers to the log file:
        msg = "\n"
        msg += "\n"
        msg += "Multiplier values used: " + "\n"
        msg += "For minSoilDepthFrac           : " + \
            str(multiplier_for_minSoilDepthFrac)+"\n"
        msg += "For kSat (log-scale)           : " + \
            str(multiplier_for_kSat)+"\n"
        msg += "For recessionCoeff (log-scale) : " + \
            str(multiplier_for_recessionCoeff)+"\n"
        msg += "For storCap                    : " + \
            str(multiplier_for_storCap)+"\n"
        msg += "For degreeDayFactor            : " + \
            str(multiplier_for_degreeDayFactor)+"\n"
        msg += "For refPotET                   : " + \
            str(self.multiplier_for_refPotET)+"\n"
        logger.info(msg)
        # - also to a txt file
        # this will be stored in the "map" folder of the 'outputDir' (as we set the current working directory to this "map" folder, see configuration.py)
        f = open("multiplier.txt", "w")
        f.write(msg)
        f.close()

        # set parameter "recessionCoeff" based on the given pre-multiplier
        # - also saving the adjusted parameter maps to pcraster files
        # - these will be stored in the "map" folder of the 'outputDir' (as we set the current working directory to this "map" folder, see configuration.py)
        # "recessionCoeff"
        # minimum value is zero and using log-scale
        self.model.groundwater.recessionCoeff = pcr.max(
            0.0, (10**(multiplier_for_recessionCoeff)) *
            self.model.groundwater.recessionCoeff)
        self.model.groundwater.recessionCoeff = pcr.min(
            1.0, self.model.groundwater.recessionCoeff)
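        # the multiplier acts on a log10 scale (10**m), so m = 0 leaves the
        # parameter unchanged; the result is clamped to the interval [0, 1]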
        # report the map
        pcr.report(self.model.groundwater.recessionCoeff, "recessionCoeff.map")

        # set parameters "kSat", "storCap", "minSoilDepthFrac", and "degreeDayFactor" based on the given pre-multipliers
        for coverType in self.model.landSurface.coverTypes:

            # "degreeDayFactor"
            self.model.landSurface.landCoverObj[
                coverType].degreeDayFactor = pcr.max(
                    0.0, multiplier_for_degreeDayFactor * self.model.
                    landSurface.landCoverObj[coverType].degreeDayFactor)
            # report the map
            pcraster_filename = "degreeDayFactor" + "_" + coverType + ".map"
            pcr.report(
                self.model.landSurface.landCoverObj[coverType].degreeDayFactor,
                pcraster_filename)

        # "kSat" and "storCap" for 2 layer model
        if self.model.landSurface.numberOfSoilLayers == 2:

            # "kSat"
            # minimum value is zero and using log-scale
            self.model.landSurface.parameters.kSatUpp = \
                pcr.max(0.0, (10**(multiplier_for_kSat)) *
                        self.model.landSurface.parameters.kSatUpp)
            self.model.landSurface.parameters.kSatLow = \
                pcr.max(0.0, (10**(multiplier_for_kSat)) *
                        self.model.landSurface.parameters.kSatLow)

            # report the maps (for debugging)
            #pcraster_filename = "kSatUpp"+ "_" + coverType + ".map"
            #pcr.report(self.model.landSurface.parameters.kSatUpp, pcraster_filename)
            #pcraster_filename = "kSatLow"+ "_" + coverType + ".map"
            #pcr.report(self.model.landSurface.parameters.kSatLow, pcraster_filename)

            # "storCap"
            # minimum value is zero
            self.model.landSurface.parameters.storCapUpp = pcr.max(
                0.0, multiplier_for_storCap *
                self.model.landSurface.parameters.storCapUpp)
            self.model.landSurface.parameters.storCapLow = pcr.max(
                0.0, multiplier_for_storCap *
                self.model.landSurface.parameters.storCapLow)
            # report the maps (for debugging)
            #pcraster_filename = "storCapUpp"+ "_" + coverType + ".map"
            #pcr.report(self.model.landSurface.parameters.storCapUpp, pcraster_filename)
            #pcraster_filename = "storCapLow"+ "_" + coverType + ".map"
            #pcr.report(self.model.landSurface.parameters.storCapLow, pcraster_filename)

        # "kSat" and "storCap" for 3 layer model
        if self.model.landSurface.numberOfSoilLayers == 3:
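            # NOTE: coverType here still holds whatever value remained from the
            # loop above; this branch appears to need its own loop over
            # self.model.landSurface.coverTypes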

            # "kSat"
            # minimum value is zero and using log-scale
            self.model.landSurface.landCoverObj[coverType].parameters.kSatUpp000005 = \
                pcr.max(0.0, (10**(multiplier_for_kSat)) *
                        self.model.landSurface.landCoverObj[coverType].parameters.kSatUpp000005)
            self.model.landSurface.landCoverObj[coverType].parameters.kSatUpp005030 = \
                pcr.max(0.0, (10**(multiplier_for_kSat)) *
                        self.model.landSurface.landCoverObj[coverType].parameters.kSatUpp005030)
            self.model.landSurface.landCoverObj[coverType].parameters.kSatLow030150 = \
                pcr.max(0.0, (10**(multiplier_for_kSat)) *
                        self.model.landSurface.landCoverObj[coverType].parameters.kSatLow030150)
            # report the maps
            pcraster_filename = "kSatUpp000005" + "_" + coverType + ".map"
            pcr.report(
                self.model.landSurface.landCoverObj[coverType].parameters.
                kSatUpp000005, pcraster_filename)
            pcraster_filename = "kSatUpp005030" + "_" + coverType + ".map"
            pcr.report(
                self.model.landSurface.landCoverObj[coverType].parameters.
                kSatUpp005030, pcraster_filename)
            pcraster_filename = "kSatLow030150" + "_" + coverType + ".map"
            pcr.report(
                self.model.landSurface.landCoverObj[coverType].parameters.
                kSatLow030150, pcraster_filename)

            # "storCap"
            # minimum value is zero
            self.model.landSurface.landCoverObj[
                coverType].parameters.storCapUpp000005 = pcr.max(
                    0.0, multiplier_for_storCap * self.model.landSurface.
                    landCoverObj[coverType].parameters.storCapUpp000005)
            self.model.landSurface.landCoverObj[
                coverType].parameters.storCapUpp005030 = pcr.max(
                    0.0, multiplier_for_storCap * self.model.landSurface.
                    landCoverObj[coverType].parameters.storCapUpp005030)
            self.model.landSurface.landCoverObj[
                coverType].parameters.storCapLow030150 = pcr.max(
                    0.0, multiplier_for_storCap * self.model.landSurface.
                    landCoverObj[coverType].parameters.storCapLow030150)
            # report the maps
            pcraster_filename = "storCapUpp000005" + "_" + coverType + ".map"
            pcr.report(
                self.model.landSurface.landCoverObj[coverType].parameters.
                storCapUpp000005, pcraster_filename)
            pcraster_filename = "storCapUpp005030" + "_" + coverType + ".map"
            pcr.report(
                self.model.landSurface.landCoverObj[coverType].parameters.
                storCapUpp005030, pcraster_filename)
            pcraster_filename = "storCapLow030150" + "_" + coverType + ".map"
            pcr.report(
                self.model.landSurface.landCoverObj[coverType].parameters.
                storCapLow030150, pcraster_filename)

        # re-calculate rootZoneWaterStorageCap as the consequence of the modification of "storCap"
        # This is WMAX in the oldcalc script.
        if self.model.landSurface.numberOfSoilLayers == 2:
            self.model.landSurface.parameters.rootZoneWaterStorageCap = self.model.landSurface.parameters.storCapUpp +\
                self.model.landSurface.parameters.storCapLow
        if self.model.landSurface.numberOfSoilLayers == 3:
            self.model.landSurface.landCoverObj[coverType].parameters.rootZoneWaterStorageCap = self.model.landSurface.landCoverObj[coverType].parameters.storCapUpp000005 +\
                self.model.landSurface.landCoverObj[coverType].parameters.storCapUpp005030 +\
                self.model.landSurface.landCoverObj[coverType].parameters.storCapLow030150
        # report the map
        #pcraster_filename = "rootZoneWaterStorageCap"+ "_" + coverType + ".map"
        #pcr.report(self.model.landSurface.parameters.rootZoneWaterStorageCap, pcraster_filename)

        # "minSoilDepthFrac"
        if multiplier_for_minSoilDepthFrac != 1.0:

            for coverType in self.model.landSurface.coverTypes:

                # minimum value is zero
                self.model.landSurface.landCoverObj[
                    coverType].minSoilDepthFrac = pcr.max(
                        0.0, multiplier_for_minSoilDepthFrac * self.model.
                        landSurface.landCoverObj[coverType].minSoilDepthFrac)
                # for minSoilDepthFrac - values will be limited by maxSoilDepthFrac
                self.model.landSurface.landCoverObj[
                    coverType].minSoilDepthFrac = pcr.min(
                        self.model.landSurface.landCoverObj[coverType].
                        minSoilDepthFrac, self.model.landSurface.
                        landCoverObj[coverType].maxSoilDepthFrac)
                # maximum value is 1.0
                self.model.landSurface.landCoverObj[
                    coverType].minSoilDepthFrac = pcr.min(
                        1.0, self.model.landSurface.landCoverObj[coverType].
                        minSoilDepthFrac)
                # report the map
                pcraster_filename = "minSoilDepthFrac" + "_" + coverType + ".map"
                pcr.report(
                    self.model.landSurface.landCoverObj[coverType].
                    minSoilDepthFrac, pcraster_filename)

                # re-calculate arnoBeta (as the consequence of the modification of minSoilDepthFrac)
                self.model.landSurface.landCoverObj[
                    coverType].arnoBeta = pcr.max(
                        0.001, (self.model.landSurface.landCoverObj[coverType].
                                maxSoilDepthFrac - 1.) /
                        (1. - self.model.landSurface.landCoverObj[coverType].
                         minSoilDepthFrac) +
                        self.model.landSurface.parameters.orographyBeta - 0.01)
                self.model.landSurface.landCoverObj[
                    coverType].arnoBeta = pcr.cover(
                        pcr.max(
                            0.001, self.model.landSurface.
                            landCoverObj[coverType].arnoBeta), 0.001)
                # report the map
                pcraster_filename = "arnoBeta" + "_" + coverType + ".map"
                pcr.report(
                    self.model.landSurface.landCoverObj[coverType].arnoBeta,
                    pcraster_filename)

                # re-calculate rootZoneWaterStorageMin (as the consequence of the modification of minSoilDepthFrac)
                # This is WMIN in the oldcalc script.
                # WMIN (unit: m): minimum local soil water capacity within the grid-cell
                self.model.landSurface.landCoverObj[coverType].rootZoneWaterStorageMin = self.model.landSurface.landCoverObj[coverType].minSoilDepthFrac *\
                    self.model.landSurface.parameters.rootZoneWaterStorageCap
                # report the map
                pcraster_filename = "rootZoneWaterStorageMin" + "_" + coverType + ".map"
                pcr.report(
                    self.model.landSurface.landCoverObj[coverType].
                    rootZoneWaterStorageMin, pcraster_filename)

                # re-calculate rootZoneWaterStorageRange (as the consequence of the modification of rootZoneWaterStorageRange and minSoilDepthFrac)
                # WMAX - WMIN (unit: m)
                self.model.landSurface.landCoverObj[coverType].rootZoneWaterStorageRange = self.model.landSurface.parameters.rootZoneWaterStorageCap -\
                    self.model.landSurface.landCoverObj[coverType].rootZoneWaterStorageMin
Example #40
0
 def testOrdinal2Nominal(self):
   ordinalMap = ordinal(pcraster.readmap("areaarea_Class.map"))
   self.assertEqual(ordinalMap.dataType(), pcraster.VALUESCALE.Ordinal)
   nominalMap = nominal(ordinalMap)
   pcraster.report(nominalMap, "nominal.map")
   self.assertEqual(nominalMap.dataType(), pcraster.VALUESCALE.Nominal)
Example #41
0
 def test_4(self):
   """ unpickle ordinal """
   field_pkl = pickle.load(open("pickle_ordinal.pkl", "r"))
   pcraster.report(field_pkl, "bla_ordinal.map")
   self.failUnless(self.mapEqualsValidated(field_pkl, "ordinal_Result.map"))
Example #42
0
def getQAtBasinMouths(discharge, basinMouth):
    temp = pcr.ifthenelse(basinMouth != 0 , discharge * secondsPerDay(),0.)
    pcr.report(temp,"temp.map")
    return (getMapTotal(temp)  / 1e9)
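
The conversion above is dimensional: discharge in m3/s times secondsPerDay() (assumed to return 86400) gives m3/day per cell, and dividing the map total by 1e9 converts m3 to km3, so the function returns km3/day; a steady 1000 m3/s at one basin mouth, for example, contributes 1000 * 86400 / 1e9 ≈ 0.086 km3/day.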
Example #43
0
 def test_6(self):
   """ unpickle directional """
   field_pkl = pickle.load(open("pickle_direction.pkl", "r"))
   pcraster.report(field_pkl, "bla_direction.map")
   self.failUnless(self.mapEqualsValidated(field_pkl, "directional_Result1.map"))
    # ignoring some variable names
    variable_names.pop('lat','')
    variable_names.pop('lon','')
    variable_names.pop('latiudes','')
    variable_names.pop('longitudes','')
    variable_names.pop('latiude','')
    variable_names.pop('longitude','')
    variable_names.pop('time','')
    # use the first variable
    variable_name = str(variable_names[0])
msg = 'Converting '+variable_name+' from the file:'+input_netcdf_filename+' to '+output_pcraster_filename
print(msg)

# set date_yyyy_mm_dd
date_yyyy_mm_dd = None
if len(sys.argv) > 5: date_yyyy_mm_dd = sys.argv[5]

# read netcdf file
if date_yyyy_mm_dd is None:
    map_value = vos.netcdf2PCRobjCloneWithoutTime(input_netcdf_filename,\
                                                  variable_name,\
                                                  clone_map_filename)
else:                                                  
    map_value = vos.netcdf2PCRobjClone(input_netcdf_filename,\
                                       variable_name,\
                                       date_yyyy_mm_dd,\
                                       clone_map_filename)
    
# save the map as pcraster map
pcr.report(map_value, output_pcraster_filename)
                                          ncFileName = netcdf_file[bias_type][var_name]['file_name'], \
                                          varName    = variable_name, \
                                          varUnit    = variable_unit, \
                                          longName   = var_long_name, \
                                          comment    = varDict.comment[var_name]
                                          )
        
        # store the variables to pcraster map and netcdf files:
        data_dictionary = {}
        for return_period in return_periods:
            
            # variable name
            variable_name = str(return_period) + "_of_" + varDict.netcdf_short_name[var_name]
            
            # report to a pcraster map
            pcr.report(pcr.ifthen(landmask, extreme_values[bias_type][return_period]), bias_type + "_" + variable_name + ".map")
        
            # put it into a dictionary
            data_dictionary[variable_name] = pcr.pcr2numpy(extreme_values[bias_type][return_period], vos.MV)
        
        # save the variables to a netcdf file
        netcdf_report.dictionary_of_data_to_netcdf(netcdf_file[bias_type][var_name]['file_name'], \
                                                   data_dictionary, \
                                                   timeBounds)

    # saving "return_period_historical":  the return period in present days (historical run) belonging to future extreme values
    # - to pcraster files only
    for return_period in return_periods:

        # report to a pcraster map
        pcr.report(pcr.ifthen(landmask, extreme_values['return_period_historical'][return_period]), 'return_period_historical_corresponding_to' + "_" + str(return_period) + ".map")
Example #46
0
def build_model(
    geojson_path,
    cellsize,
    model,
    timestep,
    name,
    case_template,
    case_path,
    fews,
    fews_config_path,
    dem_path,
    river_path,
    outlet_path,
    region_filter,
):
    """Prepare a simple WFlow model, anywhere, based on global datasets."""

    # lists below need to stay synchronized, not sure of a better way
    [
        geojson_path,
        model,
        timestep,
        name,
        case_template,
        case_path,
        fews_config_path,
        dem_path,
        river_path,
        outlet_path,
        region_filter,
    ] = [
        encode_utf8(p) for p in [
            geojson_path,
            model,
            timestep,
            name,
            case_template,
            case_path,
            fews_config_path,
            dem_path,
            river_path,
            outlet_path,
            region_filter,
        ]
    ]

    # fill in the dependent defaults
    if name is None:
        name = "wflow_{}_case".format(model)
    if case_template is None:
        case_template = "wflow_{}_template".format(model)
    if model == "hbv":
        if timestep == "hourly":
            case_template = "wflow_{}_hourly_template".format(model)
        else:
            case_template = "wflow_{}_daily_template".format(model)

    # assumes it is in decimal degrees, see Geod
    case = os.path.join(case_path, name)
    path_catchment = os.path.join(case, "data/catchments/catchments.geojson")

    region = hydro_engine_geometry(geojson_path, region_filter)

    # get the centroid of the region, such that we have a point for unit conversion
    centroid = sg.shape(region).centroid
    x, y = centroid.x, centroid.y

    filter_upstream_gt = 1000
    crs = "EPSG:4326"

    g = Geod(ellps="WGS84")
    # convert to meters in the center of the grid
    # Earth Engine expects meters
    _, _, crossdist_m = g.inv(x, y, x + cellsize, y + cellsize)
    cellsize_m = sqrt(0.5 * crossdist_m**2)
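    # crossdist_m is the geodesic length of the cell diagonal; for a square
    # cell the side follows from s = d / sqrt(2), i.e. sqrt(0.5 * d**2)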

    # start by making case an exact copy of the template
    copycase(case_template, case)

    # create folder structure for data folder
    for d in ["catchments", "dem", "rivers"]:
        dir_data = os.path.join(case, "data", d)
        ensure_dir_exists(dir_data)

    # create grid
    path_log = "wtools_create_grid.log"
    dir_mask = os.path.join(case, "mask")
    projection = "EPSG:4326"

    download_catchments(region,
                        path_catchment,
                        geojson_path,
                        region_filter=region_filter)
    cg_extent = path_catchment

    cg.main(path_log,
            dir_mask,
            cg_extent,
            projection,
            cellsize,
            locationid=name,
            snap=True)
    mask_tif = os.path.join(dir_mask, "mask.tif")

    with rasterio.open(mask_tif) as ds:
        bbox = ds.bounds

    # create static maps
    dir_dest = os.path.join(case, "staticmaps")
    # use custom inifile, default high res ldd takes too long
    path_inifile = os.path.join(case, "data/staticmaps.ini")
    path_dem_in = os.path.join(case, "data/dem/dem.tif")
    dir_lai = os.path.join(case, "data/parameters/clim")

    if river_path is None:
        # download the global dataset
        river_data_path = os.path.join(case, "data/rivers/rivers.geojson")
        # raise ValueError("User must supply river_path for now, see hydro-engine#14")
        download_rivers(region,
                        river_data_path,
                        filter_upstream_gt,
                        region_filter=region_filter)
    else:
        # take the local dataset, reproject and clip
        # command line equivalent of
        # ogr2ogr -t_srs EPSG:4326 -f GPKG -overwrite -clipdst xmin ymin xmax ymax rivers.gpkg rivers.shp
        river_data_path = os.path.join(case, "data/rivers/rivers.gpkg")
        ogr2ogr.main([
            "",
            "-t_srs",
            "EPSG:4326",
            "-f",
            "GPKG",
            "-overwrite",
            "-clipdst",
            str(bbox.left),
            str(bbox.bottom),
            str(bbox.right),
            str(bbox.top),
            river_data_path,
            river_path,
        ])

    if dem_path is None:
        # download the global dem
        download_raster(region,
                        path_dem_in,
                        "dem",
                        cellsize_m,
                        crs,
                        region_filter=region_filter)
    else:
        # warp the local dem onto model grid
        wt.warp_like(
            dem_path,
            path_dem_in,
            mask_tif,
            format="GTiff",
            co={"dtype": "float32"},
            resampling=warp.Resampling.med,
        )

    other_maps = {
        "sbm": [
            "FirstZoneCapacity",
            "FirstZoneKsatVer",
            "FirstZoneMinCapacity",
            "InfiltCapSoil",
            "M",
            "PathFrac",
            "WaterFrac",
            "thetaS",
            "soil_type",
            "landuse",
        ],
        "hbv": [
            "BetaSeepage",
            "Cfmax",
            "CFR",
            "FC",
            "K0",
            "LP",
            "Pcorr",
            "PERC",
            "SFCF",
            "TT",
            "WHC",
        ],
    }

    # TODO rename these in hydro-engine
    newnames = {
        "FirstZoneKsatVer": "KsatVer",
        "FirstZoneMinCapacity": "SoilMinThickness",
        "FirstZoneCapacity": "SoilThickness",
        "landuse": "wflow_landuse",
        "soil_type": "wflow_soil",
    }

    # destination paths
    path_other_maps = []
    for param in other_maps[model]:
        path = os.path.join(case, "data/parameters",
                            newnames.get(param, param) + ".tif")
        path_other_maps.append(path)

    for param, path in zip(other_maps[model], path_other_maps):
        if model == "sbm":
            download_raster(region,
                            path,
                            param,
                            cellsize_m,
                            crs,
                            region_filter=region_filter)
        elif model == "hbv":
            # these are not yet in the earth engine, use local paths
            if timestep == "hourly":
                path_staticmaps_global = (
                    r"p:\1209286-earth2observe\HBV-GLOBAL\staticmaps_hourly")
            else:
                path_staticmaps_global = (
                    r"p:\1209286-earth2observe\HBV-GLOBAL\staticmaps")
            path_in = os.path.join(path_staticmaps_global, param + ".tif")

            # warp the local staticmaps onto model grid
            wt.warp_like(
                path_in,
                path,
                mask_tif,
                format="GTiff",
                co={"dtype": "float32"},
                resampling=warp.Resampling.med,
            )

    if model == "sbm":
        ensure_dir_exists(dir_lai)
        for m in range(1, 13):
            mm = str(m).zfill(2)
            path = os.path.join(dir_lai, "LAI00000.0{}".format(mm))
            download_raster(
                region,
                path,
                "LAI{}".format(mm),
                cellsize_m,
                crs,
                region_filter=region_filter,
            )
    else:
        # TODO this creates defaults in static_maps, disable this behavior?
        # or otherwise adapt static_maps for the other models
        dir_lai = None

    # create default folder structure for running wflow
    dir_inmaps = os.path.join(case, "inmaps")
    ensure_dir_exists(dir_inmaps)
    dir_instate = os.path.join(case, "instate")
    ensure_dir_exists(dir_instate)
    for d in [
            "instate", "intbl", "intss", "outmaps", "outstate", "outsum",
            "runinfo"
    ]:
        dir_run = os.path.join(case, "run_default", d)
        ensure_dir_exists(dir_run)

    if outlet_path is None:
        # this is for coastal catchments only; if the catchment is not coastal
        # and no outlets are found, the outlet will simply be the pit of the ldd
        outlets = outlets_coords(path_catchment, river_data_path)
    else:
        # take the local dataset, reproject and clip
        outlet_data_path = os.path.join(case, "data/rivers/outlets.gpkg")
        ogr2ogr.main([
            "",
            "-t_srs",
            "EPSG:4326",
            "-f",
            "GPKG",
            "-overwrite",
            "-clipdst",
            str(bbox.left),
            str(bbox.bottom),
            str(bbox.right),
            str(bbox.top),
            outlet_data_path,
            outlet_path,
        ])
        x = []
        y = []
        with fiona.open(outlet_data_path) as c:
            for f in c:
                coords = f["geometry"]["coordinates"]
                x.append(coords[0])
                y.append(coords[1])
        outlets_x = np.array(x)
        outlets_y = np.array(y)
        outlets = outlets_x, outlets_y

    sm.main(
        dir_mask,
        dir_dest,
        path_inifile,
        path_dem_in,
        river_data_path,
        path_catchment,
        lai=dir_lai,
        other_maps=path_other_maps,
        outlets=outlets,
    )

    if fews:
        # save default state-files in FEWS-config
        dir_state = os.path.join(case, "outstate")
        ensure_dir_exists(dir_state)
        if model == "sbm":
            state_files = [
                "CanopyStorage.map",
                "GlacierStore.map",
                "ReservoirVolume.map",
                "SatWaterDepth.map",
                "Snow.map",
                "SnowWater.map",
                "SurfaceRunoff.map",
                "SurfaceRunoffDyn.map",
                "TSoil.map",
                "UStoreLayerDepth_0.map",
                "WaterLevel.map",
                "WaterLevelDyn.map",
            ]
        elif model == "hbv":
            state_files = [
                "DrySnow.map",
                "FreeWater.map",
                "InterceptionStorage.map",
                "LowerZoneStorage.map",
                "SoilMoisture.map",
                "SurfaceRunoff.map",
                "UpperZoneStorage.map",
                "WaterLevel.map",
            ]
        zip_name = name + "_GA_Historical default.zip"

        zip_loc = os.path.join(fews_config_path, "ColdStateFiles", zip_name)
        path_csf = os.path.dirname(zip_loc)
        ensure_dir_exists(path_csf)

        mask = pcr.readmap(os.path.join(dir_mask, "mask.map"))

        with zipfile.ZipFile(zip_loc, mode="w") as zf:
            for state_file in state_files:
                state_path = os.path.join(dir_state, state_file)
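                # default cold state: the model mask covered with zeros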
                pcr.report(pcr.cover(mask, pcr.scalar(0)), state_path)
                zf.write(state_path,
                         state_file,
                         compress_type=zipfile.ZIP_DEFLATED)
            netcdf_report.create_variable(\
                                          ncFileName = netcdf_file[bias_type][var_name]['file_name'], \
                                          varName    = variable_name, \
                                          varUnit    = variable_unit, \
                                          longName   = var_long_name, \
                                          comment    = varDict.comment[var_name]
                                          )
        
        # store the variables to pcraster map and netcdf files:
        data_dictionary = {}
        for return_period in return_periods:
            
            # variable name
            variable_name = str(return_period) + "_of_" + varDict.netcdf_short_name[var_name]
            
            # report to a pcraster map
            pcr.report(extreme_values[bias_type][return_period], bias_type + "_" + variable_name + ".map")
        
            # put it into a dictionary
            data_dictionary[variable_name] = pcr.pcr2numpy(extreme_values[bias_type][return_period], vos.MV)
        
        # save the variables to a netcdf file
        netcdf_report.dictionary_of_data_to_netcdf(netcdf_file[bias_type][var_name]['file_name'], \
                                                   data_dictionary, \
                                                   timeBounds)




Example #48
0
def writePCRmapToDir(v, outFileName, outDir):
    # v: a PCRaster map (or a floating point value) to be written
    fullFileName = getFullPath(outFileName, outDir)
    pcr.report(v, fullFileName)
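
# A minimal usage sketch for writePCRmapToDir. getFullPath is not shown in this
# snippet; the stub below is an assumption (the real helper may do more):
import os
import pcraster as pcr

def getFullPath(outFileName, outDir):
    # hypothetical helper: join the output directory and the file name
    return os.path.join(outDir, outFileName)

dem = pcr.readmap("dem.map")                  # any scalar PCRaster map
writePCRmapToDir(dem, "dem_copy.map", "out")  # writes out/dem_copy.map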
Example #49
0
    
    # landmask                               
    landmask = pcr.defined(pcr.readmap(landmask05minFile))
    landmask = pcr.ifthen(landmask, landmask)
    # - extending landmask with uniqueIDs
    landmask = pcr.cover(landmask, pcr.defined(uniqueIDs))
    
    # extending class (country) ids
    max_step = 5
    for i in range(1, max_step+1, 1):
        cmd = "Extending class: step "+str(i)+" from " + str(max_step)
        print(cmd)
        uniqueIDs = pcr.cover(uniqueIDs, pcr.windowmajority(uniqueIDs, 0.5))
    # - use only cells within the landmask
    uniqueIDs = pcr.ifthen(landmask, uniqueIDs)
    pcr.report(uniqueIDs, "class_ids.map")                                
    
    # cell area at 5 arc min resolution
    cellArea = vos.readPCRmapClone(cellArea05minFile,
                                   cloneMapFileName, tmp_directory)
    cellArea = pcr.ifthen(landmask, cellArea)
    
    # get a sample cell for every id
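    # (per id: take the cells with the minimum x-coordinate, and among those
    #  the one with the minimum y-coordinate, so exactly one cell remains)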
    x_min_for_each_id = pcr.areaminimum(pcr.xcoordinate(pcr.boolean(1.0)), uniqueIDs)
    sample_cells      = pcr.xcoordinate(pcr.boolean(1.0)) == x_min_for_each_id
    y_min_for_each_id = pcr.areaminimum(pcr.ycoordinate(sample_cells), uniqueIDs)
    sample_cells      = pcr.ycoordinate(sample_cells) == y_min_for_each_id
    uniqueIDs_sample  = pcr.ifthen(sample_cells, uniqueIDs)
    # - save it to a pcraster map file
    pcr.report(uniqueIDs_sample, "sample.ids")                                
Example #50
0
def getQAtBasinMouths(discharge, basinMouth):
    temp = pcr.ifthenelse(basinMouth != 0, discharge * secondsPerDay(), 0.)
    pcr.report(temp, "temp.map")
    return (getMapTotal(temp) / 1e9)
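
# getQAtBasinMouths depends on two helpers that are not part of this snippet.
# A sketch of plausible implementations (assumptions, not the original code):
import pcraster as pcr

def secondsPerDay():
    # hypothetical: seconds in one day, to turn m3/s into m3/day
    return 86400.0

def getMapTotal(mapFile):
    # hypothetical: sum over all non-missing cells of a scalar map
    total, valid = pcr.cellvalue(pcr.maptotal(mapFile), 1)
    return total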
Example #51
0
moc = pcraster.moc.initialise(
    pcraster.clone(), timeIncrement, nrParticles, initialConcentration, effectivePorosity, storageCoefficient
)

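# 'raster' below is assumed to be a scalar field defined earlier in the test
# script (e.g. read with pcraster.readmap); it is reused for every input: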
flux = raster
flowX = raster
flowY = raster
longitudinalDispersionCoefficient = raster
transverseDispersionCoefficient = raster
hydraulicHead = raster
saturatedThickness = raster

concentration, particlesPerCell = moc.transport(
    flux,
    flowX,
    flowY,
    longitudinalDispersionCoefficient,
    transverseDispersionCoefficient,
    hydraulicHead,
    saturatedThickness,
)

changeInConcentration = (raster / raster) + 1.5
concentration = moc.adjust(changeInConcentration)

print(type(concentration))

pcraster.report(concentration, "concentration.map")

print "Ok"
def main():

    # output folder (and tmp folder)
    clean_out_folder = True
    if os.path.exists(out_folder):
        if clean_out_folder:
            shutil.rmtree(out_folder)
            os.makedirs(out_folder)
    else:
        os.makedirs(out_folder)
    os.chdir(out_folder)
    os.system("pwd")

    # set the clone map
    print("set the clone")
    pcr.setclone(global_ldd_30min_inp_file)

    # define the landmask
    print("define the landmask")
    # - based on the 30min input
    landmask_30min = define_landmask(input_file = global_landmask_30min_file,\
                                      clone_map_file = global_ldd_30min_inp_file,\
                                      output_map_file = "landmask_30min_only")
    # - based on the 05min input
    landmask_05min = define_landmask(input_file = global_landmask_05min_file,\
                                      clone_map_file = global_ldd_30min_inp_file,\
                                      output_map_file = "landmask_05min_only")
    # - based on the 30sec input
    landmask_30sec = define_landmask(input_file = global_landmask_30sec_file,\
                                      clone_map_file = global_ldd_30min_inp_file,\
                                      output_map_file = "landmask_30sec_only")
    # - based on the 03sec input
    landmask_03sec = define_landmask(input_file = global_landmask_03sec_file,\
                                      clone_map_file = global_ldd_30min_inp_file,\
                                      output_map_file = "landmask_03sec_only")
    #
    # - merge all landmasks
    landmask = pcr.cover(landmask_30min, landmask_05min, landmask_30sec,
                         landmask_03sec)
    pcr.report(landmask, "global_landmask_30min_final.map")
    # ~ pcr.aguila(landmask)

    # extend ldd
    print("extend/define the ldd")
    ldd_map = pcr.readmap(global_ldd_30min_inp_file)
    ldd_map = pcr.ifthen(landmask, pcr.cover(ldd_map, pcr.ldd(5)))
    pcr.report(ldd_map, "global_ldd_final.map")
    # ~ pcr.aguila(ldd_map)

    # identify small islands
    print("identify small islands")
    # - maps of islands smaller than 10000 cells (at half arc degree resolution)
    island_map = pcr.ifthen(landmask, pcr.clump(pcr.defined(ldd_map)))
    island_size = pcr.areatotal(pcr.spatial(pcr.scalar(1.0)), island_map)
    island_map = pcr.ifthen(island_size < 10000., island_map)

    # identify the biggest island for every group of small islands within 10x10 arcdeg windows

    # identify big catchments

    # merge biggest islands and big catchments

    # make catchment and island map
    print("make catchment and island map")

    # - catchment map
    catchment_map = pcr.catchment(ldd_map, pcr.pit(ldd_map))
    pcr.report(catchment_map, "global_catchment_not_sorted.map")
    os.system("mapattr -p global_catchment_not_sorted.map")
    num_of_catchments = int(vos.getMinMaxMean(pcr.scalar(catchment_map))[1])

    # - maps of islands smaller than 10000 cells
    island_map = pcr.ifthen(landmask, pcr.clump(pcr.defined(ldd_map)))
    island_size = pcr.areatotal(pcr.spatial(pcr.scalar(1.0)), island_map)
    island_map = pcr.ifthen(island_size < 10000., island_map)
    island_map = pcr.nominal(
        pcr.scalar(pcr.ifthen(landmask, pcr.clump(island_map))) +
        pcr.scalar(num_of_catchments) * 100.)
    pcr.aguila(island_map)

    island_size = pcr.ifthen(pcr.defined(island_map), island_size)

    island_map = pcr.ifthen(island_size == pcr.windowmaximum(island_size, 10.),
                            island_map)
    pcr.aguila(island_map)

    catchment_map = pcr.cover(island_map, catchment_map)

    # - calculate the size
    catchment_size = pcr.areatotal(pcr.spatial(pcr.scalar(1.0)), catchment_map)
    # - sort from the largest catchment
    catchment_pits_boolean = pcr.ifthen(
        pcr.scalar(pcr.pit(ldd_map)) > 0.0, pcr.boolean(1.0))
    catchment_pits_boolean = pcr.ifthen(catchment_pits_boolean,
                                        catchment_pits_boolean)
    pcr.aguila(catchment_pits_boolean)
    catchment_map = pcr.nominal(
        pcr.areaorder(catchment_size * -1.0,
                      pcr.nominal(catchment_pits_boolean)))
    catchment_map = pcr.catchment(ldd_map, catchment_map)
    pcr.report(catchment_map, "global_catchment_final.map")
    os.system("mapattr -p global_catchment_final.map")
    # - calculate the size
    catchment_size = pcr.areatotal(pcr.spatial(pcr.scalar(1.0)), catchment_map)
    pcr.report(catchment_size, "global_catchment_size_in_number_of_cells.map")

    # number of catchments
    num_of_catchments = int(vos.getMinMaxMean(pcr.scalar(catchment_map))[1])

    # size of the largest catchment
    catchment_size_max = vos.getMinMaxMean(catchment_size)[1]
    print("")
    print(str(float(catchment_size_max)))
    print("")

    # identify all large catchments with size >= 50 cells (at the resolution of 30 arcmin) = 50 x (50^2) km2 = 125000 km2
    print("identify catchments with the minimum size of 50 cells")
    catchment_map_ge_50 = pcr.ifthen(catchment_size >= 50, catchment_map)
    pcr.report(catchment_map_ge_50, "global_catchment_ge_50_cells.map")

    # include the island
    catchment_map_ge_50 = pcr.cover(island_map, catchment_map_ge_50)

    # perform cdo fillmiss2 in order to merge the small catchments to the nearest large catchments
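    # the round trip below is: PCRaster map -> NetCDF (gdal_translate), fill
    # missing values from neighbouring cells (cdo fillmiss2, applied twice so
    # cells that stayed missing after the first pass are also filled), then
    # NetCDF -> PCRaster and restore the clone attributes with mapattr -c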
    cmd = "gdal_translate -of NETCDF global_catchment_ge_50_cells.map global_catchment_ge_50_cells.nc"
    print(cmd)
    os.system(cmd)
    cmd = "cdo fillmiss2 global_catchment_ge_50_cells.nc global_catchment_ge_50_cells_filled.nc"
    print(cmd)
    os.system(cmd)
    cmd = "cdo fillmiss2 global_catchment_ge_50_cells_filled.nc global_catchment_ge_50_cells_filled.nc"
    print(cmd)
    os.system(cmd)
    cmd = "gdal_translate -of PCRaster global_catchment_ge_50_cells_filled.nc global_catchment_ge_50_cells_filled.map"
    print(cmd)
    os.system(cmd)
    cmd = "mapattr -c " + global_ldd_30min_inp_file + " " + "global_catchment_ge_50_cells_filled.map"
    print(cmd)
    os.system(cmd)
    # - initial subdomains
    subdomains_initial = pcr.nominal(
        pcr.readmap("global_catchment_ge_50_cells_filled.map"))
    subdomains_initial = pcr.areamajority(subdomains_initial, catchment_map)
    pcr.aguila(subdomains_initial)
    # - initial subdomains clump
    subdomains_initial_clump = pcr.clump(subdomains_initial)
    pcr.aguila(subdomains_initial_clump)

    print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[0])))
    print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[1])))

    print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial_clump))[0])))
    print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial_clump))[1])))

    # - remove temporary files (no longer used)
    cmd = "rm global_catchment_ge_50_cells_filled*"
    print(cmd)
    os.system(cmd)

    # clone code that will be assigned
    assigned_number = 0
Example #53
0
def move_raster_to_boolean(pathname):

    raster = pcraster.readmap(pathname)
    raster = pcraster.boolean(raster)
    pcraster.report(raster, pathname)
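
# A short usage sketch, converting every map in a folder to boolean in place
# (the glob pattern is illustrative, not from the original code):
import glob

for pathname in glob.glob("masks/*.map"):
    move_raster_to_boolean(pathname)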
Example #54
0
    def test_07(self):
        """ unpickle ldd """
        field_pkl = pickle.load(open("pickle_ldd.pkl", "rb"))
        pcraster.report(field_pkl, "pickle_ldd.map")
        self.assertTrue(self.mapEqualsValidated(field_pkl, "ldd_Result.map"))
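
# The test above only unpickles; a sketch of the matching pickling step,
# assuming any valid ldd map is available (file names are illustrative):
import pickle
import pcraster

ldd_field = pcraster.readmap("ldd.map")
with open("pickle_ldd.pkl", "wb") as f:
    pickle.dump(ldd_field, f)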
Example #56
0
def main():
    """

    """
    workdir = "."
    inifile = "wflow_prepare.ini"

    try:
        opts, args = getopt.getopt(sys.argv[1:], "W:hI:f", ['version'])
    except getopt.error as msg:
        usage(msg)

    for o, a in opts:
        if o == "-W":
            workdir = a
        if o == "-I":
            inifile = a
        if o == "-h":
            usage()
        if o == "-f":
            recreate = True
        if o == "--version":
            import wflow
            print("wflow version: ", wflow.__version__)
            sys.exit(0)

    os.chdir(workdir)

    config = OpenConf(workdir + "/" + inifile)

    step1dir = configget(config, "directories", "step1dir", "step1")
    step2dir = configget(config, "directories", "step2dir", "step2")
    snapgaugestoriver = bool(
        int(configget(config, "settings", "snapgaugestoriver", "1"))
    )

    # make the directories to save results in
    if not os.path.isdir(step1dir + "/"):
        os.makedirs(step1dir)
    if not os.path.isdir(step2dir):
        os.makedirs(step2dir)

    # first make the clone map
    try:
        Xul = float(config.get("settings", "Xul"))
        Yul = float(config.get("settings", "Yul"))
        Xlr = float(config.get("settings", "Xlr"))
        Ylr = float(config.get("settings", "Ylr"))
    except:
        print("Xul, Xul, Xlr and  Ylr are required entries in the ini file")
        sys.exit(1)

    csize = float(configget(config, "settings", "cellsize", "1"))
    try:
        gauges_x = config.get("settings", "gauges_x")
        gauges_y = config.get("settings", "gauges_y")
    except:
        print("gauges_x and  gauges_y are required entries in the ini file")
        sys.exit(1)

    strRiver = int(configget(config, "settings", "riverorder_step2", "4"))

    corevolume = float(configget(config, "settings", "corevolume", "1E35"))
    catchmentprecipitation = float(
        configget(config, "settings", "catchmentprecipitation", "1E35")
    )
    corearea = float(configget(config, "settings", "corearea", "1E35"))
    outflowdepth = float(configget(config, "settings", "lddoutflowdepth", "1E35"))
    lddmethod = configget(config, "settings", "lddmethod", "dem")
    lddglobaloption = configget(config, "settings", "lddglobaloption", "lddout")
    pcr.setglobaloption(lddglobaloption)

    nrrow = round(abs(Yul - Ylr) / csize)
    nrcol = round(abs(Xlr - Xul) / csize)
    mapstr = (
        "mapattr -s -S -R "
        + str(nrrow)
        + " -C "
        + str(nrcol)
        + " -l "
        + str(csize)
        + " -x "
        + str(Xul)
        + " -y "
        + str(Yul)
        + " -P yb2t "
        + step2dir
        + "/cutout.map"
    )

    os.system(mapstr)
    pcr.setclone(step2dir + "/cutout.map")

    lu_water = configget(config, "files", "lu_water", "")
    lu_paved = configget(config, "files", "lu_paved", "")

    if lu_water:
        os.system(
            "resample --clone "
            + step2dir
            + "/cutout.map "
            + lu_water
            + " "
            + step2dir
            + "/wflow_waterfrac.map"
        )

    if lu_paved:
        os.system(
            "resample --clone "
            + step2dir
            + "/cutout.map "
            + lu_paved
            + " "
            + step2dir
            + "/PathFrac.map"
        )

    #
    try:
        lumap = config.get("files", "landuse")
    except:
        print("no landuse map...creating uniform map")
        clone = pcr.readmap(step2dir + "/cutout.map")
        pcr.report(pcr.nominal(clone), step2dir + "/wflow_landuse.map")
    else:
        os.system(
            "resample --clone "
            + step2dir
            + "/cutout.map "
            + lumap
            + " "
            + step2dir
            + "/wflow_landuse.map"
        )

    try:
        soilmap = config.get("files", "soil")
    except:
        print("no soil map..., creating uniform map")
        clone = pcr.readmap(step2dir + "/cutout.map")
        pcr.report(pcr.nominal(clone), step2dir + "/wflow_soil.map")
    else:
        os.system(
            "resample --clone "
            + step2dir
            + "/cutout.map "
            + soilmap
            + " "
            + step2dir
            + "/wflow_soil.map"
        )

    resamplemaps(step1dir, step2dir)

    dem = pcr.readmap(step2dir + "/wflow_dem.map")
    demmin = pcr.readmap(step2dir + "/wflow_demmin.map")
    demmax = pcr.readmap(step2dir + "/wflow_demmax.map")
    # catchcut = pcr.readmap(step2dir + "/catchment_cut.map")
    catchcut = pcr.readmap(step2dir + "/cutout.map")
    # now apply the area of interest (catchcut) to the DEM
    # dem=pcr.ifthen(catchcut >=1 , dem)
    #

    # See if there is a shape file of the river to burn in
    try:
        rivshp = config.get("files", "river")
    except:
        print("no river file specified")
        riverburn = pcr.readmap(step2dir + "/wflow_riverburnin.map")
    else:
        print("river file speficied.....")
        # rivshpattr = config.get("files","riverattr")
        pcr.report(dem * 0.0, step2dir + "/nilmap.map")
        thestr = (
            "gdal_translate -of GTiff "
            + step2dir
            + "/nilmap.map "
            + step2dir
            + "/wflow_riverburnin.tif"
        )
        os.system(thestr)
        rivshpattr = os.path.splitext(os.path.basename(rivshp))[0]
        os.system(
            "gdal_rasterize -burn 1 -l "
            + rivshpattr
            + " "
            + rivshp
            + " "
            + step2dir
            + "/wflow_riverburnin.tif"
        )
        thestr = (
            "gdal_translate -of PCRaster "
            + step2dir
            + "/wflow_riverburnin.tif "
            + step2dir
            + "/wflow_riverburnin.map"
        )
        os.system(thestr)
        riverburn = pcr.readmap(step2dir + "/wflow_riverburnin.map")
        # ldddem = pcr.ifthenelse(riverburn >= 1.0, dem -1000 , dem)

    # Only burn within the original catchment
    riverburn = pcr.ifthen(pcr.scalar(catchcut) >= 1, riverburn)
    # Now set up a very high wall around the catchment, with a height that
    # decreases with distance from the catchment, so that the surroundings
    # slope away from the catchment
    if lddmethod != "river":
        print("Burning in highres-river ...")
        disttocatch = pcr.spread(pcr.nominal(catchcut), 0.0, 1.0)
        demmax = pcr.ifthenelse(
            pcr.scalar(catchcut) >= 1.0,
            demmax,
            demmax + (pcr.celllength() * 100.0) / disttocatch,
        )
        pcr.setglobaloption("unitcell")
        # demregional=pcr.windowaverage(demmin,100)
        demburn = pcr.cover(pcr.ifthen(pcr.boolean(riverburn), demmin - 100.0), demmax)
    else:
        print("using average dem..")
        demburn = dem

    ldd = tr.lddcreate_save(
        step2dir + "/wflow_ldd.map",
        demburn,
        True,
        outflowdepth=outflowdepth,
        corevolume=corevolume,
        catchmentprecipitation=catchmentprecipitation,
        corearea=corearea,
    )

    # Find catchment (overall)
    outlet = tr.find_outlet(ldd)
    sub = tr.subcatch(ldd, outlet)
    pcr.report(sub, step2dir + "/wflow_catchment.map")
    pcr.report(outlet, step2dir + "/wflow_outlet.map")

    # make river map
    strorder = pcr.streamorder(ldd)
    pcr.report(strorder, step2dir + "/wflow_streamorder.map")

    river = pcr.ifthen(pcr.boolean(strorder >= strRiver), strorder)
    pcr.report(river, step2dir + "/wflow_river.map")

    # make subcatchments
    # os.system("col2map --clone " + step2dir + "/cutout.map gauges.col " + step2dir + "/wflow_gauges.map")
    X = np.fromstring(gauges_x, sep=',')
    Y = np.fromstring(gauges_y, sep=',')

    pcr.setglobaloption("unittrue")

    outlmap = tr.points_to_map(dem, X, Y, 0.5)
    pcr.report(outlmap, step2dir + "/wflow_gauges_.map")

    if snapgaugestoriver:
        print("Snapping gauges to river")
        pcr.report(outlmap, step2dir + "/wflow_orggauges.map")
        outlmap = tr.snaptomap(outlmap, river)

    outlmap = pcr.ifthen(outlmap > 0, outlmap)
    pcr.report(outlmap, step2dir + "/wflow_gauges.map")

    scatch = tr.subcatch(ldd, outlmap)
    pcr.report(scatch, step2dir + "/wflow_subcatch.map")
# - calculate the upstream area of every pixel:
upstream_area = pcr.catchmenttotal(cell_area, ldd)
# - calculate the catchment area of every basin:
upstream_area_maximum = pcr.areamaximum(upstream_area, basin_map)
# - identify the outlet of every basin (in order to rederive the basin so that it is consistent with the ldd)
outlet = pcr.nominal(pcr.uniqueid(pcr.ifthen(upstream_area == upstream_area_maximum, pcr.boolean(1.0))))
# - ignoring outlets with small upstream areas
threshold = 50. * 1000. * 1000.                                                 # unit: m2 (= 50 km2)
outlet    = pcr.ifthen(upstream_area_maximum > threshold, outlet)
#~ pcr.aguila(outlet)
outlet = pcr.cover(outlet, pcr.nominal(0.0))
# - recalculate the basin
basin_map  = pcr.nominal(pcr.subcatchment(ldd, outlet))
basin_map  = pcr.clump(basin_map)
basin_map  = pcr.ifthen(landmask, basin_map)
pcr.report(basin_map , "basin_map.map")
#~ pcr.aguila(basin_map)
# - calculate the basin area
basin_area = pcr.areatotal(cell_area, basin_map)
pcr.report(basin_area, "basin_area.map")
#~ pcr.aguila(basin_area)


# finding the month that give the maximum discharge (from the climatology time series)
msg = "Identifying the month with peak discharge (from climatology time series):"
logger.info(msg)	
# - read the maximum monthly discharge for every basin
maximum_discharge = vos.netcdf2PCRobjClone(input_files['maximumClimatologyDischargeMonthAvg'], \
                                           "discharge", 1,\
                                           useDoy = "Yes",
                                           cloneMapFileName  = clone_map_file,\
Example #58
0
def data_assimilation(begin_date, n_days, perform_DA = False, ConvLSTM_arch = None):
    '''
    Run the wflow_sbm model day by day, optionally assimilating ConvLSTM
    predictions of the UStoreLayerDepth_0 state.

    Keyword arguments:
    begin_date: date on which DA starts, format 'YYYY-MM-DD'
    n_days: number of days for which DA is performed, counted from begin_date
    perform_DA: if True, perform data assimilation; if False, run in open loop (no DA)
    ConvLSTM_arch: architecture to use: 'stacked_sep_1' (parallel model) or 'stacked_2' (stacked model)
    '''
    
    begin_date = begin_date + str(' 00:00:00')
    
    q_val_modeled_6335115 = []
    q_val_modeled_6335117 = []
    q_val_modeled_9316159 = []
    q_val_modeled_9316160 = []
    q_val_modeled_9316161 = []
    q_val_modeled_9316163 = []
    q_val_modeled_9316166 = []
    q_val_modeled_9316168 = []
    q_val_modeled_9316170 = []
    date_list = []
    
    states = np.zeros((n_days, 91, 134))
    
    for i in tqdm(np.arange(n_days)):
        if i == 0:
            begin_date = datetime.strptime(begin_date, '%Y-%m-%d %H:%M:%S')
            begin_date = begin_date - timedelta(days = 1)#2
            start_time = str(begin_date)
        else:
            start_time = end_time
        
        for name in state_keys:
            global state_file
            state = state_file[name][test_split[0] - 1 + i] #-2
            # state = state_file[name][i]
            state = np.ma.getdata(state)
            state = numpy_operations.numpy2pcr(Scalar, state, -9999)
            # aguila(state)
            report(state, ('wflow_sbm/Nahe/instate/' 
                       + state_dict[name] + str('.map')))

                
        end_time = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
        end_time = end_time + timedelta(days = 1)
        print_time = end_time + timedelta(days = 1)
        print_time = str(print_time)
        end_time = str(end_time)

        config = configparser.ConfigParser()
        config.optionxform = str
        config.read('wflow_sbm/Nahe/wflow_sbm.ini')
        config.set('run', 'starttime', start_time)
        config.set('run', 'endtime', end_time)

        with open('wflow_sbm/Nahe/wflow_sbm.ini', 'w') as configfile:
            config.write(configfile)

        if perform_DA:
            if ConvLSTM_arch == 'stacked_sep_1':
                model_stacked_sep_1 = keras.models.load_model('saved_models/model_stacked_sep_1.h5', compile = False)
                prediction = model_stacked_sep_1.predict(x = [features_convlstm_test[0+i:1+i,:,:,:,0:1],
                                                              features_convlstm_test[0+i:1+i,:,:,:,1:2],
                                                              features_convlstm_test[0+i:1+i,:,:,:,2:3],
                                                              features_convlstm_test[0+i:1+i,:,:,:,3:4]])   
            elif ConvLSTM_arch == 'stacked_2':
                model_stacked_2 = keras.models.load_model('saved_models/model_stacked_2.h5', compile = False)
                prediction = model_stacked_2.predict(x = features_convlstm_test[0+i:1+i])
                
            prediction = prediction[0,:,:,0]
            prediction[mask] = -9999
            state_ust_0 = numpy_operations.numpy2pcr(Scalar, prediction, -9999)
            report(state_ust_0, ('wflow_sbm/Nahe/instate/UStoreLayerDepth_0.map'))
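            # the ConvLSTM prediction overwrites the UStoreLayerDepth_0 state
            # before the next one-day wflow_sbm run (this is the DA update step)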


        subprocess.run(['.../wflow_sbm.py', '-C',
                        'wflow_sbm/Nahe', '-R', 'da_run', '-f'])
                        
        
        
        states_act = '.../outmaps.nc'
        states_act = nc.Dataset(states_act)
        states_act = states_act['ust_0_'][:]
        states_act = np.ma.getdata(states_act)
        states[i] = states_act
        
        q_modeled = pd.read_csv('.../run.csv')
        
        
        q_val_modeled_6335115.append(q_modeled.loc[0]['6335115'])
        q_val_modeled_6335117.append(q_modeled.loc[0]['6335117'])
        q_val_modeled_9316159.append(q_modeled.loc[0]['9316159'])
        q_val_modeled_9316160.append(q_modeled.loc[0]['9316160'])
        q_val_modeled_9316161.append(q_modeled.loc[0]['9316161'])
        q_val_modeled_9316163.append(q_modeled.loc[0]['9316163'])
        q_val_modeled_9316166.append(q_modeled.loc[0]['9316166'])
        q_val_modeled_9316168.append(q_modeled.loc[0]['9316168'])
        q_val_modeled_9316170.append(q_modeled.loc[0]['9316170'])
        date_list.append(print_time)
    
    q_val_modeled_df = pd.DataFrame(columns = ['6335115', '6335117', '9316159', '9316160', '9316161', '9316163', '9316166', '9316168', '9316170'])
    q_val_modeled_df['6335115'] = q_val_modeled_6335115
    q_val_modeled_df['6335117'] = q_val_modeled_6335117
    q_val_modeled_df['9316159'] = q_val_modeled_9316159
    q_val_modeled_df['9316160'] = q_val_modeled_9316160
    q_val_modeled_df['9316161'] = q_val_modeled_9316161
    q_val_modeled_df['9316163'] = q_val_modeled_9316163
    q_val_modeled_df['9316166'] = q_val_modeled_9316166
    q_val_modeled_df['9316168'] = q_val_modeled_9316168
    q_val_modeled_df['9316170'] = q_val_modeled_9316170
    q_val_modeled_df['date'] = date_list

    np.savetxt(".../run_all.csv", q_val_modeled_df, delimiter=",", header = "6335115, 6335117, 9316159, 9316160, 9316161, 9316163, 9316166, 9316168, 9316170, date", comments = "", fmt="%s")
    
    np.save('.../statefile.npy', states)
Example #60
0
    def identifyModelPixel(self,tmpDir,\
                                catchmentAreaAll,\
                                landMaskClass,\
                                xCoordinate,yCoordinate,id):     

        # TODO: Include an option to consider average discharge. 
        
        logger.info("Identify model pixel for the grdc station "+str(id)+".")
        
        # make a temporary directory:
        randomDir = self.makeRandomDir(tmpDir) 

        # coordinate of grdc station
        xCoord  = float(self.attributeGRDC["grdc_longitude_in_arc_degree"][str(id)])
        yCoord  = float(self.attributeGRDC["grdc_latitude_in_arc_degree"][str(id)])
        
        # identify the point at pcraster model
        point = pcr.ifthen((pcr.abs(xCoordinate - xCoord) == pcr.mapminimum(pcr.abs(xCoordinate - xCoord))) &\
                           (pcr.abs(yCoordinate - yCoord) == pcr.mapminimum(pcr.abs(yCoordinate - yCoord))), \
                            pcr.boolean(1))
        
        # expanding the point
        point = pcr.windowmajority(point, self.cell_size_in_arc_degree * 5.0)
        point = pcr.ifthen(catchmentAreaAll > 0, point)
        point = pcr.boolean(point)

        # values based on the model;
        modelCatchmentArea = pcr.ifthen(point, catchmentAreaAll)        # unit: km2
        model_x_ccordinate = pcr.ifthen(point, xCoordinate)             # unit: arc degree
        model_y_ccordinate = pcr.ifthen(point, yCoordinate)             # unit: arc degree
        
        # calculate (absolute) difference with GRDC data
        # - initiating all of them with the values of MV
        diffCatchArea = pcr.abs(pcr.scalar(vos.MV))        # difference between the model and grdc catchment area (unit: km2) 
        diffDistance  = pcr.abs(pcr.scalar(vos.MV))        # distance between the model pixel and grdc catchment station (unit: arc degree)
        diffLongitude = pcr.abs(pcr.scalar(vos.MV))        # longitude difference (unit: arc degree)
        diffLatitude  = pcr.abs(pcr.scalar(vos.MV))        # latitude difference (unit: arc degree)
        #
        # - calculate (absolute) difference with GRDC data
        try:
            diffCatchArea = pcr.abs(modelCatchmentArea-\
                            float(self.attributeGRDC["grdc_catchment_area_in_km2"][str(id)]))
        except:
            logger.info("The difference in the model and grdc catchment area cannot be calculated.")
        try:
            diffLongitude = pcr.abs(model_x_ccordinate - xCoord)
        except:
            logger.info("The difference in longitude cannot be calculated.")
        try:
            diffLatitude  = pcr.abs(model_y_ccordinate - yCoord)
        except:
            logger.info("The difference in latitude cannot be calculated.")
        try:
            diffDistance  = (diffLongitude**(2) + \
                              diffLatitude**(2))**(0.5)                 # TODO: calculate distance in meter
        except:
            logger.info("Distance cannot be calculated.")
        
        # identify  masks
        masks = pcr.ifthen(pcr.boolean(point), landMaskClass)                                          

        # export the difference to temporary files: maps and txt
        catchmentAreaMap = randomDir+"/"+vos.get_random_word()+".area.map"
        diffCatchAreaMap = randomDir+"/"+vos.get_random_word()+".dare.map"
        diffDistanceMap  = randomDir+"/"+vos.get_random_word()+".dist.map"
        diffLatitudeMap  = randomDir+"/"+vos.get_random_word()+".dlat.map"
        diffLongitudeMap = randomDir+"/"+vos.get_random_word()+".dlon.map"
        #
        maskMap          = randomDir+"/"+vos.get_random_word()+".mask.map"
        diffColumnFile   = randomDir+"/"+vos.get_random_word()+".cols.txt" # output
        #
        pcr.report(pcr.ifthen(point,modelCatchmentArea), catchmentAreaMap)
        pcr.report(pcr.ifthen(point,diffCatchArea     ), diffCatchAreaMap)
        pcr.report(pcr.ifthen(point,diffDistance      ), diffDistanceMap )
        pcr.report(pcr.ifthen(point,diffLongitude     ), diffLongitudeMap)
        pcr.report(pcr.ifthen(point,diffLatitude      ), diffLatitudeMap )
        pcr.report(pcr.ifthen(point,masks             ), maskMap)
        #
        cmd = 'map2col '+catchmentAreaMap +' '+\
                         diffCatchAreaMap +' '+\
                         diffDistanceMap  +' '+\
                         diffLongitudeMap +' '+\
                         diffLatitudeMap  +' '+\
                         maskMap+' '+diffColumnFile
        print(cmd); os.system(cmd) 
        
        # use R to sort the file
        cmd = 'R -f saveIdentifiedPixels.R '+diffColumnFile
        print(cmd); os.system(cmd) 
        
        try:
            # read the output file (from R)
            f = open(diffColumnFile+".sel") ; allLines = f.read() ; f.close()
        
            # split the content of the file into several lines
            allLines = allLines.replace("\r",""); allLines = allLines.split("\n")
        
            selectedPixel = allLines[0].split(";")

            model_longitude_in_arc_degree = float(selectedPixel[0])
            model_latitude_in_arc_degree  = float(selectedPixel[1])
            model_catchment_area_in_km2   = float(selectedPixel[2])
            model_landmask                = str(selectedPixel[7])
            
            log_message  = "Model pixel for grdc station "+str(id)+" is identified (lat/lon in arc degree): "
            log_message += str(model_latitude_in_arc_degree) + " ; " +  str(model_longitude_in_arc_degree)
            logger.info(log_message)
            
            self.attributeGRDC["model_longitude_in_arc_degree"][str(id)] = model_longitude_in_arc_degree 
            self.attributeGRDC["model_latitude_in_arc_degree"][str(id)]  = model_latitude_in_arc_degree  
            self.attributeGRDC["model_catchment_area_in_km2"][str(id)]   = model_catchment_area_in_km2   
            self.attributeGRDC["model_landmask"][str(id)]                = model_landmask                

        except:
        
            logger.info("Model pixel for grdc station "+str(id)+" can NOT be identified.")
        
        self.cleanRandomDir(randomDir)