def testNonSpatialConversions(self):
    nonSpatialValue = pcraster.mapmaximum(pcraster.readmap("map2asc_PCRmap.map"))
    # Ordinal.
    nonSpatial = pcraster.ordinal(nonSpatialValue)
    self.assertEqual(bool(nonSpatial), True)
    self.assertEqual(int(nonSpatial), 124)
    self.assertEqual(float(nonSpatial), 124.0)
    # Nominal.
    nonSpatial = pcraster.nominal(nonSpatialValue)
    self.assertEqual(bool(nonSpatial), True)
    self.assertEqual(int(nonSpatial), 124)
    self.assertEqual(float(nonSpatial), 124.0)
    # Boolean.
    nonSpatial = pcraster.boolean(nonSpatialValue)
    self.assertEqual(bool(nonSpatial), True)
    self.assertEqual(int(nonSpatial), 1)
    self.assertEqual(float(nonSpatial), 1.0)
    # Scalar.
    nonSpatial = pcraster.scalar(pcraster.mapmaximum("abs_Expr.map"))
    self.assertEqual(bool(nonSpatial), True)
    self.assertEqual(int(nonSpatial), 14)
    self.assertEqual(float(nonSpatial), 14.0)
def testCellValueNonSpatial(self):
    raster = pcraster.readmap("abs_Expr.map")
    value, isValid = pcraster.cellvalue(pcraster.mapmaximum(raster), 1, 1)
    self.assertEqual(isValid, True)
    self.assertTrue(isinstance(value, float))
    self.assertEqual(value, 14.0)
    value, isValid = pcraster.cellvalue(pcraster.mapmaximum(raster), 1)
    self.assertEqual(isValid, True)
    self.assertTrue(isinstance(value, float))
    self.assertEqual(value, 14.0)
def testCellValueNonSpatial(self):
    raster = self._read_set_clone("abs_Expr.map")
    value, isValid = pcraster.cellvalue(pcraster.mapmaximum(raster), 1, 1)
    self.assertEqual(isValid, True)
    self.assertTrue(isinstance(value, float))
    self.assertEqual(value, 14.0)
    value, isValid = pcraster.cellvalue(pcraster.mapmaximum(raster), 1)
    self.assertEqual(isValid, True)
    self.assertTrue(isinstance(value, float))
    self.assertEqual(value, 14.0)
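# A minimal stand-alone sketch of the pattern the two tests above exercise:
# mapmaximum() yields a non-spatial field whose single value cellvalue() reads
# back as a Python float. The map name "dem.map" is a placeholder, not a file
# from the test suite.
import pcraster

pcraster.setclone("dem.map")                     # raster geometry taken from the map
dem = pcraster.readmap("dem.map")
highest = pcraster.mapmaximum(dem)               # non-spatial scalar result
value, isValid = pcraster.cellvalue(highest, 1)  # cell indices are 1-based
if isValid:
    peak = float(value)                          # plain Python float for further use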
def boundingBox(pcrmap):
    '''derive the bounding box for a map, return xmin,ymin,xmax,ymax'''
    xcoor = pcr.xcoordinate(pcrmap)
    ycoor = pcr.ycoordinate(pcrmap)
    xmin = pcr.cellvalue(pcr.mapminimum(xcoor), 1, 1)[0]
    xmax = pcr.cellvalue(pcr.mapmaximum(xcoor), 1, 1)[0]
    ymin = pcr.cellvalue(pcr.mapminimum(ycoor), 1, 1)[0]
    ymax = pcr.cellvalue(pcr.mapmaximum(ycoor), 1, 1)[0]
    return [math.floor(xmin), math.floor(ymin), math.ceil(xmax), math.ceil(ymax)]
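# Usage sketch for boundingBox(), assuming "mask.map" is a placeholder boolean
# map; xcoordinate()/ycoordinate() expect a boolean field.
import math
import pcraster as pcr

pcr.setclone("mask.map")
mask = pcr.boolean(pcr.readmap("mask.map"))
xmin, ymin, xmax, ymax = boundingBox(mask)
print("bounding box: %s %s %s %s" % (xmin, ymin, xmax, ymax))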
def test_07(self):
    """cellvalue_by_index NonSpatial"""
    raster = self._read_set_clone("abs_Expr.map")
    value, isValid = pcraster.cellvalue_by_indices(pcraster.mapmaximum(raster), 0, 0)
    self.assertEqual(isValid, True)
    self.assertTrue(isinstance(value, float))
    self.assertEqual(value, 14.0)
    value, isValid = pcraster.cellvalue_by_index(pcraster.mapmaximum(raster), 0)
    self.assertEqual(isValid, True)
    self.assertTrue(isinstance(value, float))
    self.assertEqual(value, 14.0)
def checkerboard(mapin, fcc):
    """
    checkerboard create a checkerboard map with unique id's in a
    fcc*fcc cells area. The resulting map can be used
    to derive statistics for (later) upscaling of maps (using the fcc factor)

    .. warning: use with unitcell to get most reliable results!

    Input:
        - map (used to determine coordinates)
        - fcc (size of the areas in cells)

    Output:
        - checkerboard type map
    """
    msker = pcr.defined(mapin)
    ymin = pcr.mapminimum(pcr.ycoordinate(msker))
    yc = (pcr.ycoordinate(msker) - ymin) / pcr.celllength()
    yc = pcr.rounddown(yc / fcc)
    # yc = yc/fcc
    xmin = pcr.mapminimum(pcr.xcoordinate(msker))
    xc = (pcr.xcoordinate(msker) - xmin) / pcr.celllength()
    xc = pcr.rounddown(xc / fcc)
    # xc = xc/fcc
    yc = yc * (pcr.mapmaximum(xc) + 1.0)
    xy = pcr.ordinal(xc + yc)
    return xy
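# Usage sketch for checkerboard(): the unique block ids can drive areaaverage()
# for block upscaling. "dem.map" is a placeholder input; the 'unitcell' global
# option expresses coordinates in cells, as the docstring's warning recommends.
import pcraster as pcr

pcr.setclone("dem.map")
pcr.setglobaloption("unitcell")
dem = pcr.readmap("dem.map")
blocks = checkerboard(dem, 10)             # one ordinal id per 10x10-cell block
dem_blocks = pcr.areaaverage(dem, blocks)  # block-mean elevation for upscaling
pcr.report(dem_blocks, "dem_blocks.map")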
def getMinMaxMean(mapFile, ignoreEmptyMap=False):
    mn = pcr.cellvalue(pcr.mapminimum(mapFile), 1)[0]
    mx = pcr.cellvalue(pcr.mapmaximum(mapFile), 1)[0]
    nrValues = pcr.cellvalue(pcr.maptotal(pcr.scalar(pcr.defined(mapFile))), 1)[0]  #/ getNumNonMissingValues(mapFile)
    if nrValues == 0.0 and ignoreEmptyMap:
        return 0.0, 0.0, 0.0
    else:
        return mn, mx, (getMapTotal(mapFile) / nrValues)
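# Quick sketch exercising getMinMaxMean(); getMapTotal() is assumed to be the
# companion helper that sums all non-missing cell values. "dem.map" is a
# placeholder input.
import pcraster as pcr

pcr.setclone("dem.map")
field = pcr.readmap("dem.map")
mn, mx, mean = getMinMaxMean(field)
print("min %f, max %f, mean %f" % (mn, mx, mean))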
def __init__(self, tssFilename, model, idMap=None, noHeader=False):
    """ """
    if not isinstance(tssFilename, str):
        raise Exception("timeseries output filename must be of type string")
    self._outputFilename = tssFilename
    self._maxId = 1
    self._spatialId = None
    self._spatialDatatype = None
    self._spatialIdGiven = False
    self._userModel = model
    self._writeHeader = not noHeader
    # array to store the timestep values
    self._sampleValues = None
    _idMap = False
    if isinstance(idMap, str) or isinstance(idMap, pcraster._pcraster.Field):
        _idMap = True
    nrRows = self._userModel.nrTimeSteps() - self._userModel.firstTimeStep() + 1
    if _idMap:
        self._spatialId = idMap
        if isinstance(idMap, str):
            self._spatialId = pcraster.readmap(idMap)
        _allowedDataTypes = [pcraster.Nominal, pcraster.Ordinal, pcraster.Boolean]
        if self._spatialId.dataType() not in _allowedDataTypes:
            raise Exception("idMap must be of type Nominal, Ordinal or Boolean")
        if self._spatialId.isSpatial():
            self._maxId, valid = pcraster.cellvalue(
                pcraster.mapmaximum(pcraster.ordinal(self._spatialId)), 1)
        else:
            self._maxId = 1
        # cell indices of the sample locations
        self._sampleAddresses = []
        for cellId in range(1, self._maxId + 1):
            self._sampleAddresses.append(self._getIndex(cellId))
        self._spatialIdGiven = True
        nrCols = self._maxId
        self._sampleValues = [[Decimal("NaN")] * nrCols for _ in [0] * nrRows]
    else:
        self._sampleValues = [[Decimal("NaN")] * 1 for _ in [0] * nrRows]
def sample(self, expression):
    """
    Sampling the current values of 'expression' at the given locations
    for the current timestep
    """
    arrayRowPos = self._userModel.currentTimeStep() - self._userModel.firstTimeStep()
    #if isinstance(expression, float):
    #    expression = pcraster.scalar(expression)
    try:
        # store the data type for tss file header
        if self._spatialDatatype is None:
            self._spatialDatatype = str(expression.dataType())
    except AttributeError as e:
        datatype, sep, tail = str(e).partition(" ")
        msg = "Argument must be a PCRaster map, type %s given. " \
              "If necessary use data conversion functions like scalar()" % (datatype)
        raise AttributeError(msg)
    if self._spatialIdGiven:
        if expression.dataType() == pcraster.Scalar or \
           expression.dataType() == pcraster.Directional:
            tmp = pcraster.areaaverage(pcraster.spatial(expression),
                                       pcraster.spatial(self._spatialId))
        else:
            tmp = pcraster.areamajority(pcraster.spatial(expression),
                                        pcraster.spatial(self._spatialId))
        col = 0
        for cellIndex in self._sampleAddresses:
            value, valid = pcraster.cellvalue(tmp, cellIndex)
            if not valid:
                value = Decimal("NaN")
            self._sampleValues[arrayRowPos][col] = value
            col += 1
    else:
        if expression.dataType() == pcraster.Scalar or \
           expression.dataType() == pcraster.Directional:
            tmp = pcraster.maptotal(pcraster.spatial(expression)) \
                  / pcraster.maptotal(pcraster.scalar(pcraster.defined(pcraster.spatial(expression))))
        else:
            tmp = pcraster.mapmaximum(pcraster.maptotal(pcraster.areamajority(
                pcraster.spatial(expression), pcraster.spatial(pcraster.nominal(1)))))
        value, valid = pcraster.cellvalue(tmp, 1)
        if not valid:
            value = Decimal("NaN")
        self._sampleValues[arrayRowPos] = value
    if self._userModel.currentTimeStep() == self._userModel.nrTimeSteps():
        self._writeTssFile()
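# The two methods above match the timeseries reporter of pcraster.framework
# (TimeoutputTimeseries). A minimal usage sketch inside a dynamic model;
# "clone.map", "samples.map" and the constant runoff field are placeholders.
import pcraster as pcr
from pcraster.framework import DynamicModel, DynamicFramework, TimeoutputTimeseries

class Model(DynamicModel):
    def __init__(self):
        DynamicModel.__init__(self)
        pcr.setclone("clone.map")

    def initial(self):
        # one tss column per sample location (nominal ids) in samples.map
        self.runoffTss = TimeoutputTimeseries("runoff", self, "samples.map", noHeader=False)

    def dynamic(self):
        runoff = pcr.spatial(pcr.scalar(1.0))  # stand-in for a computed flux
        self.runoffTss.sample(runoff)          # appends one row per timestep

dynModel = DynamicFramework(Model(), lastTimeStep=10, firstTimestep=1)
# dynModel.run()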
def pointPerClass(classMap):
    """Select a single random point from each class in classMap"""
    rand1 = 100 * pcr.uniform(pcr.boolean(classMap))
    rand2 = 100 * pcr.uniform(pcr.boolean(classMap))
    rand3 = 100 * pcr.uniform(pcr.boolean(classMap))
    randomMap = pcr.scalar(classMap) * rand1 * rand2 * rand3
    pointMap = pcr.ifthen(randomMap == pcr.areaminimum(randomMap, classMap), classMap)
    nrPointsPerClass = pcr.areatotal(pcr.scalar(pcr.boolean(pointMap)), classMap)
    # cellvalue indices are 1-based
    assert pcr.cellvalue(pcr.mapmaximum(nrPointsPerClass), 1)[0] == 1
    return pointMap
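# Usage sketch for pointPerClass(); "classes.map" is a placeholder nominal map.
# Each class ends up with exactly one randomly placed point carrying its class id.
import pcraster as pcr

pcr.setclone("classes.map")
classes = pcr.nominal(pcr.readmap("classes.map"))
points = pointPerClass(classes)
pcr.report(points, "class_points.map")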
def area_total_value(values, area_class):
    """Calculate the total value over the area class.

    values: map with values
    area_class: project area of the measure

    Returns a float with the total value over the area class.
    """
    area_total = pcr.areatotal(pcr.scalar(values), pcr.nominal(area_class))
    total_value, _ = pcr.cellvalue(pcr.mapmaximum(area_total), 1, 1)
    if total_value <= -3.40282346638e+38:
        # return 0 if only missing values
        total_value = 0
    return total_value
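# Usage sketch for area_total_value(); "rain.map" (scalar values) and
# "area.map" (mask of the measure) are placeholder inputs.
import pcraster as pcr

pcr.setclone("rain.map")
rain = pcr.readmap("rain.map")
area = pcr.readmap("area.map")
total = area_total_value(rain, area)
print("total value over the area class: %f" % total)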
def readTopo(self, iniItems, optionDict):

    # a dictionary/section of options that will be used
    if optionDict is None:
        optionDict = iniItems._sections["landSurfaceOptions"]

    # maps of elevation attributes:
    topoParams = ['tanslope', 'slopeLength', 'orographyBeta']
    if optionDict['topographyNC'] == str(None):
        for var in topoParams:
            input = configget(iniItems, "landSurfaceOptions", str(var), "None")
            vars(self)[var] = vos.readPCRmapClone(input, self.cloneMap,
                                                  self.tmpDir, self.inputDir)
            if var != "slopeLength":
                vars(self)[var] = pcr.cover(vars(self)[var], 0.0)
    else:
        topoPropertiesNC = vos.getFullPath(optionDict['topographyNC'], self.inputDir)
        for var in topoParams:
            vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime(
                topoPropertiesNC, var, cloneMapFileName=self.cloneMap)
            if var != "slopeLength":
                vars(self)[var] = pcr.cover(vars(self)[var], 0.0)

    #~ self.tanslope = pcr.max(self.tanslope, 0.00001)
    # In principle, tanslope can be zero. Zero tanslope will provide zero TCL (no interflow)

    # covering slopeLength with its maximum value
    self.slopeLength = pcr.cover(self.slopeLength, pcr.mapmaximum(self.slopeLength))

    # maps of relative elevation above flood plains
    dzRel = ['dzRel0001', 'dzRel0005',
             'dzRel0010', 'dzRel0020', 'dzRel0030', 'dzRel0040', 'dzRel0050',
             'dzRel0060', 'dzRel0070', 'dzRel0080', 'dzRel0090', 'dzRel0100']
    if optionDict['topographyNC'] == str(None):
        for i in range(0, len(dzRel)):
            var = dzRel[i]
            input = optionDict[str(var)]
            vars(self)[var] = vos.readPCRmapClone(input, self.cloneMap,
                                                  self.tmpDir, self.inputDir)
            vars(self)[var] = pcr.cover(vars(self)[var], 0.0)
            if i > 0:
                vars(self)[var] = pcr.max(vars(self)[var], vars(self)[dzRel[i - 1]])
    else:
        for i in range(0, len(dzRel)):
            var = dzRel[i]
            vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime(
                topoPropertiesNC, var, cloneMapFileName=self.cloneMap)
            vars(self)[var] = pcr.cover(vars(self)[var], 0.0)
            if i > 0:
                vars(self)[var] = pcr.max(vars(self)[var], vars(self)[dzRel[i - 1]])
def testReportNonSpatial(self):
    raster = pcraster.readmap("abs_Expr.map")
    max1 = pcraster.mapmaximum(raster)
    value, isValid = pcraster.cellvalue(max1, 1)
    self.assertTrue(isinstance(value, float))
    self.assertEqual(isValid, True)
    self.assertEqual(value, 14.0)
    pcraster.report(max1, "maximum.map")
    max2 = pcraster.readmap("maximum.map")
    for i in range(1, 8):
        value, isValid = pcraster.cellvalue(max2, i)
        self.assertEqual(isValid, True)
        self.assertTrue(isinstance(value, float))
        self.assertEqual(value, 14.0)
def find_outlet(ldd):
    """
    Tries to find the outlet of the largest catchment in the Ldd

    Input:
        - Ldd

    Output:
        - outlet map (single point in the map)
    """
    largest = pcr.mapmaximum(pcr.catchmenttotal(pcr.spatial(pcr.scalar(1.0)), ldd))
    outlet = pcr.ifthen(
        pcr.catchmenttotal(1.0, ldd) == largest, pcr.spatial(pcr.scalar(1.0))
    )
    return outlet
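# Usage sketch for find_outlet(); "ldd.map" is a placeholder local drain
# direction map. The result is 1.0 at the outlet cell of the largest catchment
# and missing value elsewhere.
import pcraster as pcr

pcr.setclone("ldd.map")
ldd = pcr.ldd(pcr.readmap("ldd.map"))
outlet = find_outlet(ldd)
pcr.report(outlet, "outlet.map")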
def correction_per_aquifer(self, id):

    id = float(id)
    print id

    # identify aquifer mask
    aquifer_landmask = pcr.ifthen(self.margat_aquifer_map == pcr.nominal(id), pcr.boolean(1))

    # obtain the logarithmic value of Margat value
    exp_margat_thick = pcr.cellvalue(
        pcr.mapmaximum(
            pcr.ifthen(aquifer_landmask, pcr.ln(self.margat_aquifer_thickness))), 1)[0]

    # obtain the logarithmic values of 'estimated thickness'
    exp_approx_thick = pcr.ifthen(aquifer_landmask, pcr.ln(self.approx_thick))
    exp_approx_thick_array = pcr.pcr2numpy(exp_approx_thick, vos.MV)
    exp_approx_thick_array = exp_approx_thick_array[exp_approx_thick_array != vos.MV]
    exp_approx_thick_array = exp_approx_thick_array[exp_approx_thick_array < 1000000.]

    # identify percentiles
    exp_approx_minim = np.percentile(exp_approx_thick_array, 2.5)
    exp_approx_maxim = np.percentile(exp_approx_thick_array, 97.5)

    # correcting
    exp_approx_thick_correct = (exp_approx_thick - exp_approx_minim) / \
                               (exp_approx_maxim - exp_approx_minim)
    exp_approx_thick_correct = pcr.max(0.0, exp_approx_thick_correct)
    exp_approx_thick_correct *= pcr.max(0.0, (exp_margat_thick - exp_approx_minim))
    exp_approx_thick_correct += pcr.min(exp_approx_minim, exp_approx_thick)

    # maximum thickness
    exp_approx_thick_correct = pcr.min(exp_margat_thick, exp_approx_thick_correct)

    # corrected thickness
    correct_thickness = pcr.exp(exp_approx_thick_correct)

    return correct_thickness
def waterBalanceCheck(fluxesIn, fluxesOut, preStorages, endStorages,
                      processName, PrintOnlyErrors, dateStr,
                      threshold=1e-5, landmask=None):
    """
    Returns the water balance for a list of input, output, and storage map files
    """
    # modified by Edwin (22 Apr 2013)
    inMap = pcr.spatial(pcr.scalar(0.0))
    outMap = pcr.spatial(pcr.scalar(0.0))
    dsMap = pcr.spatial(pcr.scalar(0.0))
    for fluxIn in fluxesIn:
        inMap += fluxIn
    for fluxOut in fluxesOut:
        outMap += fluxOut
    for preStorage in preStorages:
        dsMap += preStorage
    for endStorage in endStorages:
        dsMap -= endStorage

    a, b, c = getMinMaxMean(inMap + dsMap - outMap)
    if abs(a) > threshold or abs(b) > threshold:
        if PrintOnlyErrors:
            print "WBError %s Min %f Max %f Mean %f" % (processName, a, b, c)
            print ""
    wb = inMap + dsMap - outMap
    maxWBError = pcr.cellvalue(pcr.mapmaximum(pcr.abs(wb)), 1, 1)[0]
def dynamic(self):
    #####################
    # * dynamic section #
    #####################
    #-evaluation of the current date: return current month and the time step used
    #-reading in fluxes over land and water area for current time step [m/d]
    # and read in reservoir demand and surface water extraction [m3]
    try:
        self.landSurfaceQ = clippedRead.get(
            pcrm.generateNameT(landSurfaceQFileName, self.currentTimeStep()))
    except:
        pass
    try:
        self.potWaterSurfaceQ = clippedRead.get(
            pcrm.generateNameT(waterSurfaceQFileName, self.currentTimeStep()))
    except:
        pass
    #-surface water extraction and reservoir demand currently set to zero, should
    # be computed automatically and updated to reservoirs
    self.potSurfaceWaterExtraction = pcr.spatial(pcr.scalar(0.))
    #self.waterBodies.demand=
    #self.reservoirDemandTSS.assignID(self.waterBodies.ID,self.currentTimeStep(),0.)*self.timeSec
    #-initialization of cumulative values of actual water extractions
    self.actWaterSurfaceQ = pcr.spatial(pcr.scalar(0.))
    self.actSurfaceWaterExtraction = pcr.spatial(pcr.scalar(0.))
    #-definition of sub-loop for routing scheme - explicit scheme has to satisfy Courant condition
    timeLimit = pcr.cellvalue(
        pcr.mapminimum((pcr.cover(pcr.ifthen(self.waterBodies.distribution == 0,
            self.channelLength / self.flowVelocity),
            self.timeSec / self.nrIterDefault) * self.timeSec / self.nrIterDefault) ** 0.5), 1)[0]
    nrIter = int(self.timeSec / timeLimit)
    nrIter = min(nrIter, int(self.timeSec / 300.))
    while float(self.timeSec / nrIter) % 1 != 0:
        nrIter += 1
    deltaTime = self.timeSec / nrIter
    #-sub-loop for current time step
    if self.currentDate.day == 1 or nrIter >= 24:
        print '\n*\tprocessing %s, currently using %d substeps of %d seconds\n' % \
            (self.currentDate.date(), nrIter, deltaTime)
    #-update discharge and storage
    for nrICur in range(nrIter):
        #-initializing discharge for the current sub-timestep and fill in values
        # for channels and at outlets of waterbodies
        # * channels *
        estQ = pcr.ifthenelse((self.actualStorage > 0.) & (self.waterBodies.distribution == 0),
            (self.wettedArea / self.alphaQ) ** (1. / self.betaQ), 0.)
        #estQ= pcr.ifthenelse((self.actualStorage > 0.) & (self.waterBodies.distribution == 0),\
        #    0.5*(self.Q+(self.wettedArea/self.alphaQ)**(1./self.betaQ)),0.)
        #estQ= pcr.min(estQ,self.actualStorage/deltaTime)
        self.report(estQ, 'results/qest')
        self.Q = pcr.spatial(pcr.scalar(0.))
        self.Q = pcr.ifthenelse(self.waterBodies.distribution == 0,
            pcr.kinematic(self.channelLDD, estQ, 0., self.alphaQ,
                self.betaQ, 1, deltaTime, self.channelLength), self.Q)
        # * water bodies *
        self.waterBodies.dischargeUpdate()
        self.Q = self.waterBodies.returnMapValue(self.Q, self.waterBodies.actualQ)
        #-fluxes and resulting change in storage: first the local fluxes are evaluated
        # and aggregated over the water bodies where applicable; this includes the specific runoff [m/day/m2]
        # from input and the estimated extraction from surface water as volume per day [m3/day];
        # specific runoff from the land surface is always positive whereas the fluxes over the water surface
        # are potential, including discharge, and are adjusted to match the available storage; to this end,
        # surface water storage and fluxes over water bodies are totalized and assigned to the outlet;
        # discharge is updated in a separate step, after vertical fluxes are compared to the actual storage
        deltaActualStorage = ((self.landFraction * self.landSurfaceQ +
            self.waterFraction * self.potWaterSurfaceQ) * self.cellArea -
            self.potSurfaceWaterExtraction) * float(self.duration) / nrIter
        deltaActualStorage = pcr.ifthenelse(self.waterBodies.distribution != 0,
            pcr.ifthenelse(self.waterBodies.location != 0,
                pcr.areatotal(deltaActualStorage, self.waterBodies.distribution), 0),
            deltaActualStorage)
        adjustmentRatio = pcr.ifthenelse(deltaActualStorage < 0.,
            pcr.min(1., -self.actualStorage / deltaActualStorage), 1.)
        self.actWaterSurfaceQ += adjustmentRatio * self.potWaterSurfaceQ
        self.actSurfaceWaterExtraction += adjustmentRatio * self.actSurfaceWaterExtraction
        deltaActualStorage *= adjustmentRatio
        #-local water balance check
        if testLocalWaterBalance:
            differenceActualStorage = self.actualStorage
            differenceActualStorage += deltaActualStorage
        #-overall water balance check: net input
        self.cumulativeDeltaStorage += pcr.catchmenttotal(deltaActualStorage, self.LDD)
        #-update storage first with local changes, then balance discharge with storage and update storage
        # with lateral flow and return value to water bodies
        self.actualStorage += deltaActualStorage
        self.actualStorage = pcr.max(0., self.actualStorage)
        self.Q = pcr.min(self.Q, self.actualStorage / deltaTime)
        deltaActualStorage = (-self.Q + pcr.upstream(self.LDD, self.Q)) * deltaTime
        deltaActualStorage = pcr.ifthenelse(self.waterBodies.distribution != 0,
            pcr.ifthenelse(self.waterBodies.location != 0,
                pcr.areatotal(deltaActualStorage, self.waterBodies.distribution), 0),
            deltaActualStorage)
        self.actualStorage += deltaActualStorage
        self.actualStorage = pcr.max(0., self.actualStorage)
        self.waterBodies.actualStorage = self.waterBodies.retrieveMapValue(self.actualStorage)
        #-flooded fraction returned
        floodedFraction, floodedDepth, self.wettedArea, self.alphaQ = \
            self.kinAlphaComposite(self.actualStorage, self.floodplainMask)
        self.wettedArea = self.waterBodies.returnMapValue(self.wettedArea,
            self.waterBodies.channelWidth + 2. * self.waterBodies.updateWaterHeight())
        self.waterFraction = pcr.ifthenelse(self.waterBodies.distribution == 0,
            pcr.max(self.waterFractionMask, floodedFraction), self.waterFractionMask)
        self.landFraction = pcr.max(0., 1. - self.waterFraction)
        self.flowVelocity = pcr.ifthenelse(self.wettedArea > 0, self.Q / self.wettedArea, 0.)
        #-local water balance check
        if testLocalWaterBalance:
            differenceActualStorage += deltaActualStorage
            differenceActualStorage -= self.actualStorage
            totalDifference = pcr.cellvalue(pcr.maptotal(differenceActualStorage), 1)[0]
            minimumDifference = pcr.cellvalue(pcr.mapminimum(differenceActualStorage), 1)[0]
            maximumDifference = pcr.cellvalue(pcr.mapmaximum(differenceActualStorage), 1)[0]
            if abs(totalDifference) > 1.e-3:
                print 'water balance error: total %e; min %e; max %e' % \
                    (totalDifference, minimumDifference, maximumDifference)
            if reportLocalWaterBalance:
                pcr.report(differenceActualStorage, 'mbe_%s.map' % self.currentDate.date())
        #-overall water balance check: updating cumulative discharge and total storage [m3]
        self.totalDischarge += self.Q * deltaTime
        self.totalStorage = pcr.catchmenttotal(self.actualStorage, self.LDD)
    #-check on occurrence of last day and report mass balance
    if self.currentDate == self.endDate:
        #-report initial maps
        pcr.report(self.Q, self.QIniMap)
        pcr.report(self.actualStorage, self.actualStorageIniMap)
        #-return relative and absolute water balance error per cell and
        # as total at basin outlets
        self.totalDischarge = pcr.ifthen((self.waterBodies.distribution == 0) |
            (self.waterBodies.location != 0), self.totalDischarge)
        self.cumulativeDeltaStorage = pcr.ifthen((self.waterBodies.distribution == 0) |
            (self.waterBodies.location != 0), self.cumulativeDeltaStorage)
        massBalanceError = self.totalStorage + self.totalDischarge - \
            self.cumulativeDeltaStorage
        relMassBalanceError = 1. + pcr.ifthenelse(self.cumulativeDeltaStorage != 0.,
            massBalanceError / self.cumulativeDeltaStorage, 0.)
        totalMassBalanceError = pcr.cellvalue(pcr.maptotal(
            pcr.ifthen(self.basinOutlet, massBalanceError)), 1)[0]
        totalCumulativeDeltaStorage = pcr.cellvalue(pcr.maptotal(
            pcr.ifthen(self.basinOutlet, self.cumulativeDeltaStorage)), 1)[0]
        if totalCumulativeDeltaStorage > 0:
            totalRelativeMassBalanceError = 1. + totalMassBalanceError / totalCumulativeDeltaStorage
        else:
            totalRelativeMassBalanceError = 1.
        #-report maps and echo value
        pcr.report(massBalanceError, mbeFileName)
        pcr.report(relMassBalanceError, mbrFileName)
        print '\n*\ttotal global mass balance error [m3]: %8.3g' % totalMassBalanceError
        print '\n*\trelative global mass balance error [-]: %5.3f' % totalRelativeMassBalanceError
        #-echo to screen: total mass balance error and completion of run
        print '\trun completed'
    #-end of day: return states and fluxes
    #-get surface water attributes?
    if getSurfaceWaterAttributes:
        #-compute the following secondary variables:
        # surface water area [m2]: area given dynamic surface water fraction
        # residence time [days]: volume over discharge, assigned -1 in case discharge is zero
        # surface water depth [m], weighed by channel and floodplain volume
        surfaceWaterArea = self.waterFraction * self.cellArea
        surfaceWaterArea = pcr.ifthenelse(self.waterBodies.distribution != 0,
            pcr.ifthenelse(self.waterBodies.location != 0,
                pcr.areatotal(surfaceWaterArea, self.waterBodies.distribution), 0),
            surfaceWaterArea)
        surfaceWaterResidenceTime = pcr.ifthenelse(self.Q > 0.,
            self.actualStorage / (self.Q * self.timeSec), -1)
        surfaceWaterDepth = pcr.ifthenelse(self.actualStorage > 0.,
            pcr.max(0., self.actualStorage - self.channelStorageCapacity) ** 2 /
            (self.actualStorage * surfaceWaterArea), 0.)
        surfaceWaterDepth += pcr.ifthenelse(self.actualStorage > 0.,
            pcr.min(self.channelStorageCapacity, self.actualStorage) ** 2 /
            (self.waterFractionMask * self.cellArea * self.actualStorage), 0.)
        #-reports: values at outlet of lakes or reservoirs are assigned to their full extent
        self.report(pcr.ifthenelse(self.waterBodies.distribution != 0,
            pcr.areamaximum(surfaceWaterArea, self.waterBodies.distribution),
            surfaceWaterArea), surfaceWaterAreaFileName)
        self.report(pcr.ifthenelse(self.waterBodies.distribution != 0,
            pcr.areamaximum(surfaceWaterResidenceTime, self.waterBodies.distribution),
            surfaceWaterResidenceTime), surfaceWaterResidenceTimeFileName)
        self.report(pcr.ifthenelse(self.waterBodies.distribution != 0,
            pcr.areamaximum(surfaceWaterDepth, self.waterBodies.distribution),
            surfaceWaterDepth), surfaceWaterDepthFileName)
    #-reports on standard output: values at outlet of lakes or reservoirs are assigned to their full extent
    self.report(pcr.ifthenelse(self.waterBodies.distribution != 0,
        pcr.areamaximum(self.flowVelocity, self.waterBodies.distribution),
        self.flowVelocity), flowVelocityFileName)
    self.report(pcr.ifthenelse(self.waterBodies.distribution != 0,
        pcr.areamaximum(self.Q, self.waterBodies.distribution), self.Q), QFileName)
    self.report(pcr.ifthenelse(self.waterBodies.distribution == 0,
        floodedFraction, 0.), floodedFractionFileName)
    self.report(pcr.ifthenelse(self.waterBodies.distribution == 0,
        floodedDepth, 0.), floodedDepthFileName)
    self.report(self.actualStorage, actualStorageFileName)
    #-update date for time step and report relevant daily output
    self.currentDate = self.currentDate + datetime.timedelta(self.duration)
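# The sub-loop above picks nrIter so that an integer number of substeps fits in
# the timestep and each substep respects the Courant-type limit derived from
# channel length and flow velocity. A stand-alone sketch of that selection with
# illustrative numbers (the max(1, ...) guard is an addition for safety):
timeSec = 86400.0        # timestep length [s]
timeLimit = 5000.0       # e.g. minimum channelLength/flowVelocity over the map [s]

nrIter = max(1, int(timeSec / timeLimit))   # substeps needed for stability
nrIter = min(nrIter, int(timeSec / 300.))   # cap: substeps no shorter than 300 s
while float(timeSec / nrIter) % 1 != 0:     # enforce an integer substep length
    nrIter += 1
deltaTime = timeSec / nrIter                # here: 18 substeps of 4800 s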
def getMinMaxMean(mapFile):
    mn = pcr.cellvalue(pcr.mapminimum(mapFile), 1)[0]
    mx = pcr.cellvalue(pcr.mapmaximum(mapFile), 1)[0]
    nrValues = pcr.cellvalue(pcr.maptotal(pcr.scalar(pcr.defined(mapFile))), 1)[0]  #/ getNumNonMissingValues(mapFile)
    return mn, mx, (getMapTotal(mapFile) / nrValues)
def dynamic(self): """ dynamic part of the output module """ # ************************************************************ # ***** WRITING RESULTS: TIME SERIES ************************* # ************************************************************ # xxx=catchmenttotal(self.var.SurfaceRunForest * self.var.PixelArea, self.var.Ldd) * self.var.InvUpArea # self.var.Tss['DisTS'].sample(xxx) # self.report(self.Precipitation,binding['TaMaps']) # if fast init than without time series settings = LisSettings.instance() option = settings.options binding = settings.binding flags = settings.flags report_time_serie_act = settings.report_timeseries report_maps_end = settings.report_maps_end report_maps_steps = settings.report_maps_steps report_maps_all = settings.report_maps_all if not (option['InitLisfloodwithoutSplit']): if flags['loud']: # print the discharge of the first output map loc try: print(" %10.2f" % self.var.Tss["DisTS"].firstout( decompress(self.var.ChanQAvg))) except: pass for tss in report_time_serie_act: # report time series what = 'self.var.' + report_time_serie_act[tss].output_var how = report_time_serie_act[tss].operation[0] if len( report_time_serie_act[tss].operation) else '' if how == 'mapmaximum': changed = compressArray(mapmaximum(decompress(eval(what)))) what = 'changed' if how == 'total': changed = compressArray( catchmenttotal( decompress(eval(what)) * self.var.PixelAreaPcr, self.var.Ldd) * self.var.InvUpArea) what = 'changed' self.var.Tss[tss].sample(decompress(eval(what))) # ************************************************************ # ***** WRITING RESULTS: MAPS ****************************** # ************************************************************ # started nicely but now it becomes way to complicated, I am not happy about the next part -> has to be chaged checkifdouble = [] # list to check if map is reported more than once monthly = False yearly = False # Report END maps for maps in report_maps_end.keys(): # report end map filename if settings.mc_set: # MonteCarlo model where = os.path.join(str(self.var.currentSampleNumber()), binding[maps].split("/")[-1]) else: where = binding.get(maps) if not where: continue what = 'self.var.' + report_maps_end[maps].output_var if where not in checkifdouble: checkifdouble.append(where) # checks if saved at same place, if no: add to list if self.var.currentTimeStep() == self.var.nrTimeSteps(): # final step: Write end maps # Get start date for reporting start step # (last step indeed) reportStartDate = inttodate(self.var.currentTimeStep() - 1, self.var.CalendarDayStart) # if suffix with '.' is part of the filename report with # suffix head, tail = os.path.split(where) if '.' 
in tail: if option['writeNetcdf']: # CM mod: write end map to netCDF file (single) # CM ########################## try: writenet(0, eval(what), where, self.var.DtDay, maps, report_maps_end[maps].output_var, report_maps_end[maps].unit, 'f4', reportStartDate, self.var.currentTimeStep(), self.var.currentTimeStep()) except Exception as e: print(str(e), 'END', what, where, self.var.DtDay, maps, report_maps_end[maps].output_var, report_maps_end[maps].unit, 'f4', reportStartDate, self.var.currentTimeStep(), self.var.currentTimeStep()) ################################ else: report(decompress(eval(what)), str(where)) else: if option['writeNetcdfStack']: try: writenet(0, eval(what), where, self.var.DtDay, maps, report_maps_end[maps].output_var, report_maps_end[maps].unit, 'f4', reportStartDate, self.var.currentTimeStep(), self.var.currentTimeStep()) except Exception as e: print(str(e), 'END', what, where, self.var.DtDay, maps, report_maps_end[maps].output_var, report_maps_end[maps].unit, 'f4', reportStartDate, self.var.currentTimeStep(), self.var.currentTimeStep()) ########################### else: self.var.report(decompress(eval(what)), str(where)) # Report REPORTSTEPS maps for maps in report_maps_steps.keys(): # report reportsteps maps if settings.mc_set: # MonteCarlo model where = os.path.join(str(self.var.currentSampleNumber()), binding[maps].split("/")[-1]) else: where = binding.get(maps) if not where: continue what = 'self.var.' + report_maps_steps[maps].output_var if not (where in checkifdouble): checkifdouble.append(where) # checks if saved at same place, if no: add to list if self.var.currentTimeStep() in self.var.ReportSteps: flagcdf = 1 # index flag for writing nedcdf = 1 (=steps) -> indicated if a netcdf is created or maps are appended frequency = "all" try: if report_maps_steps[maps].monthly: monthly = True flagcdf = 3 # set to monthly (step) flag frequency = "monthly" except: monthly = False try: if report_maps_steps[maps].yearly: yearly = True flagcdf = 4 # set to yearly (step) flag frequency = "annual" except: yearly = False if (monthly and self.var.monthend) or ( yearly and self.var.yearend) or not (monthly or yearly): # checks if a flag monthly or yearly exists if option['writeNetcdfStack']: # Get start date for reporting start step reportStartDate = inttodate( self.var.ReportSteps[0] - 1, self.var.CalendarDayStart) # get step number for first reporting step reportStepStart = 1 # get step number for last reporting step reportStepEnd = self.var.ReportSteps[ -1] - self.var.ReportSteps[0] + 1 cdfflags = CDFFlags.instance() try: writenet(cdfflags[flagcdf], eval(what), where, self.var.DtDay, maps, report_maps_steps[maps].output_var, report_maps_steps[maps].unit, 'f4', reportStartDate, reportStepStart, reportStepEnd, frequency) except Exception as e: print(" +----> ERR: {}".format(str(e))) print( "REP flag:{} - {} {} {} {} {} {} {} {} {} {}" .format(cdfflags[flagcdf], what, where, self.var.DtDay, maps, report_maps_steps[maps].output_var, report_maps_steps[maps].unit, 'f4', reportStartDate, reportStepStart, reportStepEnd)) else: self.var.report(decompress(eval(what)), str(where)) # Report ALL maps for maps in report_maps_all.keys(): # report maps for all timesteps if settings.mc_set: where = os.path.join(str(self.var.currentSampleNumber()), binding[maps].split("/")[-1]) else: where = binding.get(maps) if not where: continue what = 'self.var.' 
+ report_maps_all[maps].output_var if where not in checkifdouble: checkifdouble.append(where) # checks if saved at same place, if no: add to list # index flag for writing nedcdf = 1 (=all) -> indicated if a netcdf is created or maps are appended # cannot check only if netcdf exists, because than an old netcdf will be used accidently flagcdf = 2 frequency = "all" try: if report_maps_all[maps].monthly: monthly = True flagcdf = 5 # set to monthly flag frequency = "monthly" except: monthly = False try: if report_maps_all[maps].yearly: yearly = True flagcdf = 6 # set to yearly flag frequency = "annual" except: yearly = False if (monthly and self.var.monthend) or ( yearly and self.var.yearend) or not (monthly or yearly): # checks if a flag monthly or yearly exists] if option['writeNetcdfStack']: #Get start date for reporting start step reportStartDate = inttodate( binding['StepStartInt'] - 1, self.var.CalendarDayStart) # CM: get step number for first reporting step which is always the first simulation step # CM: first simulation step referred to reportStartDate ##reportStepStart = int(binding['StepStart']) reportStepStart = 1 #get step number for last reporting step which is always the last simulation step #last simulation step referred to reportStartDate reportStepEnd = binding['StepEndInt'] - binding[ 'StepStartInt'] + 1 try: cdfflags = CDFFlags.instance() writenet(cdfflags[flagcdf], eval(what), where, self.var.DtDay, maps, report_maps_all[maps].output_var, report_maps_all[maps].unit, 'f4', reportStartDate, reportStepStart, reportStepEnd, frequency) except Exception as e: warnings.warn(LisfloodWarning(str(e))) print(str(e), "ALL", what, where, self.var.DtDay, maps, report_maps_all[maps].output_var, report_maps_all[maps].unit, 'f4', reportStartDate, reportStepStart, reportStepEnd) else: self.var.report(decompress(eval(what)), trimPCRasterOutputPath(where)) cdfflags = CDFFlags.instance() # set the falg to indicate if a netcdffile has to be created or is only appended # if reportstep than increase the counter if self.var.currentTimeStep() in self.var.ReportSteps: # FIXME magic numbers. replace indexes with descriptive keys cdfflags.inc(1) # globals.cdfFlag[1] += 1 if self.var.monthend: # globals.cdfFlag[3] += 1 cdfflags.inc(3) if self.var.yearend: # globals.cdfFlag[4] += 1 cdfflags.inc(4) # increase the counter for report all maps cdfflags.inc(2) # globals.cdfFlag[2] += 1 if self.var.monthend: # globals.cdfFlag[5] += 1 cdfflags.inc(5) if self.var.yearend: # globals.cdfFlag[6] += 1 cdfflags.inc(6)
def main():
    #-initialization
    # MVs
    MV = -999.
    # minimum catchment size to process
    catchmentSizeLimit = 0.0
    # period of interest, start and end year
    startYear = 1961
    endYear = 2010
    # maps
    cloneMapFileName = '/data/hydroworld/PCRGLOBWB20/input30min/global/Global_CloneMap_30min.map'
    lddFileName = '/data/hydroworld/PCRGLOBWB20/input30min/routing/lddsound_30min.map'
    cellAreaFileName = '/data/hydroworld/PCRGLOBWB20/input30min/routing/cellarea30min.map'
    # set clone
    pcr.setclone(cloneMapFileName)
    # output
    outputPath = '/scratch/rens/reservedrecharge'
    percentileMapFileName = os.path.join(outputPath, 'q%03d_cumsec.map')
    textFileName = os.path.join(outputPath, 'groundwater_environmentalflow_%d.txt')
    fractionReservedRechargeMapFileName = os.path.join(outputPath, 'fraction_reserved_recharge%d.map')
    fractionMinimumReservedRechargeMapFileName = os.path.join(outputPath, 'minimum_fraction_reserved_recharge%d.map')
    # input
    inputPath = '/nfsarchive/edwin-emergency-backup-DO-NOT-DELETE/rapid/edwin/05min_runs_results/2015_04_27/non_natural_2015_04_27/global/netcdf/'
    # define data to be read from netCDF files
    ncData = {}
    variableName = 'totalRunoff'
    ncData[variableName] = {}
    ncData[variableName]['fileName'] = os.path.join(inputPath, 'totalRunoff_monthTot_output.nc')
    ncData[variableName]['fileRoot'] = os.path.join(outputPath, 'qloc')
    ncData[variableName]['annualAverage'] = pcr.scalar(0)
    variableName = 'gwRecharge'
    ncData[variableName] = {}
    ncData[variableName]['fileName'] = os.path.join(inputPath, 'gwRecharge_monthTot_output.nc')
    ncData[variableName]['fileRoot'] = os.path.join(outputPath, 'gwrec')
    ncData[variableName]['annualAverage'] = pcr.scalar(0)
    variableName = 'discharge'
    ncData[variableName] = {}
    ncData[variableName]['fileName'] = os.path.join(inputPath, 'totalRunoff_monthTot_output.nc')
    ncData[variableName]['fileRoot'] = os.path.join(outputPath, 'qc')
    ncData[variableName]['annualAverage'] = pcr.scalar(0)
    ncData[variableName]['mapStack'] = np.array([])
    # percents and environmental flow condition set as percentile
    percents = range(10, 110, 10)
    environmentalFlowPercent = 10
    if environmentalFlowPercent not in percents:
        percents.append(environmentalFlowPercent)
        percents.sort()
    #-start
    # obtain attributes
    pcr.setclone(cloneMapFileName)
    cloneSpatialAttributes = spatialAttributes(cloneMapFileName)
    years = range(startYear, endYear + 1)
    # output path
    if not os.path.isdir(outputPath):
        os.makedirs(outputPath)
    os.chdir(outputPath)
    # compute catchments
    ldd = pcr.readmap(lddFileName)
    cellArea = pcr.readmap(cellAreaFileName)
    catchments = pcr.catchment(ldd, pcr.pit(ldd))
    fractionWater = pcr.scalar(0.0)  # temporary!
    lakeMask = pcr.boolean(0)  # temporary!
    pcr.report(catchments, os.path.join(outputPath, 'catchments.map'))
    maximumCatchmentID = int(pcr.cellvalue(pcr.mapmaximum(pcr.scalar(catchments)), 1)[0])
    # iterate over years
    weight = float(len(years))**-1
    for year in years:
        #-echo year
        print ' - processing year %d' % year
        #-process data
        startDate = datetime.datetime(year, 1, 1)
        endDate = datetime.datetime(year, 12, 31)
        timeSteps = endDate.toordinal() - startDate.toordinal() + 1
        dynamicIncrement = 1
        for variableName in ncData.keys():
            print ' extracting %s' % variableName,
            ncFileIn = ncData[variableName]['fileName']
            #-process data
            pcrDataSet = pcrObject(variableName, ncData[variableName]['fileRoot'],
                ncFileIn, cloneSpatialAttributes, pcrVALUESCALE=pcr.Scalar,
                resamplingAllowed=True,
                dynamic=True, dynamicStart=startDate, dynamicEnd=endDate,
                dynamicIncrement=dynamicIncrement, ncDynamicDimension='time')
            pcrDataSet.initializeFileInfo()
            pcrDataSet.processFileInfo()
            for fileInfo in pcrDataSet.fileProcessInfo.values()[0]:
                tempFileName = fileInfo[1]
                variableField = pcr.readmap(tempFileName)
                variableField = pcr.ifthen(pcr.defined(ldd), pcr.cover(variableField, 0))
                if variableName == 'discharge':
                    dayNumber = int(os.path.splitext(tempFileName)[1].strip('.'))
                    date = datetime.date(year, 1, 1) + datetime.timedelta(dayNumber - 1)
                    numberDays = calendar.monthrange(year, date.month)[1]
                    variableField = pcr.max(0, pcr.catchmenttotal(variableField * cellArea, ldd) /
                        (numberDays * 24 * 3600))
                ncData[variableName]['annualAverage'] += weight * variableField
                if 'mapStack' in ncData[variableName].keys():
                    tempArray = pcr2numpy(variableField, MV)
                    mask = tempArray != MV
                    if ncData[variableName]['mapStack'].size != 0:
                        ncData[variableName]['mapStack'] = np.vstack(
                            (ncData[variableName]['mapStack'], tempArray[mask]))
                    else:
                        ncData[variableName]['mapStack'] = tempArray[mask]
                        coordinates = np.zeros((ncData[variableName]['mapStack'].size, 2))
                        pcr.setglobaloption('unitcell')
                        tempArray = pcr2numpy(pcr.ycoordinate(pcr.boolean(1)) + 0.5, MV)
                        coordinates[:, 0] = tempArray[mask]
                        tempArray = pcr2numpy(pcr.xcoordinate(pcr.boolean(1)) + 0.5, MV)
                        coordinates[:, 1] = tempArray[mask]
                os.remove(tempFileName)
            # delete object
            pcrDataSet = None
            del pcrDataSet
        # close line on screen
        print
    # report annual averages
    key = 'annualAverage'
    ncData['discharge'][key] /= 12
    for variableName in ncData.keys():
        ncData[variableName][key] = pcr.max(0, ncData[variableName][key])
        pcr.report(ncData[variableName][key],
            os.path.join(outputPath, '%s_%s.map' % (variableName, key)))
    # remove aux.xml
    for tempFileName in os.listdir(outputPath):
        if 'aux.xml' in tempFileName:
            os.remove(tempFileName)
    # sort data
    print 'sorting discharge data'
    variableName = 'discharge'
    key = 'mapStack'
    indices = np.zeros((ncData[variableName][key].shape), np.uint)
    for iCnt in xrange(ncData[variableName][key].shape[1]):
        indices[:, iCnt] = ncData[variableName][key][:, iCnt].argsort(kind='mergesort')
        ncData[variableName][key][:, iCnt] = ncData[variableName][key][:, iCnt][indices[:, iCnt]]
    # extract values for percentiles
    print 'returning maps'
    for percent in percents:
        percentile = 0.01 * percent
        index0 = min(ncData[variableName][key].shape[0] - 1,
            int(percentile * ncData[variableName][key].shape[0]))
        index1 = min(ncData[variableName][key].shape[0] - 1,
            int(percentile * ncData[variableName][key].shape[0]) + 1)
        x0 = float(index0) / ncData[variableName][key].shape[0]
        x1 = float(index1) / ncData[variableName][key].shape[0]
        if x0 != x1:
            y = ncData[variableName][key][index0, :] + (percentile - x0) * \
                (ncData[variableName][key][index1, :] - ncData[variableName][key][index0, :]) / (x1 - x0)
        else:
            y = ncData[variableName][key][index0, :]
        # convert a slice of the stack into an array
        tempArray = np.ones((cloneSpatialAttributes.numberRows,
                             cloneSpatialAttributes.numberCols)) * MV
        for iCnt in xrange(coordinates.shape[0]):
            row = coordinates[iCnt, 0] - 1
            col = coordinates[iCnt, 1] - 1
            tempArray[row, col] = y[iCnt]
        variableField = numpy2pcr(pcr.Scalar, tempArray, MV)
        pcr.report(variableField, percentileMapFileName % percent)
        if percent == environmentalFlowPercent:
            ncData[variableName]['environmentalFlow'] = variableField
        tempArray = None; variableField = None
        del tempArray, variableField
    # process environmental flow
    # initialize map of reserved recharge fraction
    fractionReservedRechargeMap = pcr.ifthen(
        ncData[variableName]['environmentalFlow'] < 0, pcr.scalar(0))
    fractionMinimumReservedRechargeMap = pcr.ifthen(
        ncData[variableName]['environmentalFlow'] < 0, pcr.scalar(0))
    textFile = open(textFileName % environmentalFlowPercent, 'w')
    hStr = 'Environmental flow analysis per basin, resulting in a map of renewable, exploitable recharge, for the %d%s quantile of discharge\n' % (environmentalFlowPercent, '%')
    hStr += 'Returns Q_%d/R, the fraction of reserved recharge needed to sustain fully the environmental flow requirement defined as the %d percentile,\n' % (environmentalFlowPercent, environmentalFlowPercent)
    hStr += 'and Q*_%d/R, a reduced fraction that takes the availability of surface water into account\n' % environmentalFlowPercent
    textFile.write(hStr)
    print hStr
    # create header to display on screen and write to file
    # reported are: 1: ID, 2: Area, 3: average discharge, 4: environmental flow, 5: average recharge,
    # 6: Q_%d/Q, 7: Q_%d/R_Avg, 8: R_Avg/Q_Avg, 9: Q*_%d/R_Avg
    hStr = '%6s,%15s,%15s,%15s,%15s,%15s,%15s,%15s,%15s\n' % \
        ('ID', 'Area [km2]', 'Q_Avg [m3]', 'Q_%d [m3]' % environmentalFlowPercent,
         'R_Avg [m3]', 'Q_%d/Q_Avg [-]' % environmentalFlowPercent,
         'Q_%d/R_Avg [-]' % environmentalFlowPercent, 'R_Avg/Q_Avg [-]',
         'Q*_%d/R_Avg [-]' % environmentalFlowPercent)
    textFile.write(hStr)
    print hStr
    for catchment in xrange(1, maximumCatchmentID + 1):
        # create catchment mask and check whether it does not coincide with a lake
        catchmentMask = catchments == catchment
        catchmentSize = pcr.cellvalue(pcr.maptotal(
            pcr.ifthen(catchmentMask, cellArea * 1.e-6)), 1)[0]
        #~ ##~ if pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,pcr.scalar(lakeMask))),1) != \
        #~ ##~         pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,pcr.scalar(catchmentMask))),1)[0] and \
        #~ ##~         catchmentSize > catchmentSizeLimit:
        key = 'annualAverage'
        variableName = 'discharge'
        if bool(pcr.cellvalue(pcr.maptotal(pcr.ifthen((ldd == 5) & catchmentMask,
                pcr.scalar(ncData[variableName][key] > 0))), 1)[0]) and \
                catchmentSize >= catchmentSizeLimit:
            # valid catchment, process
            # all volumes are in m3 per year
            key = 'annualAverage'
            catchmentAverageDischarge = pcr.cellvalue(pcr.mapmaximum(
                pcr.ifthen(catchmentMask & (ldd == 5),
                    ncData[variableName][key])), 1)[0] * 365.25 * 3600 * 24
            variableName = 'gwRecharge'
            catchmentRecharge = pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,
                ncData[variableName][key] * (1. - fractionWater) * cellArea)), 1)[0]
            variableName = 'totalRunoff'
            catchmentRunoff = pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask,
                ncData[variableName][key] * cellArea)), 1)[0]
            key = 'environmentalFlow'
            variableName = 'discharge'
            catchmentEnvironmentalFlow = pcr.cellvalue(pcr.mapmaximum(
                pcr.ifthen(catchmentMask & (ldd == 5),
                    ncData[variableName][key])), 1)[0] * 365.25 * 3600 * 24
            catchmentRunoff = max(catchmentRunoff, catchmentEnvironmentalFlow)
            if catchmentAverageDischarge > 0.:
                fractionEnvironmentalFlow = catchmentEnvironmentalFlow / catchmentAverageDischarge
                fractionGroundWaterContribution = catchmentRecharge / catchmentAverageDischarge
            else:
                fractionEnvironmentalFlow = 0.
                fractionGroundWaterContribution = 0.
            if catchmentRecharge > 0:
                fractionReservedRecharge = min(1, catchmentEnvironmentalFlow / catchmentRecharge)
            else:
                fractionReservedRecharge = 1.0
            fractionMinimumReservedRecharge = (fractionReservedRecharge +
                fractionGroundWaterContribution -
                fractionReservedRecharge * fractionGroundWaterContribution) * fractionReservedRecharge
            #~ # echo to screen, and write to file and map
            wStr = '%6s,%15.1f,%15.6g,%15.6g,%15.6g,%15.6f,%15.6f,%15.6f,%15.6f\n' % \
                (catchment, catchmentSize, catchmentAverageDischarge,
                 catchmentEnvironmentalFlow, catchmentRecharge,
                 fractionEnvironmentalFlow, fractionReservedRecharge,
                 fractionGroundWaterContribution, fractionMinimumReservedRecharge)
            print wStr
            textFile.write(wStr)
            # update maps
            fractionReservedRechargeMap = pcr.ifthenelse(catchmentMask,
                pcr.scalar(fractionReservedRecharge), fractionReservedRechargeMap)
            fractionMinimumReservedRechargeMap = pcr.ifthenelse(catchmentMask,
                pcr.scalar(fractionMinimumReservedRecharge), fractionMinimumReservedRechargeMap)
    #-report map and close text file
    pcr.report(fractionReservedRechargeMap,
        fractionReservedRechargeMapFileName % environmentalFlowPercent)
    pcr.report(fractionMinimumReservedRechargeMap,
        fractionMinimumReservedRechargeMapFileName % environmentalFlowPercent)
    # close text file
    textFile.close()
    # finished
    print 'all done!'
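# The percentile extraction above interpolates linearly between the two sorted
# values bracketing the requested quantile. The same idea on a single sorted
# column, with illustrative data (conventions differ slightly from np.percentile):
import numpy as np

q = np.sort(np.array([3.0, 1.0, 4.0, 1.5, 9.0, 2.6]))  # one cell's sorted series
percentile = 0.10
n = q.shape[0]
index0 = min(n - 1, int(percentile * n))
index1 = min(n - 1, int(percentile * n) + 1)
x0 = float(index0) / n
x1 = float(index1) / n
if x0 != x1:
    y = q[index0] + (percentile - x0) * (q[index1] - q[index0]) / (x1 - x0)
else:
    y = q[index0]
print(y)  # interpolated 10th-percentile estimate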
def waterBalance( fluxesIn, fluxesOut, deltaStorages, processName, PrintOnlyErrors, dateStr,threshold=1e-5): """ Returns the water balance for lists of input fluxes, output fluxes, and storage changes. """ inMap = pcr.spatial(pcr.scalar(0.0)) dsMap = pcr.spatial(pcr.scalar(0.0)) outMap = pcr.spatial(pcr.scalar(0.0)) inflow = 0 outflow = 0 deltaS = 0 for fluxIn in fluxesIn: inflow += getMapTotal(fluxIn) inMap += fluxIn for fluxOut in fluxesOut: outflow += getMapTotal(fluxOut) outMap += fluxOut for deltaStorage in deltaStorages: deltaS += getMapTotal(deltaStorage) dsMap += deltaStorage #if PrintOnlyErrors: a,b,c = getMinMaxMean(inMap + dsMap- outMap) # if abs(a) > 1e-5 or abs(b) > 1e-5: # if abs(a) > 1e-4 or abs(b) > 1e-4: if abs(a) > threshold or abs(b) > threshold: print "WBError %s Min %f Max %f Mean %f" %(processName,a,b,c) # if abs(inflow + deltaS - outflow) > 1e-5: # print "Water balance Error for %s on %s: in = %f\tout=%f\tdeltaS=%f\tBalance=%f" \ # %(processName,dateStr,inflow,outflow,deltaS,inflow + deltaS - outflow) #else: # print "Water balance for %s: on %s in = %f\tout=%f\tdeltaS=%f\tBalance=%f" \ # %(processName,dateStr,inflow,outflow,deltaS,inflow + deltaS - outflow) wb = inMap + dsMap - outMap maxWBError = pcr.cellvalue(pcr.mapmaximum(pcr.abs(wb)), 1, 1)[0] #if maxWBError > 0.001 / 1000: #row = 0 #col = 0 #cellID = 1 #troubleCell = 0 #print "Water balance for %s on %s: %f mm !!! " %(processName,dateStr,maxWBError * 1000) #pcr.report(wb,"%s-WaterBalanceError-%s" %(processName,dateStr)) #npWBMError = pcr2numpy(wb, -9999) #(nr, nc) = np.shape(npWBMError) #for r in range(0, nr): #for c in range(0, nc): ## print r,c #if npWBMError[r, c] != -9999.0: #val = npWBMError[r, c] #if math.fabs(val) > 0.0001 / 1000: ## print npWBMError[r,c] #row = r #col = c #troubleCell = cellID #cellID += 1 #print 'Water balance for %s on %s: %f mm row %i col %i cellID %i!!! ' % ( #processName, #dateStr, #maxWBError * 1000, #row, #col, #troubleCell, #) return inMap + dsMap - outMap
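A stripped-down version of the closure check at the heart of waterBalance, assuming hypothetical input maps in metres; pcr.mapmaximum of the absolute residual yields the worst cell error as a float:

import pcraster as pcr

pcr.setclone("clone.map")                # hypothetical clone map
precipitation = pcr.readmap("p.map")     # hypothetical flux in [m]
discharge = pcr.readmap("q.map")         # hypothetical flux out [m]
storageChange = pcr.readmap("ds.map")    # hypothetical storage change [m]

closure = precipitation - discharge - storageChange
# largest absolute closure error anywhere on the map, as a float
maxError = pcr.cellvalue(pcr.mapmaximum(pcr.abs(closure)), 1, 1)[0]
if maxError > 1e-5:
    pcr.report(closure, "wberror.map")   # inspect where the balance fails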
def __init__(self, iniItems, landmask): object.__init__(self) # cloneMap, temporary directory, absolute path for input directory, landmask self.cloneMap = iniItems.cloneMap self.tmpDir = iniItems.tmpDir self.inputDir = iniItems.globalOptions['inputDir'] self.landmask = landmask # configuration from the ini file self.iniItems = iniItems # topography properties: read several variables from the netcdf file for var in ['dem_minimum','dem_maximum','dem_average','dem_standard_deviation',\ 'slopeLength','orographyBeta','tanslope',\ 'dzRel0000','dzRel0001','dzRel0005',\ 'dzRel0010','dzRel0020','dzRel0030','dzRel0040','dzRel0050',\ 'dzRel0060','dzRel0070','dzRel0080','dzRel0090','dzRel0100']: vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['topographyNC'], \ var, self.cloneMap) vars(self)[var] = pcr.cover(vars(self)[var], 0.0) # channel properties: read several variables from the netcdf file for var in [ 'lddMap', 'cellAreaMap', 'gradient', 'bankfull_width', 'bankfull_depth', 'dem_floodplain', 'dem_riverbed' ]: vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['channelNC'], \ var, self.cloneMap) vars(self)[var] = pcr.cover(vars(self)[var], 0.0) # minimum channel width minimum_channel_width = 0.1 self.bankfull_width = pcr.max(minimum_channel_width, self.bankfull_width) #~ # cell fraction if channel water reaching the flood plan # NOT USED #~ self.flood_plain_fraction = self.return_innundation_fraction(pcr.max(0.0, self.dem_floodplain - self.dem_minimum)) # coefficient of Manning self.manningsN = vos.readPCRmapClone(self.iniItems.modflowParameterOptions['manningsN'],\ self.cloneMap,self.tmpDir,self.inputDir) # minimum channel gradient minGradient = 0.00005 self.gradient = pcr.max(minGradient, pcr.cover(self.gradient, minGradient)) # correcting lddMap self.lddMap = pcr.ifthen(pcr.scalar(self.lddMap) > 0.0, self.lddMap) self.lddMap = pcr.lddrepair(pcr.ldd(self.lddMap)) # channelLength = approximation of channel length (unit: m) # This is approximated by cell diagonal. cellSizeInArcMin = np.round(pcr.clone().cellSize() * 60.) verticalSizeInMeter = cellSizeInArcMin * 1852. self.channelLength = ((self.cellAreaMap/verticalSizeInMeter)**(2)+\ (verticalSizeInMeter)**(2))**(0.5) # option for lakes and reservoir self.onlyNaturalWaterBodies = False if self.iniItems.modflowParameterOptions[ 'onlyNaturalWaterBodies'] == "True": self.onlyNaturalWaterBodies = True # groundwater linear recession coefficient (day-1) ; the linear reservoir concept is still being used to represent fast response flow # particularly from karstic aquifer in mountainous regions self.recessionCoeff = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['groundwaterPropertiesNC'],\ 'recessionCoeff', self.cloneMap) self.recessionCoeff = pcr.cover(self.recessionCoeff, 0.00) self.recessionCoeff = pcr.min(1.0000, self.recessionCoeff) # if 'minRecessionCoeff' in iniItems.modflowParameterOptions.keys(): minRecessionCoeff = float( iniItems.modflowParameterOptions['minRecessionCoeff']) else: minRecessionCoeff = 1.0e-4 # This is the minimum value used in Van Beek et al. (2011). 
self.recessionCoeff = pcr.max(minRecessionCoeff, self.recessionCoeff) # aquifer saturated conductivity (m/day) self.kSatAquifer = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['groundwaterPropertiesNC'],\ 'kSatAquifer', self.cloneMap) self.kSatAquifer = pcr.cover(self.kSatAquifer, pcr.mapmaximum(self.kSatAquifer)) self.kSatAquifer = pcr.max(0.010, self.kSatAquifer) self.kSatAquifer *= 0.001 # aquifer specific yield (dimensionless) self.specificYield = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['groundwaterPropertiesNC'],\ 'specificYield', self.cloneMap) self.specificYield = pcr.cover(self.specificYield, pcr.mapmaximum(self.specificYield)) self.specificYield = pcr.max( 0.010, self.specificYield ) # TODO: TO BE CHECKED: The resample process of specificYield self.specificYield = pcr.min(1.000, self.specificYield) # estimate of thickness (unit: m) of accessible groundwater totalGroundwaterThickness = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['estimateOfTotalGroundwaterThicknessNC'],\ 'thickness', self.cloneMap) # extrapolation totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness,\ pcr.windowaverage(totalGroundwaterThickness, 1.0)) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness,\ pcr.windowaverage(totalGroundwaterThickness, 1.5)) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, 0.0) # # set minimum thickness minimumThickness = pcr.scalar(float(\ self.iniItems.modflowParameterOptions['minimumTotalGroundwaterThickness'])) totalGroundwaterThickness = pcr.max(minimumThickness, totalGroundwaterThickness) # # set maximum thickness: 250 m. maximumThickness = 250. self.totalGroundwaterThickness = pcr.min(maximumThickness, totalGroundwaterThickness) # river bed resistance (unit: day) self.bed_resistance = 1.0 # option to ignore capillary rise self.ignoreCapRise = True if self.iniItems.modflowParameterOptions['ignoreCapRise'] == "False": self.ignoreCapRise = False # initiate old style reporting # TODO: remove this! self.initiate_old_style_groundwater_reporting(iniItems)
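Both kSatAquifer and specificYield above are gap-filled with pcr.cover against their own pcr.mapmaximum, which is an optimistic fill: cells with missing values receive the largest value found anywhere on the map. The idiom in isolation, with hypothetical map names:

import pcraster as pcr

pcr.setclone("clone.map")          # hypothetical clone map
kSat = pcr.readmap("ksat.map")     # hypothetical conductivity map with MVs

# mapmaximum is non-spatial, so cover broadcasts it into every MV cell;
# max then enforces a lower bound everywhere
kSat = pcr.cover(kSat, pcr.mapmaximum(kSat))
kSat = pcr.max(0.010, kSat)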
def subcatch_stream( ldd, threshold, min_strahler=-999, max_strahler=999, assign_edge=False, assign_existing=False, up_area=None, ): """ (From Deltares Hydrotools) Derive catchments based upon strahler threshold Input: ldd -- pcraster object direction, local drain directions threshold -- integer, strahler threshold, subcatchments ge threshold are derived min_strahler -- integer, minimum strahler threshold of river catchments to return max_strahler -- integer, maximum strahler threshold of river catchments to return assign_edge=False -- if set to True, unassigned connected areas at the edges of the domain are assigned a unique id as well. If set to False, edges are not assigned assign_existing=False -- if set to True, unassigned edges are assigned to existing basins with an upstream weighting. If set to False, edges are assigned to unique IDs, or not assigned output: stream_ge -- pcraster object, streams of strahler order ge threshold subcatch -- pcraster object, subcatchments of strahler order ge threshold """ # derive stream order stream = pcr.streamorder(ldd) stream_ge = pcr.ifthen(stream >= threshold, stream) stream_up_sum = pcr.ordinal(pcr.upstream(ldd, pcr.cover(pcr.scalar(stream_ge), 0))) # detect any transfer of strahler order, to a higher strahler order. transition_strahler = pcr.ifthenelse( pcr.downstream(ldd, stream_ge) != stream_ge, pcr.boolean(1), pcr.ifthenelse( pcr.nominal(ldd) == 5, pcr.boolean(1), pcr.ifthenelse( pcr.downstream(ldd, pcr.scalar(stream_up_sum)) > pcr.scalar(stream_ge), pcr.boolean(1), pcr.boolean(0), ), ), ) # make unique ids (write to file) transition_unique = pcr.ordinal(pcr.uniqueid(transition_strahler)) # derive upstream catchment areas (write to file) subcatch = pcr.nominal(pcr.subcatchment(ldd, transition_unique)) if assign_edge: # fill unclassified areas (in pcraster equal to zero) with a unique id, above the maximum id assigned so far unique_edge = pcr.clump(pcr.ifthen(subcatch == 0, pcr.ordinal(0))) subcatch = pcr.ifthenelse( subcatch == 0, pcr.nominal(pcr.mapmaximum(pcr.scalar(subcatch)) + pcr.scalar(unique_edge)), pcr.nominal(subcatch), ) elif assign_existing: # unaccounted areas are added to largest nearest draining basin if up_area is None: up_area = pcr.ifthen( pcr.boolean(pcr.cover(stream_ge, 0)), pcr.accuflux(ldd, 1) ) riverid = pcr.ifthen(pcr.boolean(pcr.cover(stream_ge, 0)), subcatch) friction = 1.0 / pcr.scalar( pcr.spreadzone(pcr.cover(pcr.ordinal(up_area), 0), 0, 0) ) # *(pcr.scalar(ldd)*0+1) delta = pcr.ifthen( pcr.scalar(ldd) >= 0, pcr.ifthen( pcr.cover(subcatch, 0) == 0, pcr.spreadzone(pcr.cover(riverid, 0), 0, friction), ), ) subcatch = pcr.ifthenelse(pcr.boolean(pcr.cover(subcatch, 0)), subcatch, delta) # finally, only keep basins with minimum and maximum river order flowing through them strahler_subcatch = pcr.areamaximum(stream, subcatch) subcatch = pcr.ifthen( pcr.ordinal(strahler_subcatch) >= min_strahler, pcr.ifthen(pcr.ordinal(strahler_subcatch) <= max_strahler, subcatch), ) return stream_ge, pcr.ordinal(subcatch)
def subcatch_order_b( ldd, oorder, sizelimit=0, fill=False, fillcomplete=False, stoporder=0 ): """ Determines subcatchments using the catchment order This version tries to keep the number of upstream/downstream catchments small by first deriving the catchments connected to the major river (of the order given), and filling up from there. Input: - ldd - oorder - order to use - sizelimit - smallest catchments to include, default is all (sizelimit=0) in number of cells - if fill is set to True the higher order catchments are also filled - if fillcomplete is set to True the whole ldd is filled with catchments. :returns sc, dif, nldd; Subcatchment, Points, subcatchldd """ # outl = find_outlet(ldd) # large = pcr.subcatchment(ldd,pcr.boolean(outl)) if stoporder == 0: stoporder = oorder stt = pcr.streamorder(ldd) sttd = pcr.downstream(ldd, stt) pts = pcr.ifthen((pcr.scalar(sttd) - pcr.scalar(stt)) > 0.0, sttd) maxorder = pcraster.framework.getCellValue(pcr.mapmaximum(stt), 1, 1) dif = pcr.uniqueid(pcr.boolean(pcr.ifthen(stt == pcr.ordinal(oorder), pts))) if fill: for order in range(oorder, maxorder): m_pts = pcr.ifthen((pcr.scalar(sttd) - pcr.scalar(order)) > 0.0, sttd) m_dif = pcr.uniqueid( pcr.boolean(pcr.ifthen(stt == pcr.ordinal(order), m_pts)) ) dif = pcr.uniqueid(pcr.boolean(pcr.cover(m_dif, dif))) for myorder in range(oorder - 1, stoporder, -1): sc = pcr.subcatchment(ldd, pcr.nominal(dif)) m_pts = pcr.ifthen((pcr.scalar(sttd) - pcr.scalar(stt)) > 0.0, sttd) m_dif = pcr.uniqueid( pcr.boolean(pcr.ifthen(stt == pcr.ordinal(myorder - 1), m_pts)) ) dif = pcr.uniqueid( pcr.boolean(pcr.cover(pcr.ifthen(pcr.scalar(sc) == 0, m_dif), dif)) ) if fillcomplete: sc = pcr.subcatchment(ldd, pcr.nominal(dif)) cs, m_dif, stt = subcatch_order_a(ldd, stoporder) dif = pcr.uniqueid( pcr.boolean( pcr.cover( pcr.ifthen(pcr.scalar(sc) == 0, pcr.ordinal(m_dif)), pcr.ordinal(dif), ) ) ) scsize = pcr.catchmenttotal(1, ldd) dif = pcr.ordinal(pcr.uniqueid(pcr.boolean(pcr.ifthen(scsize >= sizelimit, dif)))) sc = pcr.subcatchment(ldd, dif) # Make pit ldd nldd = pcr.lddrepair(pcr.ifthenelse(pcr.cover(dif, 0) > 0, 5, ldd)) return sc, dif, nldd
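subcatch_order_b reads the largest stream order into a Python int via pcraster.framework.getCellValue; an equivalent sketch using pcr.cellvalue, as done elsewhere in this collection (map names hypothetical):

import pcraster as pcr

pcr.setclone("clone.map")        # hypothetical clone map
ldd = pcr.readmap("ldd.map")     # hypothetical local drain directions

stream = pcr.streamorder(ldd)
# mapmaximum gives a non-spatial field; cellvalue(..., 1, 1) returns
# (value, isValid), so the highest order becomes a plain Python int
maxOrder = int(pcr.cellvalue(pcr.mapmaximum(pcr.scalar(stream)), 1, 1)[0])
for order in range(1, maxOrder + 1):
    pass  # e.g. derive per-order subcatchments, as subcatch_order_b does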
streamorder = pcr.ordinal(pcr.streamorder(ldd)) river = pcr.boolean( pcr.ifthen( streamorder >= int(min(np.max(pcr.pcr2numpy(streamorder, -9999)), minorder)), streamorder, ) ) outlets = pcr.ifthen(pcr.ordinal(ldd) == 5, pcr.boolean(1)) outlets = pcr.nominal(pcr.uniqueid(outlets)) catchments = pcr.nominal(pcr.catchment(ldd, outlets)) if not keepall: catchments = pcr.nominal( pcr.ifthen( pcr.mapmaximum( pcr.areatotal(pcr.scalar(catchments) * 0 + 1, pcr.nominal(catchments)) ) == pcr.areatotal(pcr.scalar(catchments) * 0 + 1, pcr.nominal(catchments)), catchments, ) ) pcr.report(ldd, ldd_map) pcr.report(streamorder, streamorder_map) pcr.report(river, river_map) pcr.report(catchments, catchments_map) if not EPSG == None: call( ( "gdal_translate", "-of",
def __init__(self, iniItems, landmask): object.__init__(self) # cloneMap, temporary directory for the resample process, temporary directory for the modflow process, absolute path for input directory, landmask self.cloneMap = iniItems.cloneMap self.tmpDir = iniItems.tmpDir self.tmp_modflow_dir = iniItems.tmp_modflow_dir self.inputDir = iniItems.globalOptions['inputDir'] self.landmask = landmask # configuration from the ini file self.iniItems = iniItems # topography properties: read several variables from the netcdf file for var in ['dem_minimum','dem_maximum','dem_average','dem_standard_deviation',\ 'slopeLength','orographyBeta','tanslope',\ 'dzRel0000','dzRel0001','dzRel0005',\ 'dzRel0010','dzRel0020','dzRel0030','dzRel0040','dzRel0050',\ 'dzRel0060','dzRel0070','dzRel0080','dzRel0090','dzRel0100']: vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['topographyNC'], \ var, self.cloneMap) vars(self)[var] = pcr.cover(vars(self)[var], 0.0) # channel properties: read several variables from the netcdf file for var in ['lddMap','cellAreaMap','gradient','bankfull_width', 'bankfull_depth','dem_floodplain','dem_riverbed']: vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['channelNC'], \ var, self.cloneMap) vars(self)[var] = pcr.cover(vars(self)[var], 0.0) # minimum channel width minimum_channel_width = 0.5 # TODO: Define this one in the configuration file self.bankfull_width = pcr.max(minimum_channel_width, self.bankfull_width) #~ # cell fraction if channel water reaching the flood plan # NOT USED YET #~ self.flood_plain_fraction = self.return_innundation_fraction(pcr.max(0.0, self.dem_floodplain - self.dem_minimum)) # coefficient of Manning self.manningsN = vos.readPCRmapClone(self.iniItems.modflowParameterOptions['manningsN'],\ self.cloneMap,self.tmpDir,self.inputDir) # minimum channel gradient minGradient = 0.00005 # TODO: Define this one in the configuration file self.gradient = pcr.max(minGradient, pcr.cover(self.gradient, minGradient)) # correcting lddMap self.lddMap = pcr.ifthen(pcr.scalar(self.lddMap) > 0.0, self.lddMap) self.lddMap = pcr.lddrepair(pcr.ldd(self.lddMap)) # channelLength = approximation of channel length (unit: m) # This is approximated by cell diagonal. cellSizeInArcMin = np.round(pcr.clone().cellSize()*60.) # FIXME: This one will not work if you use the resolution: 0.5, 1.5, 2.5 arc-min verticalSizeInMeter = cellSizeInArcMin*1852. horizontalSizeInMeter = self.cellAreaMap/verticalSizeInMeter self.channelLength = ((horizontalSizeInMeter)**(2)+\ (verticalSizeInMeter)**(2))**(0.5) # option for lakes and reservoir self.onlyNaturalWaterBodies = False if self.iniItems.modflowParameterOptions['onlyNaturalWaterBodies'] == "True": self.onlyNaturalWaterBodies = True # groundwater linear recession coefficient (day-1) ; the linear reservoir concept is still being used to represent fast response flow # particularly from karstic aquifer in mountainous regions self.recessionCoeff = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['groundwaterPropertiesNC'],\ 'recessionCoeff', self.cloneMap) self.recessionCoeff = pcr.cover(self.recessionCoeff,0.00) self.recessionCoeff = pcr.min(1.0000,self.recessionCoeff) # if 'minRecessionCoeff' in iniItems.modflowParameterOptions.keys(): minRecessionCoeff = float(iniItems.modflowParameterOptions['minRecessionCoeff']) else: minRecessionCoeff = 1.0e-4 # This is the minimum value used in Van Beek et al. (2011). 
self.recessionCoeff = pcr.max(minRecessionCoeff,self.recessionCoeff) # aquifer saturated conductivity (m/day) self.kSatAquifer = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['groundwaterPropertiesNC'],\ 'kSatAquifer', self.cloneMap) self.kSatAquifer = pcr.cover(self.kSatAquifer,pcr.mapmaximum(self.kSatAquifer)) self.kSatAquifer = pcr.max(0.001,self.kSatAquifer) # TODO: Define the minimum value as part of the configuration file # aquifer specific yield (dimensionless) self.specificYield = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['groundwaterPropertiesNC'],\ 'specificYield', self.cloneMap) self.specificYield = pcr.cover(self.specificYield,pcr.mapmaximum(self.specificYield)) self.specificYield = pcr.max(0.010,self.specificYield) # TODO: TO BE CHECKED: The resample process of specificYield self.specificYield = pcr.min(1.000,self.specificYield) # TODO: Define the minimum value as part of the configuration file # estimate of thickness (unit: m) of accessible groundwater totalGroundwaterThickness = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['estimateOfTotalGroundwaterThicknessNC'],\ 'thickness', self.cloneMap) # extrapolation totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness,\ pcr.windowaverage(totalGroundwaterThickness, 1.0)) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness,\ pcr.windowaverage(totalGroundwaterThickness, 1.5)) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, 0.0) # # set minimum thickness minimumThickness = pcr.scalar(float(\ self.iniItems.modflowParameterOptions['minimumTotalGroundwaterThickness'])) totalGroundwaterThickness = pcr.max(minimumThickness, totalGroundwaterThickness) # # set maximum thickness: 250 m. # TODO: Define this one as part of the ini file maximumThickness = 250. self.totalGroundwaterThickness = pcr.min(maximumThickness, totalGroundwaterThickness) # TODO: Define the maximum value as part of the configuration file # surface water bed thickness (unit: m) bed_thickness = 0.1 # TODO: Define this as part of the configuration file # surface water bed resistance (unit: day) bed_resistance = bed_thickness / (self.kSatAquifer) minimum_bed_resistance = 1.0 # TODO: Define this as part of the configuration file self.bed_resistance = pcr.max(minimum_bed_resistance,\ bed_resistance,) # option to ignore capillary rise self.ignoreCapRise = True if self.iniItems.modflowParameterOptions['ignoreCapRise'] == "False": self.ignoreCapRise = False # a variable to indicate if the modflow has been called or not self.modflow_has_been_called = False # list of the convergence criteria for HCLOSE (unit: m) # - Deltares default's value is 0.001 m # check this value with Jarno self.criteria_HCLOSE = [0.001, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] self.criteria_HCLOSE = sorted(self.criteria_HCLOSE) # list of the convergence criteria for RCLOSE (unit: m3) # - Deltares default's value for their 25 and 250 m resolution models is 10 m3 # check this value with Jarno cell_area_assumption = verticalSizeInMeter * float(pcr.cellvalue(pcr.mapmaximum(horizontalSizeInMeter),1)[0]) self.criteria_RCLOSE = [10., 10.* cell_area_assumption/(250.*250.), 10.* cell_area_assumption/(25.*25.)] self.criteria_RCLOSE = sorted(self.criteria_RCLOSE) # initiate the index for HCLOSE and RCLOSE self.iteration_HCLOSE = 0 self.iteration_RCLOSE = 0 # initiate old style reporting # TODO: remove this! self.initiate_old_style_groundwater_reporting(iniItems)
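The channel-length approximation used in both constructors, shown in isolation. It assumes an arc-minute-based clone where one arc minute of latitude spans roughly 1852 m; map names are hypothetical:

import numpy as np
import pcraster as pcr

pcr.setclone("clone.map")                   # hypothetical clone map
cellAreaMap = pcr.readmap("cellarea.map")   # hypothetical cell area [m2]

# vertical cell size in metres from the clone's cell size in degrees;
# horizontal size follows from the (latitude-dependent) cell area
cellSizeInArcMin = np.round(pcr.clone().cellSize() * 60.)
verticalSizeInMeter = cellSizeInArcMin * 1852.
horizontalSizeInMeter = cellAreaMap / verticalSizeInMeter
# channel length approximated by the cell diagonal
channelLength = (horizontalSizeInMeter ** 2 + verticalSizeInMeter ** 2) ** 0.5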
cmd = "col2map --clone " + ldd_file_name + \ " -S -x 3 -y 2 -v 4 " + "one_line.tmp" + " usgs_drain_area.map" print(cmd) os.system(cmd) usgs_drain_area_km2 = pcr.readmap("usgs_drain_area.map") # pcrglobwb catchment area edwin_code_pcrglobwb_catchment_area_km2 = pcr.ifthen( pcr.defined(edwin_code), pcrglobwb_catchment_area_km2) # calculate the absolute difference abs_diff = pcr.abs(usgs_drain_area_km2 - edwin_code_pcrglobwb_catchment_area_km2) # make correction if required abs_diff_value = pcr.cellvalue(pcr.mapmaximum(abs_diff), 1)[0] usgs_drain_area_km2 = pcr.cellvalue(pcr.mapmaximum(usgs_drain_area_km2), 1)[0] if (usgs_drain_area_km2 > 1000.0) and \ (abs_diff_value > 0.10 * usgs_drain_area_km2): # class within 0.1 arc degree windows edwin_code = pcr.windowmajority(edwin_code, 0.1) # find the most accurate cell: areaorder = pcr.areaorder( pcr.windowmaximum(pcr.spatial(pcr.scalar(usgs_drain_area_km2)), 0.1) - pcrglobwb_catchment_area_km2, edwin_code) # select pixel
def main(): # output folder (and tmp folder) clean_out_folder = True if os.path.exists(out_folder): if clean_out_folder: shutil.rmtree(out_folder) os.makedirs(out_folder) else: os.makedirs(out_folder) os.chdir(out_folder) os.system("pwd") # set the clone map print("set the clone") pcr.setclone(global_ldd_30min_inp_file) # define the landmask print("define the landmask") # - based on the 30min input landmask_30min = define_landmask(input_file = global_landmask_30min_file,\ clone_map_file = global_ldd_30min_inp_file,\ output_map_file = "landmask_30min_only.map") # - based on the 05min input landmask_05min = define_landmask(input_file = global_landmask_05min_file,\ clone_map_file = global_ldd_30min_inp_file,\ output_map_file = "landmask_05min_only.map") # - based on the 06min input landmask_06min = define_landmask(input_file = global_landmask_06min_file,\ clone_map_file = global_ldd_30min_inp_file,\ output_map_file = "landmask_06min_only.map") # - based on the 30sec input landmask_30sec = define_landmask(input_file = global_landmask_30sec_file,\ clone_map_file = global_ldd_30min_inp_file,\ output_map_file = "landmask_30sec_only.map") # - based on the 30sec input landmask_03sec = define_landmask(input_file = global_landmask_03sec_file,\ clone_map_file = global_ldd_30min_inp_file,\ output_map_file = "landmask_03sec_only.map") # # - merge all landmasks landmask = pcr.cover(landmask_30min, landmask_05min, landmask_06min, landmask_30sec, landmask_03sec) pcr.report(landmask, "global_landmask_extended_30min.map") # ~ pcr.aguila(landmask) # extend ldd print("extend/define the ldd") ldd_map = pcr.readmap(global_ldd_30min_inp_file) ldd_map = pcr.ifthen(landmask, pcr.cover(ldd_map, pcr.ldd(5))) pcr.report(ldd_map, "global_ldd_extended_30min.map") # ~ pcr.aguila(ldd_map) # catchment map and size catchment_map = pcr.catchment(ldd_map, pcr.pit(ldd_map)) catchment_size = pcr.areatotal(pcr.spatial(pcr.scalar(1.0)), catchment_map) # ~ pcr.aguila(catchment_size) # identify small islands print("identify small islands") # - maps of islands smaller than 15000 cells (at half arc degree resolution) island_map = pcr.ifthen(landmask, pcr.clump(pcr.defined(ldd_map))) island_size = pcr.areatotal(pcr.spatial(pcr.scalar(1.0)), island_map) island_map = pcr.ifthen(island_size < 15000., island_map) # ~ # - use catchments (instead of islands) # ~ island_map = catchment_map # ~ island_size = catchment_size # ~ island_map = pcr.ifthen(island_size < 10000., island_map) # - sort from the largest island # -- take one cell per island as a representative island_map_rep_size = pcr.ifthen( pcr.areaorder(island_size, island_map) == 1.0, island_size) # -- sort from the largest island island_map_rep_ids = pcr.areaorder( island_map_rep_size * -1.00, pcr.ifthen(pcr.defined(island_map_rep_size), pcr.nominal(1.0))) # -- map of smaller islands, sorted from the largest one island_map = pcr.areamajority(pcr.nominal(island_map_rep_ids), island_map) # identify the biggest island for every group of small islands within a certain window (arcdeg cells) print("the biggest island for every group of small islands") large_island_map = pcr.ifthen( pcr.scalar(island_map) == pcr.windowminimum(pcr.scalar(island_map), 15.), island_map) # ~ pcr.aguila(large_island_map) # identify big catchments print("identify large catchments") catchment_map = pcr.catchment(ldd_map, pcr.pit(ldd_map)) catchment_size = pcr.areatotal(pcr.spatial(pcr.scalar(1.0)), catchment_map) # ~ # - identify all large catchments with size >= 50 cells (at the resolution of 30 arcmin) = 50 x 
(50^2) km2 = 125000 km2 # ~ large_catchment_map = pcr.ifthen(catchment_size >= 50, catchment_map) # ~ # - identify all large catchments with size >= 10 cells (at the resolution of 30 arcmin) # ~ large_catchment_map = pcr.ifthen(catchment_size >= 10, catchment_map) # ~ # - identify all large catchments with size >= 5 cells (at the resolution of 30 arcmin) # ~ large_catchment_map = pcr.ifthen(catchment_size >= 5, catchment_map) # ~ # - identify all large catchments with size >= 20 cells (at the resolution of 30 arcmin) # ~ large_catchment_map = pcr.ifthen(catchment_size >= 20, catchment_map) # - identify all large catchments with size >= 25 cells (at the resolution of 30 arcmin) large_catchment_map = pcr.ifthen(catchment_size >= 25, catchment_map) # - give the codes that are different than islands large_catchment_map = pcr.nominal( pcr.scalar(large_catchment_map) + 10. * vos.getMinMaxMean(pcr.scalar(large_island_map))[1]) # merge biggest islands and big catchments print("merge large catchments and islands") large_catchment_and_island_map = pcr.cover(large_catchment_map, large_island_map) # ~ large_catchment_and_island_map = pcr.cover(large_island_map, large_catchment_map) large_catchment_and_island_map_size = pcr.areatotal( pcr.spatial(pcr.scalar(1.0)), large_catchment_and_island_map) # - sort from the largest one # -- take one cell per island as a representative large_catchment_and_island_map_rep_size = pcr.ifthen( pcr.areaorder(large_catchment_and_island_map_size, large_catchment_and_island_map) == 1.0, large_catchment_and_island_map_size) # -- sort from the largest large_catchment_and_island_map_rep_ids = pcr.areaorder( large_catchment_and_island_map_rep_size * -1.00, pcr.ifthen(pcr.defined(large_catchment_and_island_map_rep_size), pcr.nominal(1.0))) # -- map of largest catchments and islands, sorted from the largest one large_catchment_and_island_map = pcr.areamajority( pcr.nominal(large_catchment_and_island_map_rep_ids), large_catchment_and_island_map) # ~ pcr.report(large_catchment_and_island_map, "large_catchments_and_islands.map") # ~ # perform cdo fillmiss2 in order to merge the small catchments to the nearest large catchments # ~ print("spatial interpolation/extrapolation using cdo fillmiss2 to get initial subdomains") # ~ cmd = "gdal_translate -of NETCDF large_catchments_and_islands.map large_catchments_and_islands.nc" # ~ print(cmd); os.system(cmd) # ~ cmd = "cdo fillmiss2 large_catchments_and_islands.nc large_catchments_and_islands_filled.nc" # ~ print(cmd); os.system(cmd) # ~ cmd = "gdal_translate -of PCRaster large_catchments_and_islands_filled.nc large_catchments_and_islands_filled.map" # ~ print(cmd); os.system(cmd) # ~ cmd = "mapattr -c " + global_ldd_30min_inp_file + " " + "large_catchments_and_islands_filled.map" # ~ print(cmd); os.system(cmd) # ~ # - initial subdomains # ~ subdomains_initial = pcr.nominal(pcr.readmap("large_catchments_and_islands_filled.map")) # ~ subdomains_initial = pcr.areamajority(subdomains_initial, catchment_map) # ~ pcr.aguila(subdomains_initial) # spatial interpolation/extrapolation in order to merge the small catchments to the nearest large catchments print("spatial interpolation/extrapolation to get initial subdomains") field = large_catchment_and_island_map cellID = pcr.nominal(pcr.uniqueid(pcr.defined(field))) zoneID = pcr.spreadzone(cellID, 0, 1) field = pcr.areamajority(field, zoneID) subdomains_initial = field subdomains_initial = pcr.areamajority(subdomains_initial, catchment_map) pcr.aguila(subdomains_initial) 
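The sort-islands-by-size trick above leans on pcr.areaorder twice. A condensed sketch of the same three steps, assuming island_map is a hypothetical nominal map of island ids:

import pcraster as pcr

pcr.setclone("clone.map")                 # hypothetical clone map
islandMap = pcr.readmap("islands.map")    # hypothetical nominal island ids

islandSize = pcr.areatotal(pcr.spatial(pcr.scalar(1.0)), islandMap)
# rank 1 within each island marks exactly one representative cell
repSize = pcr.ifthen(pcr.areaorder(islandSize, islandMap) == 1.0, islandSize)
# ranking the negated sizes over a single class sorts islands from
# largest (rank 1) to smallest
repIds = pcr.areaorder(repSize * -1.0,
                       pcr.ifthen(pcr.defined(repSize), pcr.nominal(1.0)))
# broadcast the sorted ids back onto whole islands
islandSorted = pcr.areamajority(pcr.nominal(repIds), islandMap)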
pcr.report(subdomains_initial, "global_subdomains_30min_initial.map") print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[0]))) print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[1]))) # ~ print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial_clump))[0]))) # ~ print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial_clump))[1]))) print("Checking all subdomains, avoid too large subdomains") num_of_masks = int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[1]) # clone code that will be assigned assigned_number = 0 subdomains_final = pcr.ifthen( pcr.scalar(subdomains_initial) < -7777, pcr.nominal(0)) for nr in range(1, num_of_masks + 1, 1): msg = "Processing the landmask %s" % (str(nr)) print(msg) mask_selected_boolean = pcr.ifthen(subdomains_initial == nr, pcr.boolean(1.0)) # ~ if nr == 1: pcr.aguila(mask_selected_boolean) xmin, ymin, xmax, ymax = boundingBox(mask_selected_boolean) area_in_degree2 = (xmax - xmin) * (ymax - ymin) # ~ print(str(area_in_degree2)) # check whether the size of bounding box is ok # - initial check value check_ok = True reference_area_in_degree2 = 2500. if area_in_degree2 > 1.50 * reference_area_in_degree2: check_ok = False if (xmax - xmin) > 10 * (ymax - ymin): check_ok = False if check_ok == True: msg = "Clump is not needed." msg = "\n\n" + str(msg) + "\n\n" print(msg) # assign the clone code assigned_number = assigned_number + 1 # update global landmask for river and land mask_selected_nominal = pcr.ifthen(mask_selected_boolean, pcr.nominal(assigned_number)) subdomains_final = pcr.cover(subdomains_final, mask_selected_nominal) if check_ok == False: msg = "Clump is needed." msg = "\n\n" + str(msg) + "\n\n" print(msg) # make clump clump_ids = pcr.nominal(pcr.clump(mask_selected_boolean)) # merge clumps that are close together clump_ids_window_majority = pcr.windowmajority(clump_ids, 10.0) clump_ids = pcr.areamajority(clump_ids_window_majority, clump_ids) # ~ pcr.aguila(clump_ids) # minimimum and maximum values min_clump_id = int( pcr.cellvalue(pcr.mapminimum(pcr.scalar(clump_ids)), 1)[0]) max_clump_id = int( pcr.cellvalue(pcr.mapmaximum(pcr.scalar(clump_ids)), 1)[0]) for clump_id in range(min_clump_id, max_clump_id + 1, 1): msg = "Processing the clump %s of %s from the landmask %s" % ( str(clump_id), str(max_clump_id), str(nr)) msg = "\n\n" + str(msg) + "\n\n" print(msg) # identify mask based on the clump mask_selected_boolean_from_clump = pcr.ifthen( clump_ids == pcr.nominal(clump_id), mask_selected_boolean) mask_selected_boolean_from_clump = pcr.ifthen( mask_selected_boolean_from_clump, mask_selected_boolean_from_clump) # check whether the clump is empty check_mask_selected_boolean_from_clump = pcr.ifthen( mask_selected_boolean, mask_selected_boolean_from_clump) check_if_empty = float( pcr.cellvalue( pcr.mapmaximum( pcr.scalar( pcr.defined( check_mask_selected_boolean_from_clump))), 1)[0]) if check_if_empty == 0.0: msg = "Map is empty !" msg = "\n\n" + str(msg) + "\n\n" print(msg) else: msg = "Map is NOT empty !" 
msg = "\n\n" + str(msg) + "\n\n" print(msg) # assign the clone code assigned_number = assigned_number + 1 # update global landmask for river and land mask_selected_nominal = pcr.ifthen( mask_selected_boolean_from_clump, pcr.nominal(assigned_number)) subdomains_final = pcr.cover(subdomains_final, mask_selected_nominal) # ~ # kill all aguila processes if exist # ~ os.system('killall aguila') pcr.aguila(subdomains_final) print("") print("") print("") print("The subdomain map is READY.") pcr.report(subdomains_final, "global_subdomains_30min_final.map") num_of_masks = int(vos.getMinMaxMean(pcr.scalar(subdomains_final))[1]) print(num_of_masks) print("") print("") print("") for nr in range(1, num_of_masks + 1, 1): mask_selected_boolean = pcr.ifthen(subdomains_final == nr, pcr.boolean(1.0)) xmin, ymin, xmax, ymax = boundingBox(mask_selected_boolean) area_in_degree2 = (xmax - xmin) * (ymax - ymin) print( str(nr) + " ; " + str(area_in_degree2) + " ; " + str((xmax - xmin)) + " ; " + str((ymax - ymin))) print("") print("") print("") print("Number of subdomains: " + str(num_of_masks)) print("") print("") print("") # spatial extrapolation in order to cover the entire map print("spatial interpolation/extrapolation to cover the entire map") field = subdomains_final cellID = pcr.nominal(pcr.uniqueid(pcr.defined(field))) zoneID = pcr.spreadzone(cellID, 0, 1) field = pcr.areamajority(field, zoneID) subdomains_final_filled = field pcr.aguila(subdomains_final_filled) pcr.report(subdomains_final_filled, "global_subdomains_30min_final_filled.map")
class TimeoutputTimeseries(object): """ Class to create pcrcalc timeoutput style timeseries """ def __init__(self, tssFilename, model, idMap=None, noHeader=False): """ """ if not isinstance(tssFilename, str): raise Exception( "timeseries output filename must be of type string") self._outputFilename = tssFilename self._maxId = 1 self._spatialId = None self._spatialDatatype = None self._spatialIdGiven = False self._userModel = model self._writeHeader = not noHeader # array to store the timestep values self._sampleValues = None _idMap = False if isinstance(idMap, str) or isinstance(idMap, pcraster._pcraster.Field): _idMap = True nrRows = self._userModel.nrTimeSteps() - self._userModel.firstTimeStep( ) + 1 if _idMap: self._spatialId = idMap if isinstance(idMap, str): self._spatialId = pcraster.readmap(idMap) _allowdDataTypes = [ pcraster.Nominal, pcraster.Ordinal, pcraster.Boolean ] if self._spatialId.dataType() not in _allowdDataTypes: raise Exception( "idMap must be of type Nominal, Ordinal or Boolean") if self._spatialId.isSpatial(): self._maxId, valid = pcraster.cellvalue( pcraster.mapmaximum(pcraster.ordinal(self._spatialId)), 1) else: self._maxId = 1 # cell indices of the sample locations self._sampleAddresses = [] for cellId in range(1, self._maxId + 1): self._sampleAddresses.append(self._getIndex(cellId)) self._spatialIdGiven = True nrCols = self._maxId self._sampleValues = [[Decimal("NaN")] * nrCols for _ in [0] * nrRows] else: self._sampleValues = [[Decimal("NaN")] * 1 for _ in [0] * nrRows] def _getIndex(self, cellId): """ returns the cell index of a sample location """ nrCells = pcraster.clone().nrRows() * pcraster.clone().nrCols() found = False cell = 1 index = 0 while found == False: if pcraster.cellvalue(self._spatialId, cell)[1] == True and pcraster.cellvalue( self._spatialId, cell)[0] == cellId: index = cell found = True cell += 1 if cell > nrCells: raise RuntimeError( "could not find a cell with the index number %d" % (cellId)) return index def sample(self, expression): """ Sampling the current values of 'expression' at the given locations for the current timestep """ arrayRowPos = self._userModel.currentTimeStep( ) - self._userModel.firstTimeStep() #if isinstance(expression, float): # expression = pcraster.scalar(expression) try: # store the data type for tss file header if self._spatialDatatype == None: self._spatialDatatype = str(expression.dataType()) except AttributeError, e: datatype, sep, tail = str(e).partition(" ") msg = "Argument must be a PCRaster map, type %s given. 
If necessary use data conversion functions like scalar()" % ( datatype) raise AttributeError(msg) if self._spatialIdGiven: if expression.dataType() == pcraster.Scalar or expression.dataType( ) == pcraster.Directional: tmp = pcraster.areaaverage(pcraster.spatial(expression), pcraster.spatial(self._spatialId)) else: tmp = pcraster.areamajority(pcraster.spatial(expression), pcraster.spatial(self._spatialId)) col = 0 for cellIndex in self._sampleAddresses: value, valid = pcraster.cellvalue(tmp, cellIndex) if not valid: value = Decimal("NaN") self._sampleValues[arrayRowPos][col] = value col += 1 else: if expression.dataType() == pcraster.Scalar or expression.dataType( ) == pcraster.Directional: tmp = pcraster.maptotal(pcraster.spatial(expression))\ / pcraster.maptotal(pcraster.scalar(pcraster.defined(pcraster.spatial(expression)))) else: tmp = pcraster.mapmaximum(pcraster.maptotal(pcraster.areamajority(pcraster.spatial(expression),\ pcraster.spatial(pcraster.nominal(1))))) value, valid = pcraster.cellvalue(tmp, 1) if not valid: value = Decimal("NaN") self._sampleValues[arrayRowPos] = value if self._userModel.currentTimeStep() == self._userModel.nrTimeSteps(): self._writeTssFile()
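The constructor above sizes its value table from the largest sample id. The same non-spatial extraction in isolation (map name hypothetical, pcraster spelled out as in the class):

import pcraster

pcraster.setclone("clone.map")            # hypothetical clone map
idMap = pcraster.readmap("samples.map")   # hypothetical ordinal id map

# highest sample id as a plain Python int, read from the non-spatial
# result of mapmaximum; this is how _maxId is derived above
maxId, isValid = pcraster.cellvalue(
    pcraster.mapmaximum(pcraster.ordinal(idMap)), 1)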
def readTopo(self, iniItems, optionDict): # a dictionary/section of options that will be used if optionDict == None: optionDict = iniItems._sections["landSurfaceOptions"] # maps of elevation attributes: topoParams = ["tanslope", "slopeLength", "orographyBeta"] if optionDict["topographyNC"] == str(None): for var in topoParams: input = configget(iniItems, "landSurfaceOptions", str(var), "None") vars(self)[var] = vos.readPCRmapClone( input, self.cloneMap, self.tmpDir, self.inputDir ) if var != "slopeLength": vars(self)[var] = pcr.cover(vars(self)[var], 0.0) else: topoPropertiesNC = vos.getFullPath( optionDict["topographyNC"], self.inputDir ) for var in topoParams: vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime( topoPropertiesNC, var, cloneMapFileName=self.cloneMap ) if var != "slopeLength": vars(self)[var] = pcr.cover(vars(self)[var], 0.0) # ~ self.tanslope = pcr.max(self.tanslope, 0.00001) # In principle, tanslope can be zero. Zero tanslope will provide zero TCL (no interflow) # covering slopeLength with its maximum value self.slopeLength = pcr.cover(self.slopeLength, pcr.mapmaximum(self.slopeLength)) # maps of relative elevation above flood plains dzRel = [ "dzRel0001", "dzRel0005", "dzRel0010", "dzRel0020", "dzRel0030", "dzRel0040", "dzRel0050", "dzRel0060", "dzRel0070", "dzRel0080", "dzRel0090", "dzRel0100", ] if optionDict["topographyNC"] == str(None): for i in range(0, len(dzRel)): var = dzRel[i] input = optionDict[str(var)] vars(self)[var] = vos.readPCRmapClone( input, self.cloneMap, self.tmpDir, self.inputDir ) vars(self)[var] = pcr.cover(vars(self)[var], 0.0) if i > 0: vars(self)[var] = pcr.max(vars(self)[var], vars(self)[dzRel[i - 1]]) else: for i in range(0, len(dzRel)): var = dzRel[i] vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime( topoPropertiesNC, var, cloneMapFileName=self.cloneMap ) vars(self)[var] = pcr.cover(vars(self)[var], 0.0) if i > 0: vars(self)[var] = pcr.max(vars(self)[var], vars(self)[dzRel[i - 1]])
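The dzRel post-processing enforces that each relative-elevation level is at least the previous one. A compact sketch of that monotonicity guard over a hypothetical list of maps:

import pcraster as pcr

pcr.setclone("clone.map")  # hypothetical clone map
# hypothetical relative-elevation maps, lowest percentile first
levels = [pcr.readmap("dzRel%04d.map" % p) for p in (1, 5, 10)]

# same guard as above: each level is forced to be at least as high
# as the previous one, so the profile never decreases
for i in range(1, len(levels)):
    levels[i] = pcr.max(levels[i], levels[i - 1])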
def main(): # output folder clean_out_folder = True if os.path.exists(out_folder): if clean_out_folder: shutil.rmtree(out_folder) os.makedirs(out_folder) else: os.makedirs(out_folder) os.chdir(out_folder) os.system("pwd") # tmp folder tmp_folder = out_folder + "/tmp/" if os.path.exists(tmp_folder): shutil.rmtree(tmp_folder) os.makedirs(tmp_folder) # set the clone map print("set the clone map") pcr.setclone(global_clone_map_file) # read ldd map print("define the ldd") # ~ ldd_map = pcr.readmap(global_ldd_inp_file) ldd_map = pcr.lddrepair(pcr.lddrepair(pcr.ldd(vos.readPCRmapClone(v = global_ldd_inp_file, \ cloneMapFileName = global_clone_map_file, \ tmpDir = tmp_folder, \ absolutePath = None, \ isLddMap = True, \ cover = None, \ isNomMap = False)))) # define the landmask if landmask_map_file == None: print("define the landmask based on the ldd input") # ~ landmask = pcr.defined(pcr.readmap(global_ldd_inp_file)) landmask = pcr.defined(ldd_map) landmask = pcr.ifthen(landmask, landmask) else: print("define the landmask based on the input landmask_map_file") landmask = pcr.readmap(landmask_map_file) ldd_map = pcr.ifthen(landmask, pcr.cover(ldd_map, pcr.ldd(5))) ldd_map = pcr.lddrepair(pcr.lddrepair(pcr.ldd(ldd_map))) landmask = pcr.defined(ldd_map) landmask = pcr.ifthen(landmask, landmask) # save ldd files used # - global ldd cmd = "cp " + str(global_ldd_inp_file) + " ." print(cmd) os.system(cmd) # - ldd map that is used pcr.report(ldd_map, "lddmap_used.map") # make catchment map print("make catchment map") catchment_map = pcr.catchment(ldd_map, pcr.pit(ldd_map)) # read global subdomain file print("read global subdomain file") global_subdomain_map = vos.readPCRmapClone( v=global_subdomain_file, cloneMapFileName=global_clone_map_file, tmpDir=tmp_folder, absolutePath=None, isLddMap=False, cover=None, isNomMap=True) # set initial subdomain print("assign subdomains to all catchments") subdomains_initial = pcr.areamajority(global_subdomain_map, catchment_map) subdomains_initial = pcr.ifthen(landmask, subdomains_initial) pcr.aguila(subdomains_initial) pcr.report(subdomains_initial, "global_subdomains_initial.map") print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[0]))) print(str(int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[1]))) print("Checking all subdomains, avoid too large subdomains") num_of_masks = int(vos.getMinMaxMean(pcr.scalar(subdomains_initial))[1]) # clone code that will be assigned assigned_number = 0 subdomains_final = pcr.ifthen( pcr.scalar(subdomains_initial) < -7777, pcr.nominal(0)) for nr in range(1, num_of_masks + 1, 1): msg = "Processing the landmask %s" % (str(nr)) print(msg) mask_selected_boolean = pcr.ifthen(subdomains_initial == nr, pcr.boolean(1.0)) process_this_clone = False if pcr.cellvalue(pcr.mapmaximum(pcr.scalar(mask_selected_boolean)), 1, 1)[0] > 0: process_this_clone = True # ~ if nr == 1: pcr.aguila(mask_selected_boolean) # - initial check value check_ok = True if process_this_clone: xmin, ymin, xmax, ymax = boundingBox(mask_selected_boolean) area_in_degree2 = (xmax - xmin) * (ymax - ymin) # ~ print(str(area_in_degree2)) # check whether the size of bounding box is ok reference_area_in_degree2 = 2500. if area_in_degree2 > 1.50 * reference_area_in_degree2: check_ok = False if (xmax - xmin) > 10 * (ymax - ymin): check_ok = False # ~ # ignore checking # ~ check_ok = True if check_ok == True and process_this_clone == True: msg = "Clump is not needed." 
msg = "\n\n" + str(msg) + "\n\n" print(msg) # assign the clone code assigned_number = assigned_number + 1 # update global landmask for river and land mask_selected_nominal = pcr.ifthen(mask_selected_boolean, pcr.nominal(assigned_number)) subdomains_final = pcr.cover(subdomains_final, mask_selected_nominal) if check_ok == False and process_this_clone == True: msg = "Clump is needed." msg = "\n\n" + str(msg) + "\n\n" print(msg) # make clump clump_ids = pcr.nominal(pcr.clump(mask_selected_boolean)) # merge clumps that are close together clump_ids_window_majority = pcr.windowmajority(clump_ids, 10.0) clump_ids = pcr.areamajority(clump_ids_window_majority, clump_ids) # ~ pcr.aguila(clump_ids) # minimimum and maximum values min_clump_id = int( pcr.cellvalue(pcr.mapminimum(pcr.scalar(clump_ids)), 1)[0]) max_clump_id = int( pcr.cellvalue(pcr.mapmaximum(pcr.scalar(clump_ids)), 1)[0]) for clump_id in range(min_clump_id, max_clump_id + 1, 1): msg = "Processing the clump %s of %s from the landmask %s" % ( str(clump_id), str(max_clump_id), str(nr)) msg = "\n\n" + str(msg) + "\n\n" print(msg) # identify mask based on the clump mask_selected_boolean_from_clump = pcr.ifthen( clump_ids == pcr.nominal(clump_id), mask_selected_boolean) mask_selected_boolean_from_clump = pcr.ifthen( mask_selected_boolean_from_clump, mask_selected_boolean_from_clump) # check whether the clump is empty check_mask_selected_boolean_from_clump = pcr.ifthen( mask_selected_boolean, mask_selected_boolean_from_clump) check_if_empty = float( pcr.cellvalue( pcr.mapmaximum( pcr.scalar( pcr.defined( check_mask_selected_boolean_from_clump))), 1)[0]) if check_if_empty == 0.0: msg = "Map is empty !" msg = "\n\n" + str(msg) + "\n\n" print(msg) else: msg = "Map is NOT empty !" msg = "\n\n" + str(msg) + "\n\n" print(msg) # assign the clone code assigned_number = assigned_number + 1 # update global landmask for river and land mask_selected_nominal = pcr.ifthen( mask_selected_boolean_from_clump, pcr.nominal(assigned_number)) subdomains_final = pcr.cover(subdomains_final, mask_selected_nominal) # ~ # kill all aguila processes if exist # ~ os.system('killall aguila') pcr.aguila(subdomains_final) print("") print("") print("") print("The subdomain map is READY.") pcr.report(subdomains_final, "global_subdomains_final.map") num_of_masks = int(vos.getMinMaxMean(pcr.scalar(subdomains_final))[1]) print(num_of_masks) print("") print("") print("") print("Making the clone and landmask maps for all subdomains") num_of_masks = int(vos.getMinMaxMean(pcr.scalar(subdomains_final))[1]) # clone and mask folders clone_folder = out_folder + "/clone/" if os.path.exists(clone_folder): shutil.rmtree(clone_folder) os.makedirs(clone_folder) mask_folder = out_folder + "/mask/" if os.path.exists(mask_folder): shutil.rmtree(mask_folder) os.makedirs(mask_folder) print("") print("") for nr in range(1, num_of_masks + 1, 1): msg = "Processing the subdomain %s" % (str(nr)) print(msg) # set the global clone pcr.setclone(global_clone_map_file) mask_selected_boolean = pcr.ifthen(subdomains_final == nr, pcr.boolean(1.0)) mask_selected_nominal = pcr.ifthen(subdomains_final == nr, pcr.nominal(nr)) mask_file = "mask/mask_%s.map" % (str(nr)) pcr.report(mask_selected_nominal, mask_file) xmin, ymin, xmax, ymax = boundingBox(mask_selected_boolean) area_in_degree2 = (xmax - xmin) * (ymax - ymin) print( str(nr) + " ; " + str(area_in_degree2) + " ; " + str((xmax - xmin)) + " ; " + str((ymax - ymin))) # cellsize in arcdegree cellsize = cellsize_in_arcmin / 60. 
# number of rows and cols num_rows = int(round(ymax - ymin) / cellsize) num_cols = int(round(xmax - xmin) / cellsize) # make the clone map using mapattr clonemap_mask_file = "clone/clonemap_mask_%s.map" % (str(nr)) cmd = "mapattr -s -R %s -C %s -B -P yb2t -x %s -y %s -l %s %s" % ( str(num_rows), str(num_cols), str(xmin), str(ymax), str(cellsize), clonemap_mask_file) print(cmd) os.system(cmd) # set the local landmask for the clump pcr.setclone(clonemap_mask_file) local_mask = vos.readPCRmapClone(v = mask_file, \ cloneMapFileName = clonemap_mask_file, tmpDir = tmp_folder, \ absolutePath = None, isLddMap = False, cover = None, isNomMap = True) local_mask_boolean = pcr.defined(local_mask) local_mask_boolean = pcr.ifthen(local_mask_boolean, local_mask_boolean) pcr.report(local_mask_boolean, mask_file) print("") print("") print("") print(num_of_masks)
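A sketch of the row/column arithmetic above, in plain Python with hypothetical numbers. Note that boundingBox floors and ceils the coordinates, so round(ymax - ymin) equals ymax - ymin here; if the extents could be non-integer, round() should wrap the whole quotient as below:

# hypothetical bounding box in degrees and a 30 arc-minute cell size
cellsize = 30. / 60.
xmin, ymin, xmax, ymax = 5.0, 47.0, 11.0, 54.0

# round the quotient, not the extent, to stay robust for fractional boxes
num_rows = int(round((ymax - ymin) / cellsize))
num_cols = int(round((xmax - xmin) / cellsize))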
def __init__(self, iniItems,landmask,spinUp): object.__init__(self) self.cloneMap = iniItems.cloneMap self.tmpDir = iniItems.tmpDir self.inputDir = iniItems.globalOptions['inputDir'] self.landmask = landmask # option to activate water balance check self.debugWaterBalance = True if iniItems.routingOptions['debugWaterBalance'] == "False": self.debugWaterBalance = False if iniItems.groundwaterOptions['groundwaterPropertiesNC'] == str(None): # assign the recession coefficient parameter(s) self.recessionCoeff = vos.readPCRmapClone(\ iniItems.groundwaterOptions['recessionCoeff'], self.cloneMap,self.tmpDir,self.inputDir) else: groundwaterPropertiesNC = vos.getFullPath(\ iniItems.groundwaterOptions[\ 'groundwaterPropertiesNC'], self.inputDir) self.recessionCoeff = vos.netcdf2PCRobjCloneWithoutTime(\ groundwaterPropertiesNC,'recessionCoeff',\ cloneMapFileName = self.cloneMap) # groundwater recession coefficient (day-1) self.recessionCoeff = pcr.cover(self.recessionCoeff,0.00) self.recessionCoeff = pcr.min(1.0000,self.recessionCoeff) # if 'minRecessionCoeff' in iniItems.groundwaterOptions.keys(): minRecessionCoeff = float(iniItems.groundwaterOptions['minRecessionCoeff']) else: minRecessionCoeff = 1.0e-4 # This is the minimum value used in Van Beek et al. (2011). self.recessionCoeff = pcr.max(minRecessionCoeff,self.recessionCoeff) if iniItems.groundwaterOptions['groundwaterPropertiesNC'] == str(None): # assign aquifer specific yield self.specificYield = vos.readPCRmapClone(\ iniItems.groundwaterOptions['specificYield'], self.cloneMap,self.tmpDir,self.inputDir) else: self.specificYield = vos.netcdf2PCRobjCloneWithoutTime(\ groundwaterPropertiesNC,'specificYield',\ cloneMapFileName = self.cloneMap) self.specificYield = pcr.cover(self.specificYield,0.0) self.specificYield = pcr.max(0.010,self.specificYield) # TODO: TO BE CHECKED: The resample process of specificYield self.specificYield = pcr.min(1.000,self.specificYield) if iniItems.groundwaterOptions['groundwaterPropertiesNC'] == str(None): # assign aquifer saturated conductivity self.kSatAquifer = vos.readPCRmapClone(\ iniItems.groundwaterOptions['kSatAquifer'], self.cloneMap,self.tmpDir,self.inputDir) else: self.kSatAquifer = vos.netcdf2PCRobjCloneWithoutTime(\ groundwaterPropertiesNC,'kSatAquifer',\ cloneMapFileName = self.cloneMap) self.kSatAquifer = pcr.cover(self.kSatAquifer,0.0) self.kSatAquifer = pcr.max(0.010,self.kSatAquifer) # limitAbstraction options self.limitAbstraction = False if iniItems.landSurfaceOptions['limitAbstraction'] == "True": self.limitAbstraction = True # option for limiting fossil groundwater abstractions - This option is only defined for the IWMI project self.limitFossilGroundwaterAbstraction = False if self.limitAbstraction == False and\ "extraOptionsforProjectWithIWMI" in iniItems.allSections and\ iniItems.extraOptionsforProjectWithIWMI['limitFossilGroundWaterAbstraction'] == "True": logger.info('Fossil groundwater abstraction limit is used (IWMI project).') self.limitFossilGroundwaterAbstraction = True # estimate of thickness (unit: m) of accessible groundwater: shallow and deep totalGroundwaterThickness = vos.readPCRmapClone(\ iniItems.extraOptionsforProjectWithIWMI['estimateOfTotalGroundwaterThickness'], self.cloneMap,self.tmpDir,self.inputDir) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, pcr.windowaverage(totalGroundwaterThickness, 1.0)) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, pcr.windowaverage(totalGroundwaterThickness, 1.5)) totalGroundwaterThickness =
pcr.cover(totalGroundwaterThickness, pcr.windowaverage(totalGroundwaterThickness, 2.5)) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, pcr.windowaverage(totalGroundwaterThickness, 5.0)) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, pcr.windowaverage(totalGroundwaterThickness, 7.5)) totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, pcr.mapmaximum(totalGroundwaterThickness)) # set minimum thickness to 50 m: totalGroundwaterThickness = pcr.max(50.0, totalGroundwaterThickness) # estimate of capacity (unit: m) of renewable groundwater (shallow) storGroundwaterCap = pcr.cover( vos.readPCRmapClone(\ iniItems.extraOptionsforProjectWithIWMI['estimateOfRenewableGroundwaterCapacity'], self.cloneMap,self.tmpDir,self.inputDir),\ 0.0) # fossil groundwater capacity (unit: m) self.fossilWaterCap = pcr.max(0.0,\ totalGroundwaterThickness*self.specificYield - storGroundwaterCap) # option for limiting regional groundwater abstractions - This option is only defined for the IWMI project self.limitRegionalAnnualGroundwaterAbstraction = False if "extraOptionsforProjectWithIWMI" in iniItems.allSections and\ iniItems.extraOptionsforProjectWithIWMI['limitRegionalAnnualGroundwaterAbstraction'] == "True": logger.info('Limit for regional groundwater abstraction is used (IWMI project).') self.limitRegionalAnnualGroundwaterAbstraction = True region_ids = vos.readPCRmapClone(\ iniItems.extraOptionsforProjectWithIWMI['regionIds'], self.cloneMap,self.tmpDir,self.inputDir) self.region_ids = pcr.nominal(region_ids) self.region_ids = pcr.ifthen(self.landmask, self.region_ids) self.regionalAnnualGroundwaterAbstractionLimit = vos.readPCRmapClone(\ iniItems.extraOptionsforProjectWithIWMI['pumpingCapacity'], self.cloneMap,self.tmpDir,self.inputDir) self.regionalAnnualGroundwaterAbstractionLimit = pcr.roundup(self.regionalAnnualGroundwaterAbstractionLimit*1000.)/1000. self.regionalAnnualGroundwaterAbstractionLimit = pcr.cover(self.regionalAnnualGroundwaterAbstractionLimit, 0.0) self.regionalAnnualGroundwaterAbstractionLimit *= 1000. * 1000. * 1000.
# unit: m3/year self.regionalAnnualGroundwaterAbstractionLimit = pcr.ifthen(self.landmask,\ self.regionalAnnualGroundwaterAbstractionLimit) # zones at which water allocation (surface and groundwater allocation) is determined self.usingAllocSegments = False if iniItems.landSurfaceOptions['allocationSegmentsForGroundSurfaceWater'] != "None": self.usingAllocSegments = True # incorporating groundwater distribution network: if self.usingAllocSegments and self.limitAbstraction == False: self.allocSegments = vos.readPCRmapClone(\ iniItems.landSurfaceOptions['allocationSegmentsForGroundSurfaceWater'], self.cloneMap,self.tmpDir,self.inputDir,isLddMap=False,cover=None,isNomMap=True) self.allocSegments = pcr.ifthen(self.landmask, self.allocSegments) cellArea = vos.readPCRmapClone(\ iniItems.routingOptions['cellAreaMap'], self.cloneMap,self.tmpDir,self.inputDir) cellArea = pcr.ifthen(self.landmask, cellArea) # TODO: integrate this one with the one coming from the routing module self.segmentArea = pcr.areatotal(pcr.cover(cellArea, 0.0), self.allocSegments) self.segmentArea = pcr.ifthen(self.landmask, self.segmentArea) self.report = True try: self.outDailyTotNC = iniItems.groundwaterOptions['outDailyTotNC'].split(",") self.outMonthTotNC = iniItems.groundwaterOptions['outMonthTotNC'].split(",") self.outMonthAvgNC = iniItems.groundwaterOptions['outMonthAvgNC'].split(",") self.outMonthEndNC = iniItems.groundwaterOptions['outMonthEndNC'].split(",") self.outAnnuaTotNC = iniItems.groundwaterOptions['outAnnuaTotNC'].split(",") self.outAnnuaAvgNC = iniItems.groundwaterOptions['outAnnuaAvgNC'].split(",") self.outAnnuaEndNC = iniItems.groundwaterOptions['outAnnuaEndNC'].split(",") except: self.report = False if self.report == True: self.outNCDir = iniItems.outNCDir self.netcdfObj = PCR2netCDF(iniItems) # # daily output in netCDF files: if self.outDailyTotNC[0] != "None": for var in self.outDailyTotNC: # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_dailyTot.nc",\ var,"undefined") # MONTHly output in netCDF files: # - cummulative if self.outMonthTotNC[0] != "None": for var in self.outMonthTotNC: # initiating monthlyVarTot (accumulator variable): vars(self)[var+'MonthTot'] = None # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_monthTot.nc",\ var,"undefined") # - average if self.outMonthAvgNC[0] != "None": for var in self.outMonthAvgNC: # initiating monthlyTotAvg (accumulator variable) vars(self)[var+'MonthTot'] = None # initiating monthlyVarAvg: vars(self)[var+'MonthAvg'] = None # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_monthAvg.nc",\ var,"undefined") # - last day of the month if self.outMonthEndNC[0] != "None": for var in self.outMonthEndNC: # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_monthEnd.nc",\ var,"undefined") # YEARly output in netCDF files: # - cummulative if self.outAnnuaTotNC[0] != "None": for var in self.outAnnuaTotNC: # initiating yearly accumulator variable: vars(self)[var+'AnnuaTot'] = None # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_annuaTot.nc",\ var,"undefined") # - average if self.outAnnuaAvgNC[0] != "None": for var in self.outAnnuaAvgNC: # initiating annualyVarAvg: vars(self)[var+'AnnuaAvg'] = None # initiating annualyTotAvg (accumulator variable) vars(self)[var+'AnnuaTot'] = None # creating the netCDF files: 
self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_annuaAvg.nc",\ var,"undefined") # - last day of the year if self.outAnnuaEndNC[0] != "None": for var in self.outAnnuaEndNC: # creating the netCDF files: self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \ str(var)+"_annuaEnd.nc",\ var,"undefined") #get initial conditions self.getICs(iniItems,spinUp)
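The fossil groundwater capacity above follows a simple storage budget: total drainable storage minus the renewable part, floored at zero. The formula in isolation, with hypothetical map names:

import pcraster as pcr

pcr.setclone("clone.map")                          # hypothetical clone map
thickness = pcr.readmap("thickness.map")           # aquifer thickness [m]
specificYield = pcr.readmap("sy.map")              # specific yield [-]
storGroundwaterCap = pcr.readmap("renewable.map")  # renewable capacity [m]

# fossil capacity = drainable storage minus renewable storage, >= 0
fossilWaterCap = pcr.max(0.0,
                         thickness * specificYield - storGroundwaterCap)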
dem = pcr.cover(dem,linescover,pointscover) #pcr.report(dem,'dem1.map') dem = dem + burn #pcr.report(dem,'dem2.map') ldd = pcr.lddcreate(dem,float("1E35"),float("1E35"),float("1E35"),float("1E35")) else: ldd = pcr.lddcreate(dem,burnvalue/2,float("1E35"),float("1E35"),float("1E35")) streamorder = pcr.ordinal(pcr.streamorder(ldd)) river = pcr.boolean(pcr.ifthen(streamorder >= int(min(np.max(pcr.pcr2numpy(streamorder,-9999)),minorder)), streamorder)) outlets = pcr.ifthen(pcr.ordinal(ldd) == 5, pcr.boolean(1)) outlets = pcr.nominal(pcr.uniqueid(outlets)) catchments = pcr.nominal(pcr.catchment(ldd, outlets)) if not keepall: catchments = pcr.nominal(pcr.ifthen(pcr.mapmaximum(pcr.areatotal(pcr.scalar(catchments)*0+1,pcr.nominal(catchments))) == pcr.areatotal(pcr.scalar(catchments)*0+1,pcr.nominal(catchments)),catchments)) pcr.report(ldd,ldd_map) pcr.report(streamorder,streamorder_map) pcr.report(river,river_map) pcr.report(catchments,catchments_map) if not EPSG == None: call(('gdal_translate','-of','GTiff','-stats','-a_srs',EPSG,'-ot','Float32',catchments_map,catchments_tif)) else: call(('gdal_translate','-of','GTiff','-stats','-ot','Float32',catchments_map,catchments_tif)) wt.Raster2Pol(catchments_tif,catchshp,srs) riversid_map = workdir + 'riverid.map' drain_map = workdir + 'drain.map' ldd_mask = pcr.ifthen(river, ldd) upstream = pcr.upstream(ldd_mask, pcr.scalar(river)) downstream = pcr.downstream(ldd_mask, upstream)
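The keepall branch above keeps only the largest catchment by comparing each catchment's cell count against the map-wide maximum. The idiom step by step, with hypothetical inputs:

import pcraster as pcr

pcr.setclone("clone.map")            # hypothetical clone map
ldd = pcr.readmap("ldd.map")         # hypothetical local drain directions
catchments = pcr.nominal(pcr.catchment(ldd, pcr.pit(ldd)))

# areatotal over a constant-1 field counts cells per catchment; only
# the catchment(s) whose count equals the map maximum survive ifthen
ones = pcr.scalar(catchments) * 0 + 1
size = pcr.areatotal(ones, catchments)
largest = pcr.ifthen(pcr.mapmaximum(size) == size, catchments)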
channelLength = clippedRead.get(os.path.join(mapsDir, 'channel_length.map'))
channelDepth = clippedRead.get(os.path.join(mapsDir, 'channel_depth.map'))
floodplainMask = pcr.spatial(pcr.boolean(1))  # NOTE: set to zero for static, to one for dynamic floodplains
channelManN = 0.04
floodplainManN = 0.10

#-flood plain parameterization
#-root of the file name with maps of relative elevation above the floodplain
# and the associated area fractions
relZFileName = 'elev%04d.map'
areaFractions = [0.0, 0.01, 0.05, 0.10, 0.20, 0.30, 0.40,
                 0.50, 0.60, 0.70, 0.80, 0.90, 1.00]
# reduction parameter of the smoothing interval and error threshold
reductionKK = 0.5
criterionKK = 40.

#-modelSignature
if pcr.cellvalue(pcr.mapmaximum(pcr.scalar(floodplainMask)), 1)[0] == 1:
    modelSignature = forcingDataSet + '_dynamic-routing'
else:
    modelSignature = forcingDataSet + '_static-routing'
if noReservoirs:
    modelSignature = modelSignature + '_noreservoirs'
modelSignature += '_%s' % domainStr

#-stacks of specific runoff and direct flux over the water surface [m per unit area and per time step]
# for model input, and roots of the maps of initial storage [m3], discharge [m3/s], flood depth [m]
# and flood fraction [m2/m2]
landSurfaceQFileName = os.path.join(pathRes, 'qloc')
waterSurfaceQFileName = os.path.join(pathRes, 'qw')
actualStorageFileName = os.path.join(pathRes, 'wst')
QFileName = os.path.join(pathRes, 'qc')
flowVelocityFileName = os.path.join(pathRes, 'vel')
floodedDepthFileName = os.path.join(pathRes, 'fldd')
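# The relative-elevation stack implied by relZFileName and areaFractions is
# presumably read one map per fraction level, with the fraction encoded as a
# percentage in the file name (e.g. elev0010.map for areaFraction 0.10).
# That naming convention is an assumption; a hedged sketch of the loop:
import os
import pcraster as pcr

relZ = {}
for fraction in areaFractions:
    # hypothetical reconstruction of the per-level file name
    fileName = os.path.join(mapsDir, relZFileName % int(fraction * 100))
    relZ[fraction] = pcr.readmap(fileName)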
def subcatch_stream(ldd, threshold, stream=None, min_strahler=-999, max_strahler=999,
                    assign_edge=False, assign_existing=False, up_area=None, basin=None):
    """
    Derive catchments based upon a Strahler threshold

    Input:
        ldd -- pcraster object direction, local drain directions
        threshold -- integer, Strahler threshold; subcatchments of order ge
            threshold are derived
        stream=None -- pcraster object ordinal, stream order map (made with
            pcr.streamorder); if provided, the stream order map is not
            generated on the fly but used from this map. Useful when a
            subdomain within a catchment is provided, which would cause edge
            effects in the stream order map
        min_strahler=-999 -- integer, minimum Strahler order of river
            catchments to return
        max_strahler=999 -- integer, maximum Strahler order of river
            catchments to return
        assign_edge=False -- if set to True, unassigned connected areas at the
            edges of the domain are assigned a unique id as well. If set to
            False, edges are not assigned
        assign_existing=False -- if set to True, unassigned edges are assigned
            to existing basins with an upstream weighting. If set to False,
            edges are assigned unique IDs, or not assigned at all

    Output:
        stream_ge -- pcraster object, streams of Strahler order ge threshold
        subcatch -- pcraster object, subcatchments of Strahler order ge threshold
    """
    # derive the stream order if it is not given
    if stream is None:
        stream = pcr.streamorder(ldd)
    stream_ge = pcr.ifthen(stream >= threshold, stream)
    stream_up_sum = pcr.ordinal(pcr.upstream(ldd, pcr.cover(pcr.scalar(stream_ge), 0)))
    # detect any transfer of strahler order, to a higher strahler order
    transition_strahler = pcr.ifthenelse(
        pcr.downstream(ldd, stream_ge) != stream_ge, pcr.boolean(1),
        pcr.ifthenelse(
            pcr.nominal(ldd) == 5, pcr.boolean(1),
            pcr.ifthenelse(
                pcr.downstream(ldd, pcr.scalar(stream_up_sum)) > pcr.scalar(stream_ge),
                pcr.boolean(1), pcr.boolean(0))))
    # make unique ids (write to file)
    transition_unique = pcr.ordinal(pcr.uniqueid(transition_strahler))
    # derive upstream catchment areas (write to file)
    subcatch = pcr.nominal(pcr.subcatchment(ldd, transition_unique))
    # mask out areas outside the basin
    if basin is not None:
        subcatch = pcr.ifthen(basin, subcatch)
    if assign_edge:
        # fill unclassified areas (in pcraster equal to zero) with a unique id,
        # above the maximum id assigned so far
        unique_edge = pcr.clump(pcr.ifthen(subcatch == 0, pcr.ordinal(0)))
        subcatch = pcr.ifthenelse(
            subcatch == 0,
            pcr.nominal(pcr.mapmaximum(pcr.scalar(subcatch)) + pcr.scalar(unique_edge)),
            pcr.nominal(subcatch))
    elif assign_existing:
        # unaccounted areas are added to the largest nearest draining basin
        if up_area is None:
            up_area = pcr.ifthen(pcr.boolean(pcr.cover(stream_ge, 0)), pcr.accuflux(ldd, 1))
        riverid = pcr.ifthen(pcr.boolean(pcr.cover(stream_ge, 0)), subcatch)
        friction = 1. / pcr.scalar(pcr.spreadzone(pcr.cover(pcr.ordinal(up_area), 0), 0, 0))  # *(pcr.scalar(ldd)*0+1)
        delta = pcr.ifthen(
            pcr.scalar(ldd) >= 0,
            pcr.ifthen(pcr.cover(subcatch, 0) == 0,
                       pcr.spreadzone(pcr.cover(riverid, 0), 0, friction)))
        subcatch = pcr.ifthenelse(pcr.boolean(pcr.cover(subcatch, 0)), subcatch, delta)
    # finally, only keep basins with the minimum and maximum river order flowing through them
    strahler_subcatch = pcr.areamaximum(stream, subcatch)
    subcatch = pcr.ifthen(
        pcr.ordinal(strahler_subcatch) >= min_strahler,
        pcr.ifthen(pcr.ordinal(strahler_subcatch) <= max_strahler, subcatch))
    return stream_ge, pcr.ordinal(subcatch)
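# Example use of subcatch_stream, assuming a local drain direction map is
# available on disk (the file name and threshold below are hypothetical):
import pcraster as pcr

pcr.setclone("ldd.map")
ldd = pcr.ldd(pcr.readmap("ldd.map"))
stream_ge, subcatch = subcatch_stream(ldd, threshold=6, assign_edge=True)
pcr.report(stream_ge, "stream_ge.map")
pcr.report(subcatch, "subcatch.map")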