def _reportNew(self, variable, name, style=1):
    """
    Report a PCRaster variable to disk under the framework's naming rules.

    The on-disk format is controlled by self.outputFormat (set in the
    [framework] section of the ini file):

        1: pcraster map
        2: numpy (compressed)
        3: matlab
        4: numpy text files (large and slow)

    Example::

        [framework]
        outputformat = 4

    ``name`` must be given WITHOUT a file extension; a suffix (or a
    timestep-numbered name) is appended depending on the framework state.
    ``style`` is accepted for interface compatibility but is not used here.
    """
    head, tail = os.path.split(name)

    # Refuse names that already carry an extension.
    # NOTE(review): "\." in a non-raw string relies on the unknown escape
    # being passed through unchanged; a raw string r"\." would be cleaner.
    if re.search("\.", tail):
        msg = "File extension given in '" + name + "' not allowed, provide filename without extension"
        raise FrameworkError(msg)

    directoryPrefix = ""
    nameSuffix = ".map"
    newName = ""

    # Monte-Carlo framework: pre/post-mc phases write plain map names,
    # while the stochastic phase writes into a per-sample subdirectory.
    if hasattr(self._userModel(), "_inStochastic"):
        if self._userModel()._inStochastic():
            if self._userModel()._inPremc():
                newName = name + nameSuffix
            elif self._userModel()._inPostmc():
                newName = name + nameSuffix
            else:
                directoryPrefix = str(self._userModel().currentSampleNumber())

    # Initial section: static map name.
    if self._userModel()._inInitial():
        newName = name + nameSuffix

    # Dynamic section (or filter weight update): timestep-numbered name.
    if hasattr(self._userModel(), "_inDynamic"):
        if self._userModel()._inDynamic() or self._inUpdateWeight():
            newName = generateNameT(name, self._userModel().currentTimeStep())

    path = os.path.join(directoryPrefix, newName)

    if self.outputFormat == 1:
        # Support both the old (PCRaster) and new (pcraster) module names.
        try:
            import PCRaster
        except ImportError:
            import pcraster as PCRaster
        PCRaster.report(variable, path)
    elif self.outputFormat == 2:
        # -999 is the missing-value marker used throughout this module.
        numpy.savez(path, pcr2numpy(variable, -999))
    elif self.outputFormat == 3:
        scipy.io.savemat(path, mdict={
            str(self._userModel().currentTimeStep()): pcr2numpy(variable, -999)
        })
    elif self.outputFormat == 4:
        numpy.savetxt(path, pcr2numpy(variable, -999), fmt="%0.6g")
def _getIndex(self, cellId):
    """
    Return the 1-based linear cell index of the sample location whose
    value in the id map equals ``cellId``.

    Scans ``self._spatialId`` cell by cell and returns the index of the
    first valid cell carrying the requested id, or 0 when none is found.
    """
    nrCells = PCRaster.clone().nrRows() * PCRaster.clone().nrCols()
    for cell in range(1, nrCells + 1):
        # cellvalue returns (value, isValid); fetch it once per cell
        # instead of twice as the previous found-flag loop did.
        value, valid = PCRaster.cellvalue(self._spatialId, cell)
        if valid and value == cellId:
            return cell
    return 0
def wf_readmap(self, name, default):
    """
    Adjusted version of readmapNew. The style variable is used to indicate
    how the data is read::

        1 - default: reads pcrmaps
        2 - memory: assumes the map is made available (in memory) using
            the in-memory interface

    .. note::

        The style variable is set using the variable list from the API
        section in the ini file.
    """
    directoryPrefix = ""
    nameSuffix = ".map"
    newName = ""

    varname = os.path.basename(name)

    # Make sure the variable/role administration is filled before asking
    # for the read style of this variable.
    # (NB: "exchnageitems" is the attribute's historical spelling and is
    # referenced elsewhere, so it is kept as-is.)
    thevars = self.exchnageitems.getvars()
    if size(thevars) == 0:
        self.wf_supplyVariableNamesAndRoles()

    style = self.exchnageitems.getvarStyle(varname)
    #print varname + " : " + str(style)

    # Derive the on-disk name from the current framework state
    # (mirrors _reportNew).
    if hasattr(self._userModel(), "_inStochastic"):
        if self._userModel()._inStochastic():
            if self._userModel()._inPremc() or self._userModel()._inPostmc():
                newName = name + nameSuffix
            else:
                directoryPrefix = str(self._userModel().currentSampleNumber())

    if hasattr(self._userModel(), "_inInitial"):
        if self._userModel()._inInitial():
            newName = name + nameSuffix

    if self._inResume():
        timestep = self._userModel().firstTimeStep()
        newName = generateNameT(name, timestep - 1)

    if hasattr(self._userModel(), "_inDynamic"):
        if self._userModel()._inDynamic() or self._inUpdateWeight():
            timestep = self._userModel().currentTimeStep()
            newName = generateNameT(name, timestep)

    if style == 1:
        # Read from disk; fall back to the default value when missing.
        path = os.path.join(directoryPrefix, newName)
        # was "assert path is not ''": identity comparison with a string
        # literal is always True, so the check never fired; compare by value.
        assert path != ""
        try:
            import PCRaster
        except ImportError:
            import pcraster as PCRaster
        if os.path.isfile(path):
            return cover(PCRaster.readmap(path), default)
        else:
            #logger.warn("returning 0.0")
            return scalar(default)
    elif style == 2:
        # In-memory interface: fetch (or lazily create) the attribute on
        # the user model. getattr/setattr replace the original exec
        # string-building, which was fragile and injection-prone.
        model = self._userModel()
        if not hasattr(model, varname):
            setattr(model, varname, cover(scalar(default)))
        return getattr(model, varname)
    else:
        return cover(scalar(default))
import PCRaster as pcr

# Annual run window: loop over [staYear, endYear) in steps of timeStep.
# (int() wrappers around integer literals were no-ops and are dropped.)
staYear = 1960
endYear = 2001
timeStep = 1

for year in range(staYear, endYear, timeStep):
    # Irrigated-area map for this year, e.g. irrigation\irra1960.map
    irrArea = pcr.scalar('irrigation\\irra%04d.map' % (year))
    # for month in range(staMonth,endMonth,timeStep):
    #     totDemand= 10
    #     otherDemands= pcr.scalar('otherWDs\\othr%04d.0%02d' % (year,month))
    #     totDemandAnn = otherDemands + totDemand
    #     totDemandList[yCnt]= pcr.scalar(totDemandAnn)
    #     yCnt+=1
##Yoshihide Wada, Last updated on 06/Feb/2012

#Modules
import os, calendar
import PCRaster as pcr

#Run time: loop bounds (end values exclusive, per range() semantics).
#(int() wrappers around integer literals were no-ops and are dropped.)
staYear = 2000
endYear = 2001
staMonth = 1
endMonth = 13
timeStep = 1

#Global input maps
cloneFile = 'globalclone.map'
pcr.setclone(cloneFile)
clone = pcr.readmap(cloneFile)
cellArea = pcr.readmap('cellarea30.map')
LDD = pcr.readmap('globalldd.map')
country = pcr.readmap('country.map')
gwab2000 = pcr.readmap('groundwater\\gwab_2000_mlnm3.map')
efficiency = pcr.readmap('efficiency\\efficiency.map')
rf1 = pcr.readmap('rf\\rfcrops1.map')
rf2 = pcr.readmap('rf\\rfcrops2.map')
kfc2 = pcr.readmap('irrigation\\kfc100.map')

#import from PCR-GLOBWB per calibration time step
##esp= 'PCRGLOBWB\\esp\\esp0%04d.%03d' #potential bare soil evaporation (m/day)
##esa= 'PCRGLOBWB\\esa\\esac%04d.%03d' #actual bare soil evaporation (m/day)
##t1p= 'PCRGLOBWB\\t1p\\t1p0%04d.%03d' #potential transpiration from soil layer 1 (m/day)
##t2p= 'PCRGLOBWB\\t2p\\t2p0%04d.%03d' #potential transpiration from soil layer 2 (m/day)
import PCRaster as pcr

# Static input, read once outside the loop.
firstInput = pcr.readmap("first_map")

for year in range(1960, 2001, 1):
    secondInput = pcr.readmap("second_map_%04d" % (year))
    # Renamed from 'sum' so the builtin of that name is not shadowed.
    totalMap = firstInput + secondInput
    pcr.report(totalMap, "sum_map_%04d" % (year))
import os, zlib, zipfile
from scipy import optimize
import numpy as np
import PCRaster as pcr
from PCRaster.NumPy import pcr2numpy, numpy2pcr
from PCRaster.Framework import generateNameT

# Input archives with monthly results and specific-runoff results.
monthlyArchiveFile = 'cru_alpha_sc_results.zip'
specificQArchiveFile = 'cru_specificrunoff_results.zip'
# Templates for the per-period runoff map file names.
rootQSpecFileNames = ['waterrunoff%s.map', 'landrunoff%s.map']
rootQFileName = 'qc'
MV = -999.  # missing-value marker

# Analysis period (endYear inclusive via endYear+1 below).
startYear = 1958; endYear = 2001
yearList = range(startYear, endYear + 1)
rootR3AVGFileName = 'r3_avg%s.map'

# Static input maps: drainage networks, cell geometry, water masks.
LDD = pcr.readmap('glwd_lddlake.map')
LDDBasins = pcr.readmap('glwd130m_ldd.map')
cellArea = pcr.readmap('cellarea30.map')
fracWat = pcr.readmap('glwd130m_fracw.map')
lakeMask = pcr.readmap('lake.map') != 0

# Delineate catchments from the basin LDD's pits and report them.
catchments = pcr.catchment(LDDBasins, pcr.pit(LDDBasins))
pcr.report(catchments, 'catchments.map')
# Highest catchment id (cellvalue returns (value, isValid); value taken).
maximumCatchmentID = pcr.cellvalue(pcr.mapmaximum(pcr.scalar(catchments)), 1)[0]
catchmentSizeLimit = 0.

#-main
#-opening zip file
print 'extracting information from zip file'
currentPath = os.getcwd()
zipArchive = zipfile.ZipFile(monthlyArchiveFile)
def __init__(self, tssFilename, model, idMap=None, noHeader=False): """ """ if not isinstance(tssFilename, str): raise Exception( "timeseries output filename must be of type string") self._outputFilename = tssFilename self._maxId = 1 self._spatialId = None self._spatialDatatype = None self._spatialIdGiven = False self._userModel = model self._writeHeader = not noHeader # array to store the timestep values self._sampleValues = None _idMap = False #if isinstance(idMap, str) or isinstance(idMap, PCRaster._PCRaster.Field): if isinstance(idMap, str) or isinstance(idMap, PCRaster._pcraster.Field): _idMap = True nrRows = self._userModel.nrTimeSteps() - self._userModel.firstTimeStep( ) + 1 if _idMap: self._spatialId = idMap if isinstance(idMap, str): self._spatialId = PCRaster.readmap(idMap) _allowdDataTypes = [ PCRaster.Nominal, PCRaster.Ordinal, PCRaster.Boolean ] if self._spatialId.dataType() not in _allowdDataTypes: raise Exception( "idMap must be of type Nominal, Ordinal or Boolean") if self._spatialId.isSpatial(): self._maxId, valid = PCRaster.cellvalue( PCRaster.mapmaximum(PCRaster.ordinal(self._spatialId)), 1) else: self._maxId = 1 # cell indices of the sample locations self._sampleAddresses = [] for cellId in range(1, self._maxId + 1): thecellId = self._getIndex(cellId) if thecellId != 0: self._sampleAddresses.append(thecellId) else: print "CellId " + str(cellId) + " not found." self._spatialIdGiven = True nrCols = self._maxId self._sampleValues = [[Decimal("NaN")] * nrCols for _ in [0] * nrRows] else: self._sampleValues = [[Decimal("NaN")] * 1 for _ in [0] * nrRows]
class wf_TimeoutputTimeseries(object):
    """
    Class to create pcrcalc timeoutput style timeseries
    """

    def __init__(self, tssFilename, model, idMap=None, noHeader=False):
        """
        Set up the writer: validate the filename, optionally resolve the
        sample-location id map, and pre-allocate the value matrix
        (one row per timestep, one column per sample location).
        """
        if not isinstance(tssFilename, str):
            raise Exception(
                "timeseries output filename must be of type string")

        self._outputFilename = tssFilename
        self._maxId = 1
        self._spatialId = None
        self._spatialDatatype = None
        self._spatialIdGiven = False
        self._userModel = model
        self._writeHeader = not noHeader
        # array to store the timestep values
        self._sampleValues = None

        _idMap = False
        #if isinstance(idMap, str) or isinstance(idMap, PCRaster._PCRaster.Field):
        if isinstance(idMap, str) or isinstance(idMap, PCRaster._pcraster.Field):
            _idMap = True

        # one row per timestep of the run period
        nrRows = self._userModel.nrTimeSteps() - self._userModel.firstTimeStep(
        ) + 1

        if _idMap:
            self._spatialId = idMap
            if isinstance(idMap, str):
                self._spatialId = PCRaster.readmap(idMap)

            _allowdDataTypes = [
                PCRaster.Nominal, PCRaster.Ordinal, PCRaster.Boolean
            ]
            if self._spatialId.dataType() not in _allowdDataTypes:
                raise Exception(
                    "idMap must be of type Nominal, Ordinal or Boolean")

            if self._spatialId.isSpatial():
                self._maxId, valid = PCRaster.cellvalue(
                    PCRaster.mapmaximum(PCRaster.ordinal(self._spatialId)), 1)
            else:
                self._maxId = 1

            # cell indices of the sample locations
            self._sampleAddresses = []
            for cellId in range(1, self._maxId + 1):
                thecellId = self._getIndex(cellId)
                if thecellId != 0:
                    self._sampleAddresses.append(thecellId)
                else:
                    print "CellId " + str(cellId) + " not found."

            self._spatialIdGiven = True

            nrCols = self._maxId
            # NaN-filled matrix: nrRows timesteps x nrCols sample locations
            self._sampleValues = [[Decimal("NaN")] * nrCols
                                  for _ in [0] * nrRows]
        else:
            self._sampleValues = [[Decimal("NaN")] * 1 for _ in [0] * nrRows]

    def _getIndex(self, cellId):
        """
        returns the cell index of a sample location
        """
        nrCells = PCRaster.clone().nrRows() * PCRaster.clone().nrCols()
        found = False
        cell = 1
        index = 0

        # linear scan over all raster cells; returns the first valid cell
        # whose value matches cellId, or 0 when none matches
        while found == False and cell <= nrCells:
            if PCRaster.cellvalue(self._spatialId, cell)[1] == True and PCRaster.cellvalue(
                    self._spatialId, cell)[0] == cellId:
                index = cell
                found = True
            cell += 1

        return index

    def sample(self, expression):
        """
        Sampling the current values of 'expression' at the given locations
        for the current timestep
        """
        # row in the value matrix for the current timestep
        arrayRowPos = self._userModel.currentTimeStep(
        ) - self._userModel.firstTimeStep()

        #if isinstance(expression, float):
        #    expression = PCRaster.scalar(expression)

        try:
            # store the data type for tss file header
            if self._spatialDatatype == None:
                self._spatialDatatype = str(expression.dataType())
        except AttributeError, e:
            datatype, sep, tail = str(e).partition(" ")
            msg = "Argument must be a PCRaster map, type %s given. If necessary use data conversion functions like scalar()" % (
                datatype)
            raise AttributeError(msg)

        if self._spatialIdGiven:
            # aggregate per sample area: mean for continuous data,
            # majority vote for categorical data
            if expression.dataType() == PCRaster.Scalar or expression.dataType(
            ) == PCRaster.Directional:
                tmp = PCRaster.areaaverage(PCRaster.spatial(expression),
                                           PCRaster.spatial(self._spatialId))
            else:
                tmp = PCRaster.areamajority(PCRaster.spatial(expression),
                                            PCRaster.spatial(self._spatialId))

            col = 0
            for cellIndex in self._sampleAddresses:
                value, valid = PCRaster.cellvalue(tmp, cellIndex)
                if not valid:
                    value = Decimal("NaN")
                self._sampleValues[arrayRowPos][col] = value
                col += 1
        else:
            # no id map given: reduce the whole map to a single value
            if expression.dataType() == PCRaster.Scalar or expression.dataType(
            ) == PCRaster.Directional:
                tmp = PCRaster.maptotal(PCRaster.spatial(expression))\
                    / PCRaster.maptotal(PCRaster.scalar(PCRaster.defined(PCRaster.spatial(expression))))
            else:
                tmp = PCRaster.mapmaximum(PCRaster.maptotal(PCRaster.areamajority(PCRaster.spatial(expression),\
                    PCRaster.spatial(PCRaster.nominal(1)))))

            value, valid = PCRaster.cellvalue(tmp, 1)
            if not valid:
                value = Decimal("NaN")
            # NOTE(review): this replaces the row *list* with a bare scalar,
            # unlike the spatial branch which writes into columns;
            # presumably _writeTssFile handles both shapes -- verify.
            self._sampleValues[arrayRowPos] = value

        # flush to disk once the final timestep has been sampled
        if self._userModel.currentTimeStep() == self._userModel.nrTimeSteps():
            self._writeTssFile()