def getgridparams():
    """
    Return the clone map grid parameters in a python friendly way.

    Output: [xul, yul, xsize, ysize, rows, cols, xlr, ylr]
        - xul, yul  - x and y coordinate of the upper-left cell centre
        - xsize, ysize - size of a cell in x and y direction (identical here)
        - rows, cols - number of rows and columns
        - xlr, ylr  - x and y coordinate of the lower-right cell centre
    """
    # This is the default, but add for safety...
    pcr.setglobaloption("coorcentre")
    # x and y cell sizes are taken to be identical for now
    xy = pcr.pcr2numpy(pcr.celllength(), np.nan)[0, 0]
    # Convert the coordinate maps to numpy only once and reuse the arrays
    # for both corners (pcr2numpy is comparatively expensive), and call
    # getrows()/getcols() once instead of four times.
    xcoords = pcr.pcr2numpy(pcr.xcoordinate(1), np.nan)
    ycoords = pcr.pcr2numpy(pcr.ycoordinate(1), np.nan)
    rows = getrows()
    cols = getcols()
    xu = xcoords[0, 0]
    yu = ycoords[0, 0]
    xlr = xcoords[rows - 1, cols - 1]
    ylr = ycoords[rows - 1, cols - 1]
    return [xu, yu, xy, xy, rows, cols, xlr, ylr]
def __init__(self, configuration, model, specificAttributeDictionary=None):
    """Initialise netCDF reporting: grid coordinate axes, crop/depth axes
    and output format options derived from the configuration object."""
    # Set clone map
    pcr.setclone(configuration.cloneMap)
    cloneMap = pcr.boolean(1.0)  # map with all cell values equal to 1
    # Retrieve latitudes and longitudes from clone map; np.unique returns
    # ascending values, so latitudes are reversed to run north-to-south
    self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
    self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
    # 1-based crop and soil-compartment axes
    self.crops = np.arange(1, model.nCrop + 1)
    self.depths = np.arange(1, model.nComp + 1)
    # Let users decide what their preference regarding latitude order
    self.netcdf_y_orientation_follow_cf_convention = False
    if 'netcdf_y_orientation_follow_cf_convention' in configuration.reportingOptions.keys() and\
       configuration.reportingOptions['netcdf_y_orientation_follow_cf_convention'] == "True":
        # NOTE(review): msg is assigned but never logged or used here
        msg = "Latitude (y) orientation for output netcdf files start from the bottom to top."
        self.netcdf_y_orientation_follow_cf_convention = True
        # ascending (south-to-north) latitudes, per CF convention
        self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))
    # Set general netcdf attributes (based on the information given in the ini/configuration file)
    self.set_general_netcdf_attributes(configuration, specificAttributeDictionary)
    # netcdf format and zlib setup (defaults, overridable from the ini file)
    self.format = 'NETCDF3_CLASSIC'
    self.zlib = False
    if "formatNetCDF" in configuration.reportingOptions.keys():
        self.format = str(configuration.reportingOptions['formatNetCDF'])
    if "zlib" in configuration.reportingOptions.keys():
        if configuration.reportingOptions['zlib'] == "True":
            self.zlib = True
def checkerboard(mapin, fcc):
    """
    Create a checkerboard map with a unique ordinal id per fcc*fcc cell area.

    The resulting map can be used to derive statistics for (later)
    upscaling of maps (using the fcc factor).

    .. warning: use with unitcell to get most reliable results!

    Input:
        - mapin - map used to determine coordinates
        - fcc - size of the areas in cells

    Output:
        - checkerboard type map (ordinal)
    """
    valid = pcr.defined(mapin)
    # block-row index: cell offset from the map's southern edge, floored per fcc block
    row_blocks = pcr.rounddown(
        ((pcr.ycoordinate(valid) - pcr.mapminimum(pcr.ycoordinate(valid)))
         / pcr.celllength()) / fcc
    )
    # block-column index: cell offset from the map's western edge, floored per fcc block
    col_blocks = pcr.rounddown(
        ((pcr.xcoordinate(valid) - pcr.mapminimum(pcr.xcoordinate(valid)))
         / pcr.celllength()) / fcc
    )
    # fold the two block indices into one unique id per fcc*fcc block
    return pcr.ordinal(col_blocks + row_blocks * (pcr.mapmaximum(col_blocks) + 1.0))
def __init__(self, iniItems, specificAttributeDictionary=None):
    """Initialise netCDF reporting: grid coordinates, format options and
    netCDF attributes taken from the iniItems configuration object."""
    # cloneMap
    pcr.setclone(iniItems.cloneMap)
    cloneMap = pcr.boolean(1.0)
    # latitudes and longitudes; np.unique is ascending, so latitudes are
    # reversed to run north-to-south by default
    self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[
        ::-1
    ]
    self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
    # Let users decide what their preference regarding latitude order.
    self.netcdf_y_orientation_follow_cf_convention = False
    if (
        "netcdf_y_orientation_follow_cf_convention"
        in list(iniItems.reportingOptions.keys())
        and iniItems.reportingOptions["netcdf_y_orientation_follow_cf_convention"]
        == "True"
    ):
        # NOTE(review): msg is assigned but never logged or used here
        msg = "Latitude (y) orientation for output netcdf files start from the bottom to top."
        self.netcdf_y_orientation_follow_cf_convention = True
        # ascending (south-to-north) latitudes, per CF convention
        self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))
    # set the general netcdf attributes (based on the information given in the ini/configuration file)
    self.set_general_netcdf_attributes(iniItems, specificAttributeDictionary)
    # netcdf format and zlib setup (defaults, overridable from the ini file)
    self.format = "NETCDF3_CLASSIC"
    self.zlib = False
    if "formatNetCDF" in list(iniItems.reportingOptions.keys()):
        self.format = str(iniItems.reportingOptions["formatNetCDF"])
    if "zlib" in list(iniItems.reportingOptions.keys()):
        if iniItems.reportingOptions["zlib"] == "True":
            self.zlib = True
    # if given in the ini file, use the netcdf attributes as given in the
    # section 'specific_attributes_for_netcdf_output_files'
    if "specific_attributes_for_netcdf_output_files" in iniItems.allSections:
        for key in list(
            iniItems.specific_attributes_for_netcdf_output_files.keys()
        ):
            self.attributeDictionary[
                key
            ] = iniItems.specific_attributes_for_netcdf_output_files[key]
            # "None" in the ini file means an empty attribute value
            if self.attributeDictionary[key] == "None":
                self.attributeDictionary[key] = ""
            # "Default" history / date attributes get the current timestamp
            if key == "history" and self.attributeDictionary[key] == "Default":
                self.attributeDictionary[
                    key
                ] = "created on " + datetime.datetime.today().isoformat(" ")
            if self.attributeDictionary[key] == "Default" and (
                key == "date_created" or key == "date_issued"
            ):
                self.attributeDictionary[key] = datetime.datetime.today().isoformat(
                    " "
                )
def __init__(self, iniItems, specificAttributeDictionary=None):
    """Initialise netCDF reporting: grid coordinates, format options and
    netCDF attributes taken from the iniItems configuration object."""
    # cloneMap
    pcr.setclone(iniItems.cloneMap)
    cloneMap = pcr.boolean(1.0)
    # latitudes and longitudes; np.unique is ascending, so latitudes are
    # reversed to run north-to-south by default
    self.latitudes = np.unique(
        pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
    self.longitudes = np.unique(
        pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
    # Let users decide what their preference regarding latitude order.
    self.netcdf_y_orientation_follow_cf_convention = False
    if ("netcdf_y_orientation_follow_cf_convention" in list(
            iniItems.reportingOptions.keys()) and iniItems.
            reportingOptions["netcdf_y_orientation_follow_cf_convention"]
            == "True"):
        # NOTE(review): msg is assigned but never logged or used here
        msg = "Latitude (y) orientation for output netcdf files start from the bottom to top."
        self.netcdf_y_orientation_follow_cf_convention = True
        # ascending (south-to-north) latitudes, per CF convention
        self.latitudes = np.unique(
            pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))
    # set the general netcdf attributes (based on the information given in the ini/configuration file)
    self.set_general_netcdf_attributes(iniItems, specificAttributeDictionary)
    # netcdf format and zlib setup (defaults, overridable from the ini file)
    self.format = "NETCDF3_CLASSIC"
    self.zlib = False
    if "formatNetCDF" in list(iniItems.reportingOptions.keys()):
        self.format = str(iniItems.reportingOptions["formatNetCDF"])
    if "zlib" in list(iniItems.reportingOptions.keys()):
        if iniItems.reportingOptions["zlib"] == "True":
            self.zlib = True
    # if given in the ini file, use the netcdf attributes as given in the
    # section 'specific_attributes_for_netcdf_output_files'
    if "specific_attributes_for_netcdf_output_files" in iniItems.allSections:
        for key in list(
                iniItems.specific_attributes_for_netcdf_output_files.keys()):
            self.attributeDictionary[
                key] = iniItems.specific_attributes_for_netcdf_output_files[
                    key]
            # "None" in the ini file means an empty attribute value
            if self.attributeDictionary[key] == "None":
                self.attributeDictionary[key] = ""
            # "Default" history / date attributes get the current timestamp
            if key == "history" and self.attributeDictionary[
                    key] == "Default":
                self.attributeDictionary[
                    key] = "created on " + datetime.datetime.today(
                    ).isoformat(" ")
            if self.attributeDictionary[key] == "Default" and (
                    key == "date_created" or key == "date_issued"):
                self.attributeDictionary[key] = datetime.datetime.today(
                ).isoformat(" ")
def getRowColPoint(in_map, xcor, ycor):
    """
    Return the (row, col) indices in a map at the point given.
    Works but is rather slow.

    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate
        - ycor - y coordinate

    Output:
        - row, column
    """
    mask = pcr.boolean(pcr.scalar(in_map) + 1.0)
    xs = pcr.pcr2numpy(pcr.xcoordinate(mask), np.nan)
    ys = pcr.pcr2numpy(pcr.ycoordinate(mask), np.nan)
    cell = pcr.pcr2numpy(pcr.celllength(), 0.0)[0, 0]
    # half a cell on either side selects exactly one point
    tolerance = 0.5
    within_col = np.absolute(xs - xcor) <= cell * tolerance
    within_row = np.absolute(ys - ycor) <= cell * tolerance
    hit = within_col * within_row
    return hit.argmax(0).max(), hit.argmax(1).max()
def getValAtPoint(in_map, xcor, ycor):
    """
    Return the value in a map at the point given.
    Works but is rather slow.

    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate
        - ycor - y coordinate

    Output:
        - value
    """
    valid = pcr.defined(in_map)
    xs = pcr.pcr2numpy(pcr.xcoordinate(valid), np.nan)
    ys = pcr.pcr2numpy(pcr.ycoordinate(valid), np.nan)
    cell = pcr.pcr2numpy(pcr.celllength(), 0.0)[0, 0]
    values = pcr.pcr2numpy(in_map, np.nan)
    # half a cell on either side selects exactly one point
    tolerance = 0.5
    hit = (np.absolute(xs - xcor) <= cell * tolerance) * (
        np.absolute(ys - ycor) <= cell * tolerance
    )
    return values.ravel()[hit.argmax()]
def __init__(self, netcdffile, logging):
    """
    Set up a reader for static netcdf input (converted with pcr2netcdf.py).

    netcdffile: file to read the forcing data from
    logging: python logging object

    Raises ValueError when the file does not exist.
    """
    if os.path.exists(netcdffile):
        self.dataset = netCDF4.Dataset(netcdffile, mode="r")
    else:
        msg = os.path.abspath(netcdffile) + " not found!"
        logging.error(msg)
        raise ValueError(msg)
    # Coordinate variables may be named x/y (projected) or lon/lat (geographic)
    try:
        self.x = self.dataset.variables["x"][:]
    except KeyError:
        self.x = self.dataset.variables["lon"][:]
    try:
        self.y = self.dataset.variables["y"][:]
    except KeyError:
        self.y = self.dataset.variables["lat"][:]
    # Model grid cell-centre coordinates from the current pcraster clone
    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[0, :]
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[:, 0]
    # BUGFIX: lonidx must come from the x coordinates and latidx from the
    # y coordinates (the assignments were swapped), and the y-range test
    # used x.min() instead of y.min().
    (self.lonidx,) = np.logical_and(self.x >= x.min(), self.x < x.max()).nonzero()
    (self.latidx,) = np.logical_and(self.y >= y.min(), self.y < y.max()).nonzero()
    logging.info("Reading static input from netCDF file: " + netcdffile)
def getCoordinates(cloneMap, MV=-9999):
    '''Return cell-centre coordinates for a clone map as numpy arrays.

    Returns (longitudes, latitudes).
    '''
    covered = pcr.cover(pcr.boolean(cloneMap), pcr.boolean(1))
    lons = pcr.pcr2numpy(pcr.xcoordinate(covered), MV)
    lats = pcr.pcr2numpy(pcr.ycoordinate(covered), MV)
    # x varies along a row, y along a column; row/column index 1 is arbitrary
    return lons[1, :], lats[:, 1]
def __init__(self, cloneMapFileName, netcdf_attribute_description):
    """Derive grid coordinates from a clone map file and set the fixed
    EFAS-Meteo netCDF attributes."""
    # cloneMap
    cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
    # NOTE(review): the next line overwrites the map just read with a
    # constant-1 map; readmap apparently only serves to establish the
    # clone extent — confirm intent
    cloneMap = pcr.boolean(pcr.scalar(1.0))
    # latitudes and longitudes (latitudes stored north-to-south)
    self.latitudes = np.unique(
        pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
    self.longitudes = np.unique(
        pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
    # netCDF format and attributes:
    self.format = 'NETCDF3_CLASSIC'
    self.attributeDictionary = {}
    self.attributeDictionary['institution'] = "European Commission - JRC"
    self.attributeDictionary[
        'title'] = "EFAS-Meteo 5km for the Rhine-Meuse basin"
    self.attributeDictionary[
        'source'] = "5km Gridded Meteo Database (C) European Commission - JRDC, 2014"
    self.attributeDictionary[
        'history'] = "The data were provided by Ad de Roo ([email protected]) on 19 November 2014 and then converted by Edwin H. Sutanudjaja ([email protected]) to netcdf files on 27 November 2014."
    self.attributeDictionary[
        'references'] = "Ntegeka et al., 2013. EFAS-Meteo: A European daily high-resolution gridded meteorological data set. JRC Technical Reports. doi: 10.2788/51262"
    self.attributeDictionary[
        'comment'] = "Please use this dataset only for Hyper-Hydro test bed experiments. "
    self.attributeDictionary[
        'comment'] += "For using it and publishing it, please acknowledge its source: 5km Gridded Meteo Database (C) European Commission - JRDC, 2014 and its reference: Ntegeka et al., 2013 (doi: 10.2788/51262). "
    self.attributeDictionary[
        'comment'] += "The data are in European ETRS projection, 5km grid; http://en.wikipedia.org/wiki/European_grid. "
    self.attributeDictionary['description'] = netcdf_attribute_description
def __init__(self, iniItems):
    """Derive grid coordinates from the clone map and set netCDF attributes
    and format options from the iniItems configuration object."""
    # cloneMap
    pcr.setclone(iniItems.cloneMap)
    cloneMap = pcr.boolean(1.0)
    # latitudes and longitudes (latitudes stored north-to-south)
    # CONSISTENCY FIX: use pcr.pcr2numpy as everywhere else in this file;
    # the bare pcr2numpy name is not guaranteed to be in scope.
    self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
    self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
    # TODO: Let users decide what their preference regarding latitude order.
    # Consult with Stefanie regarding CF convention.
    # netCDF attributes:
    self.attributeDictionary = {}
    self.attributeDictionary['institution'] = iniItems.globalOptions[
        'institution']
    self.attributeDictionary['title'] = iniItems.globalOptions['title']
    self.attributeDictionary['description'] = iniItems.globalOptions[
        'description']
    # netcdf format and zlib setup (redundant duplicate format assignment removed)
    self.format = 'NETCDF3_CLASSIC'
    self.zlib = False
    if "formatNetCDF" in iniItems.reportingOptions.keys():
        self.format = str(iniItems.reportingOptions['formatNetCDF'])
    if "zlib" in iniItems.reportingOptions.keys():
        if iniItems.reportingOptions['zlib'] == "True":
            self.zlib = True
def spatial(self):
    """Compute the required biosafe output for a spatial domain.

    Returns a list of DataFrames, one per biosafe parameter, each indexed
    by floodplain-section ID and joined with the section's representative
    point coordinates.
    """
    # - determine a representative point for each floodplain section
    points = pcrr.representativePoint(self.sections)
    clone = pcr.defined(self.sections)
    pcr.setglobaloption('unittrue')
    xcoor = pcr.xcoordinate(clone)
    ycoor = pcr.ycoordinate(clone)
    geoDf = pcrr.getCellValues(points, \
                               mapList=[points, xcoor, ycoor], \
                               columns=['ID', 'xcoor', 'ycoor'])
    geoDf.set_index('ID', inplace=True, drop=False)
    geoDf.drop(['rowIdx', 'colIdx', 'ID'], axis=1, inplace=True)
    # - compute the required biosafe parameters for all sections
    # (first unique value is the -9999 missing-value marker, hence [1:])
    sectionIDs = np.unique(pcr.pcr2numpy(self.sections, -9999))[1:]
    ll = []
    for sectionID in sectionIDs:
        ll.append(self.sectionScores(sectionID))
    # BUGFIX: zip() returns a one-shot, non-subscriptable iterator in
    # Python 3; materialise it so paramLL[ii] below works.
    paramLL = list(zip(*ll))
    dfParamLL = []
    for ii in range(len(self.params)):
        bsScores = pd.concat(paramLL[ii], axis=1).T
        bsScores = bsScores.join(geoDf)
        bsScores.index.name = 'ID'
        bsScores.columns.name = self.params[ii]
        dfParamLL.append(bsScores)
    return dfParamLL
def __init__(self, cloneMapFileName, resetClone=None, attributeDictionary=None):
    """Derive grid coordinates from a clone map file and set netCDF
    attributes; optionally switch the clone during coordinate extraction
    and restore it (resetClone) afterwards."""
    # cloneMap
    if resetClone != None:
        pcr.setclone(cloneMapFileName)
    cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
    # NOTE(review): the next line overwrites the map just read with a
    # constant-1 map; readmap apparently only serves to establish the
    # clone extent — confirm intent
    cloneMap = pcr.boolean(pcr.scalar(1.0))
    # latitudes and longitudes (latitudes stored north-to-south)
    self.latitudes = np.unique(
        pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
    self.longitudes = np.unique(
        pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
    # reset clone (if necessary)
    if resetClone != None:
        pcr.setclone(resetClone)
    # netcdf format:
    self.format = 'NETCDF3_CLASSIC'
    # default attributes are the string "None" unless a dictionary is given
    self.attributeDictionary = {}
    if attributeDictionary == None:
        self.attributeDictionary['institution'] = "None"
        self.attributeDictionary['title'] = "None"
        self.attributeDictionary['source'] = "None"
        self.attributeDictionary['history'] = "None"
        self.attributeDictionary['references'] = "None"
        self.attributeDictionary['description'] = "None"
        self.attributeDictionary['comment'] = "None"
    else:
        self.attributeDictionary = attributeDictionary
def boundingBox(pcrmap):
    '''Derive the bounding box for a map.

    Returns [xmin, ymin, xmax, ymax] with the minima floored and the
    maxima ceiled to whole units.
    '''
    xs = pcr.xcoordinate(pcrmap)
    ys = pcr.ycoordinate(pcrmap)
    # extract the scalar extremes from the (constant) min/max maps
    x_min = pcr.cellvalue(pcr.mapminimum(xs), 1, 1)[0]
    x_max = pcr.cellvalue(pcr.mapmaximum(xs), 1, 1)[0]
    y_min = pcr.cellvalue(pcr.mapminimum(ys), 1, 1)[0]
    y_max = pcr.cellvalue(pcr.mapmaximum(ys), 1, 1)[0]
    return [math.floor(x_min), math.floor(y_min), math.ceil(x_max), math.ceil(y_max)]
def get_model_dimensions(self):
    """Function to set model dimensions: lat/lon axes, active cell count,
    number of soil layers, and the dimensions dict used for output."""
    self.nLat = int(self.cloneMapAttributes['rows'])
    # latitudes north-to-south (np.unique is ascending, hence [::-1])
    self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(self.cloneMap), vos.MV))[::-1]
    self.nLon = int(self.cloneMapAttributes['cols'])
    self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(self.cloneMap), vos.MV))
    # number of active (land) cells in the mask
    self.nCell = int(np.sum(self.landmask))
    self.nLayer = 3  # FIXED
    # dimension name -> coordinate values (None = unlimited time axis)
    self.dimensions = {
        'time': None,
        'depth': np.arange(self.nLayer),  # TODO - put nComp in config section [SOIL]
        'lat': self.latitudes,
        'lon': self.longitudes,
    }
def map_edges(clone):
    """Boolean map true map edges, false elsewhere"""
    pcr.setglobaloption('unittrue')
    xmin, xmax, ymin, ymax, nr_rows, nr_cols, cell_size = clone_attributes()
    # NOTE(review): both branches yield boolean(1), so this produces a
    # constant-true map regardless of defined(); presumably the else branch
    # was meant to be boolean(0) — confirm intent before changing
    clone = pcr.ifthenelse(pcr.defined(clone), pcr.boolean(1), pcr.boolean(1))
    x_coor = pcr.xcoordinate(clone)
    y_coor = pcr.ycoordinate(clone)
    # a cell is an edge cell when its centre lies within one cell size of
    # the respective map boundary
    north = y_coor > (ymax - cell_size)
    south = y_coor < (ymin + cell_size)
    west = x_coor < (xmin + cell_size)
    east = x_coor > (xmax - cell_size)
    edges = north | south | west | east
    return edges
def pcr2col(listOfMaps, MV, selection='ONE_TRUE'): """converts a set of maps to a column array: X, Y, map values selection can be set to ALL, ALL_TRUE, ONE_TRUE""" #-intersect all maps and get X and Y coordinates intersection = pcr.boolean(pcr.cover(listOfMaps[0], 0)) for mapX in listOfMaps[1:]: intersection = intersection | pcr.boolean(pcr.cover(mapX, 0)) pcr.setglobaloption("unittrue") xCoor = pcr.ifthen(intersection, pcr.xcoordinate(intersection)) yCoor = pcr.ifthen(intersection, pcr.ycoordinate(intersection)) pcr.setglobaloption("unitcell") #-initiate outArray with xCoor and yCoor xCoorArr = pcr.pcr2numpy(xCoor, MV) yCoorArr = pcr.pcr2numpy(yCoor, MV) nRows, nCols = xCoorArr.shape nrCells = nRows * nCols outArray = np.hstack((xCoorArr.reshape(nrCells, 1), yCoorArr.reshape(nrCells, 1))) #-add subsequent maps for mapX in listOfMaps: arr = pcr.pcr2numpy(mapX, MV).reshape(nrCells, 1) outArray = np.hstack((outArray, arr)) #-subset output based on selection criterium ll = [] nrMaps = len(listOfMaps) if selection == 'ONE_TRUE': for line in outArray: nrMV = len(line[line == MV]) if nrMV < nrMaps: ll.append(line) else: pass outArray = np.array(ll) elif selection == 'ALL_TRUE': for line in outArray: if MV not in line: ll.append(line) else: pass outArray = np.array(ll) elif selection == 'ALL': pass return outArray
def points_to_map(in_map, xcor, ycor, tolerance):
    """
    Returns a map with non zero values at the points defined
    in X, Y pairs. It's goal is to replace the pcraster col2map program.

    tolerance should be 0.5 to select single points
    Performance is not very good and scales linear with the number of points

    Input:
        - in_map - map to determine coordinates from
        - xcor - x coordinate (array or single value)
        - ycor - y coordinate (array or single value)
        - tolerance - tolerance in cell units. 0.5 selects a single cell\
        10 would select a 10x10 block of cells

    Output:
        - Map with values burned in. 1 for first point, 2 for second and so on
    """
    # start from an all-zero map with the same extent as in_map
    point = in_map * 0.0
    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.defined(in_map)), np.nan)
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.defined(in_map)), np.nan)
    cell_length = float(pcr.celllength())

    # simple check to use both floats and numpy arrays
    try:
        c = xcor.ndim
    except:
        xcor = np.array([xcor])
        ycor = np.array([ycor])

    # Loop over points and "burn in" map
    for n in range(0, xcor.size):
        # NOTE(review): Verbose is read from module scope — confirm it is
        # defined where this function is used
        if Verbose:
            print(n)
        diffx = x - xcor[n]
        diffy = y - ycor[n]
        col_ = np.absolute(diffx) <= (cell_length * tolerance)  # cellsize
        row_ = np.absolute(diffy) <= (cell_length * tolerance)  # cellsize
        # value n+1 marks the n-th point; overlapping points accumulate
        point = point + pcr.numpy2pcr(pcr.Scalar, ((col_ * row_) * (n + 1)), np.nan)
    return pcr.ordinal(point)
def __init__(self, cloneMapFile, attribute=None, cellSizeInArcMinutes=None):
    """Derive grid coordinates from a (5 arc-min) clone map file and set
    netCDF format and attributes."""
    # cloneMap
    # - the cloneMap must be at 5 arc min resolution
    cloneMap = pcr.readmap(cloneMapFile)
    # NOTE(review): the next line replaces the map just read with a
    # constant-1 map; readmap appears only to serve map loading — confirm
    cloneMap = pcr.boolean(1.0)
    # latitudes and longitudes (latitudes stored north-to-south)
    self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
    self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))

    #~ # properties of the clone map
    #~ # - number of rows and columns
    #~ self.nrRows = np.round(pcr.clone().nrRows())
    #~ self.nrCols = np.round(pcr.clone().nrCols())
    #~ # - upper right coordinate, unit: arc degree ; must be integer (without decimals)
    #~ self.minLongitude = np.round(pcr.clone().west() , 0)
    #~ self.maxLatitude = np.round(pcr.clone().north(), 0)
    #~ # - cell resolution, unit: arc degree
    #~ self.cellSize = pcr.clone().cellSize()
    #~ if cellSizeInArcMinutes != None: self.cellSize = cellSizeInArcMinutes / 60.0
    #~ # - lower right coordinate, unit: arc degree ; must be integer (without decimals)
    #~ self.maxLongitude = np.round(self.minLongitude + self.cellSize*self.nrCols, 0)
    #~ self.minLatitude = np.round(self.maxLatitude - self.cellSize*self.nrRows, 0)
    #~
    #~ # latitudes and longitudes for netcdf files
    #~ latMin = self.minLatitude + self.cellSize / 2
    #~ latMax = self.maxLatitude - self.cellSize / 2
    #~ lonMin = self.minLongitude + self.cellSize / 2
    #~ lonMax = self.maxLongitude - self.cellSize / 2
    #~ self.longitudes = np.arange(lonMin,lonMax+self.cellSize, self.cellSize)
    #~ self.latitudes= np.arange(latMax,latMin-self.cellSize,-self.cellSize)

    # netCDF format and attributes:
    self.format = 'NETCDF4'
    # default attributes are the string "None" unless a dictionary is given
    self.attributeDictionary = {}
    if attribute == None:
        self.attributeDictionary['institution'] = "None"
        self.attributeDictionary['title'] = "None"
        self.attributeDictionary['description'] = "None"
    else:
        self.attributeDictionary = attribute
def __init__(self, cloneMapFileName_or_latlonDict, attributeDictionary=None):
    """Define the output grid either from a clone map file (str argument)
    or from a dict with explicit 'lat'/'lon' arrays, then set netCDF
    format and attributes."""
    # cloneMap
    if isinstance(cloneMapFileName_or_latlonDict, str):
        # define latitudes and longitudes based on cloneMap
        cloneMapFileName = cloneMapFileName_or_latlonDict
        pcr.setclone(cloneMapFileName)
        cloneMap = pcr.boolean(1.0)
        # latitudes and longitudes (latitudes stored north-to-south)
        self.latitudes = np.unique(
            pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
        self.longitudes = np.unique(
            pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
    else:
        # define latitudes and longitudes based on latlonDict
        # NOT TESTED YET
        latlonDict = cloneMapFileName_or_latlonDict
        self.latitudes = latlonDict['lat']
        self.longitudes = latlonDict['lon']
    # make sure that latitudes are from high to low and longitudes
    # from low to high
    if self.latitudes[-1] > self.latitudes[0]:
        self.latitudes = self.latitudes[::-1]
    if self.longitudes[-1] < self.longitudes[0]:
        self.longitudes = self.longitudes[::-1]
    # netcdf format:
    self.format = 'NETCDF3_CLASSIC'
    # default attributes are the string "None" unless a dictionary is given
    self.attributeDictionary = {}
    if attributeDictionary == None:
        self.attributeDictionary['institution'] = "None"
        self.attributeDictionary['title'] = "None"
        self.attributeDictionary['source'] = "None"
        self.attributeDictionary['history'] = "None"
        self.attributeDictionary['references'] = "None"
        self.attributeDictionary['description'] = "None"
        self.attributeDictionary['comment'] = "None"
    else:
        self.attributeDictionary = attributeDictionary
def detRealCellLength(ZeroMap, sizeinmetres):
    """
    Determine the cell length. Always returns the length in metres.

    Returns (xl, yl, reallength) where reallength is the average of the
    x and y lengths, used for surface-area calculations.
    """
    if sizeinmetres:
        # projected map: celllength is already in metres
        xl = pcr.celllength()
        yl = pcr.celllength()
        reallength = pcr.celllength()
    else:
        # geographic map: convert degree-based lengths to metres per latitude
        lat_map = pcr.ycoordinate(pcr.boolean(pcr.cover(ZeroMap + 1, 1)))
        yl, xl = lattometres(lat_map)
        xl = xl * pcr.celllength()
        yl = yl * pcr.celllength()
        # Average length for surface area calculations.
        reallength = (xl + yl) * 0.5
    return xl, yl, reallength
def __init__(self, clone_map_file_name):
    """Set the clone map, derive grid coordinate axes and initialise the
    default netCDF output options."""
    pcr.setclone(clone_map_file_name)
    all_ones = pcr.boolean(1.0)
    # latitudes north-to-south (np.unique is ascending), longitudes ascending
    self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(all_ones), vos.MV))[::-1]
    self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(all_ones), vos.MV))
    # Let users decide what their preference regarding latitude order.
    self.netcdf_y_orientation_follow_cf_convention = False
    # set the general netcdf attributes
    self.set_general_netcdf_attributes()
    # netcdf format and zlib setup
    self.format = 'NETCDF4'  # 'NETCDF3_CLASSIC'
    self.zlib = True
def __init__(self, iniItems):
    """Derive grid coordinates from the clone map and set netCDF format
    and attributes from the iniItems configuration object."""
    # cloneMap
    pcr.setclone(iniItems.cloneMap)
    cloneMap = pcr.boolean(1.0)
    # latitudes and longitudes (latitudes stored north-to-south)
    # CONSISTENCY FIX: use pcr.pcr2numpy as everywhere else in this file;
    # the bare pcr2numpy name is not guaranteed to be in scope.
    self.latitudes = np.unique(
        pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
    self.longitudes = np.unique(
        pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
    # TODO: Let users decide what their preference regarding latitude order.
    # Consult with Stefanie regarding CF convention.
    # netCDF format and attributes:
    self.format = 'NETCDF3_CLASSIC'
    self.attributeDictionary = {}
    self.attributeDictionary['institution'] = iniItems.globalOptions['institution']
    self.attributeDictionary['title'] = iniItems.globalOptions['title']
    self.attributeDictionary['description'] = iniItems.globalOptions['description']
def __init__(self, netcdffile, logging):
    """
    Set up a reader for static netcdf input (converted with pcr2netcdf.py).

    netcdffile: file to read the forcing data from
    logging: python logging object

    Raises ValueError when the file does not exist.
    """
    if os.path.exists(netcdffile):
        self.dataset = netCDF4.Dataset(netcdffile, mode="r")
    else:
        msg = os.path.abspath(netcdffile) + " not found!"
        logging.error(msg)
        raise ValueError(msg)
    # Coordinate variables may be named x/y (projected) or lon/lat (geographic)
    try:
        self.x = self.dataset.variables["x"][:]
    except KeyError:
        self.x = self.dataset.variables["lon"][:]
    try:
        self.y = self.dataset.variables["y"][:]
    except KeyError:
        self.y = self.dataset.variables["lat"][:]
    # Model grid cell-centre coordinates from the current pcraster clone
    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[0, :]
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[:, 0]
    # BUGFIX: lonidx must come from the x coordinates and latidx from the
    # y coordinates (the assignments were swapped), and the y-range test
    # used x.min() instead of y.min().
    (self.lonidx, ) = np.logical_and(self.x >= x.min(), self.x < x.max()).nonzero()
    (self.latidx, ) = np.logical_and(self.y >= y.min(), self.y < y.max()).nonzero()
    logging.info("Reading static input from netCDF file: " + netcdffile)
def __init__(self, cloneMapFileName, netcdf_attribute_description):
    """Derive grid coordinates from a clone map file and set the fixed
    EFAS-Meteo netCDF attributes."""
    # cloneMap
    cloneMap = pcr.boolean(pcr.readmap(cloneMapFileName))
    # NOTE(review): the next line overwrites the map just read with a
    # constant-1 map; readmap apparently only serves to establish the
    # clone extent — confirm intent
    cloneMap = pcr.boolean(pcr.scalar(1.0))
    # latitudes and longitudes (latitudes stored north-to-south)
    self.latitudes = np.unique(pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
    self.longitudes = np.unique(pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
    # netCDF format and attributes:
    self.format = 'NETCDF3_CLASSIC'
    self.attributeDictionary = {}
    self.attributeDictionary['institution'] = "European Commission - JRC"
    self.attributeDictionary['title'      ] = "EFAS-Meteo 5km for the Rhine-Meuse basin"
    self.attributeDictionary['source'     ] = "5km Gridded Meteo Database (C) European Commission - JRDC, 2014"
    self.attributeDictionary['history'    ] = "The data were provided by Ad de Roo ([email protected]) on 19 November 2014 and then converted by Edwin H. Sutanudjaja ([email protected]) to netcdf files on 27 November 2014."
    self.attributeDictionary['references' ] = "Ntegeka et al., 2013. EFAS-Meteo: A European daily high-resolution gridded meteorological data set. JRC Technical Reports. doi: 10.2788/51262"
    self.attributeDictionary['comment'    ] = "Please use this dataset only for Hyper-Hydro test bed experiments. "
    self.attributeDictionary['comment'    ] += "For using it and publishing it, please acknowledge its source: 5km Gridded Meteo Database (C) European Commission - JRDC, 2014 and its reference: Ntegeka et al., 2013 (doi: 10.2788/51262). "
    self.attributeDictionary['comment'    ] += "The data are in European ETRS projection, 5km grid; http://en.wikipedia.org/wiki/European_grid. "
    self.attributeDictionary['description'] = netcdf_attribute_description
def __init__(self, netcdffile, logging, vars=[]):
    """
    Set up a reader for state netcdf input (converted with pcr2netcdf.py).

    netcdffile: file to read the forcing data from
    logging: python logging object
    vars: list of variables to get from file

    Raises ValueError when the file does not exist or when the netcdf
    coordinates do not match the model grid.
    """
    self.fname = netcdffile
    if os.path.exists(netcdffile):
        self.dataset = netCDF4.Dataset(netcdffile, mode="r")
    else:
        msg = os.path.abspath(netcdffile) + " not found!"
        logging.error(msg)
        raise ValueError(msg)
    logging.info("Reading state input from netCDF file: " + netcdffile)
    self.alldat = {}
    a = pcr.pcr2numpy(pcr.cover(0.0), 0.0).flatten()
    # Determine steps to load in mem based on estimated memory usage
    floatspermb = 1048576 / 4
    maxmb = 40
    # BUGFIX: int() added — '/' yields a float in Python 3, and a float is
    # not a valid slice bound when reading variables below.
    self.maxsteps = int(maxmb * len(a) / floatspermb) + 1
    self.fstep = 0
    self.lstep = self.fstep + self.maxsteps
    # time axis with sensible fallbacks for missing units/calendar
    self.datetime = self.dataset.variables["time"][:]
    if hasattr(self.dataset.variables["time"], "units"):
        self.timeunits = self.dataset.variables["time"].units
    else:
        self.timeunits = "Seconds since 1970-01-01 00:00:00"
    if hasattr(self.dataset.variables["time"], "calendar"):
        self.calendar = self.dataset.variables["time"].calendar
    else:
        self.calendar = "gregorian"
    self.datetimelist = cftime.num2date(self.datetime, self.timeunits,
                                        calendar=self.calendar)
    # Coordinate variables may be named x/y (projected) or lon/lat (geographic)
    try:
        self.x = self.dataset.variables["x"][:]
    except KeyError:
        self.x = self.dataset.variables["lon"][:]
    # Now check Y values to see if we must flip the data
    try:
        self.y = self.dataset.variables["y"][:]
    except KeyError:
        self.y = self.dataset.variables["lat"][:]
    # test if 1D or 2D array; flip when y runs south-to-north
    if len(self.y.shape) == 1:
        if self.y[0] > self.y[-1]:
            self.flip = False
        else:
            self.flip = True
    else:  # not sure if this works
        self.y = self.y[:][0]
        if self.y[0] > self.y[-1]:
            self.flip = False
        else:
            self.flip = True
    # model grid cell-centre coordinates from the current pcraster clone
    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[0, :]
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[:, 0]
    # Get average cell size; non-exact match needed because of possible
    # rounding problems
    acc = np.diff(x).mean() * 0.25
    if self.flip:
        (self.latidx, ) = np.logical_and(
            self.y[::-1] + acc >= y.min(), self.y[::-1] <= y.max() + acc).nonzero()
        (self.lonidx, ) = np.logical_and(
            self.x + acc >= x.min(), self.x <= x.max() + acc).nonzero()
    else:
        (self.latidx, ) = np.logical_and(
            self.y + acc >= y.min(), self.y <= y.max() + acc).nonzero()
        (self.lonidx, ) = np.logical_and(
            self.x + acc >= x.min(), self.x <= x.max() + acc).nonzero()
    # the netcdf grid must cover exactly the model grid
    if len(self.lonidx) != len(x):
        logging.error("error in determining X coordinates in netcdf...")
        logging.error("model expects: " + str(x.min()) + " to " + str(x.max()))
        logging.error("got coordinates netcdf: " + str(self.x.min()) + " to " +
                      str(self.x.max()))
        logging.error("got len from netcdf x: " + str(len(x)) + " expected " +
                      str(len(self.lonidx)))
        raise ValueError("X coordinates in netcdf do not match model")
    if len(self.latidx) != len(y):
        logging.error("error in determining Y coordinates in netcdf...")
        logging.error("model expects: " + str(y.min()) + " to " + str(y.max()))
        logging.error("got from netcdf: " + str(self.y.min()) + " to " +
                      str(self.y.max()))
        logging.error("got len from netcdf y: " + str(len(y)) + " expected " +
                      str(len(self.latidx)))
        raise ValueError("Y coordinates in netcdf do not match model")
    # preload the first chunk of each requested variable; missing
    # variables are only warned about
    for var in vars:
        try:
            self.alldat[var] = self.dataset.variables[var][self.fstep:self.maxsteps]
        except KeyError:
            self.alldat.pop(var, None)
            logging.warning("Variable " + var + " not found in netcdf file: " +
                            netcdffile)
def __init__(
    self,
    netcdffile,
    logger,
    starttime,
    timesteps,
    EPSG="EPSG:4326",
    timestepsecs=86400,
    metadata={},
    zlib=True,
    Format="NETCDF4",
    maxbuf=25,
    least_significant_digit=None,
):
    """
    Under construction

    Prepare a netCDF output file on the current pcraster clone grid for
    `timesteps` steps starting at `starttime`, with buffered writing of at
    most `maxbuf` steps. The file itself is created by prepare_nc.
    """
    self.EPSG = EPSG
    self.zlib = zlib
    self.Format = Format
    self.least_significant_digit = least_significant_digit

    def date_range(start, end, timestepsecs):
        # all timestamps from start to end (inclusive) at timestepsecs spacing
        r = int(
            (end + dt.timedelta(seconds=timestepsecs) - start).total_seconds()
            / timestepsecs
        )
        return [start + dt.timedelta(seconds=(timestepsecs * i)) for i in range(r)]

    self.logger = logger
    # Do not allow a max buffer larger than the number of timesteps
    self.maxbuf = maxbuf if timesteps >= maxbuf else timesteps
    self.ncfile = netcdffile
    self.timesteps = timesteps
    # NOTE(review): rows/cols/cellsize/yupper/xupper are computed but unused
    rows = pcr.clone().nrRows()
    cols = pcr.clone().nrCols()
    cellsize = pcr.clone().cellSize()
    yupper = pcr.clone().north()
    xupper = pcr.clone().west()
    # cell-centre coordinates of the clone grid
    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[0, :]
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[:, 0]

    # Shift one timestep as we output at the end
    # starttime = starttime + dt.timedelta(seconds=timestepsecs)
    end = starttime + dt.timedelta(seconds=timestepsecs * (self.timesteps - 1))

    timeList = date_range(starttime, end, timestepsecs)
    self.timestepbuffer = np.zeros((self.maxbuf, len(y), len(x)))
    self.bufflst = {}
    self.buffdirty = False

    # globmetadata and prepare_nc are module-level; metadata overrides defaults
    globmetadata.update(metadata)

    prepare_nc(
        self.ncfile,
        timeList,
        x,
        y,
        globmetadata,
        logger,
        Format=self.Format,
        EPSG=EPSG,
        zlib=self.zlib,
        least_significant_digit=self.least_significant_digit,
    )
def __init__(self, netcdffile, logging, vars=[]):
    """
    First try to setup a class read netcdf files
    (converted with pcr2netcdf.py)

    netcdffile: file to read the forcing data from
    logging: python logging object
    vars: list of variables to get from file

    Matches the netCDF x/y axes against the current PCRaster clone map and
    preloads the requested variables (roughly up to ``maxmb`` MB each) into
    memory.
    """
    self.fname = netcdffile
    if os.path.exists(netcdffile):
        self.dataset = netCDF4.Dataset(netcdffile, mode="r")
    else:
        msg = os.path.abspath(netcdffile) + " not found!"
        logging.error(msg)
        raise ValueError(msg)

    logging.info("Reading state input from netCDF file: " + netcdffile)
    self.alldat = {}
    a = pcr.pcr2numpy(pcr.cover(0.0), 0.0).flatten()
    # Determine steps to load in mem based on estimated memory usage
    floatspermb = 1048576 // 4
    maxmb = 40
    # BUG FIX: under Python 3 the original "/" arithmetic produced a float,
    # which later fails when used as a slice bound; force an int here.
    self.maxsteps = int(maxmb * len(a) / floatspermb) + 1
    self.fstep = 0
    self.lstep = self.fstep + self.maxsteps

    self.datetime = self.dataset.variables["time"][:]
    # Time units and calendar fall back to wflow's conventions when the
    # attributes are absent from the file.
    if hasattr(self.dataset.variables["time"], "units"):
        self.timeunits = self.dataset.variables["time"].units
    else:
        self.timeunits = "Seconds since 1970-01-01 00:00:00"
    if hasattr(self.dataset.variables["time"], "calendar"):
        self.calendar = self.dataset.variables["time"].calendar
    else:
        self.calendar = "gregorian"
    self.datetimelist = cftime.num2date(
        self.datetime, self.timeunits, calendar=self.calendar
    )

    # Coordinate variables may be named x/y or lon/lat.
    try:
        self.x = self.dataset.variables["x"][:]
    except KeyError:  # was a bare except; only a missing variable is expected
        self.x = self.dataset.variables["lon"][:]
    # Now check Y values to see if we must flip the data
    try:
        self.y = self.dataset.variables["y"][:]
    except KeyError:  # was a bare except
        self.y = self.dataset.variables["lat"][:]

    # test if 1D or 2D array; flip when the file's y axis is ascending
    # (pcraster expects north -> south).
    if len(self.y.shape) == 1:
        self.flip = self.y[0] <= self.y[-1]
    else:
        # not sure if this works
        self.y = self.y[:][0]
        self.flip = self.y[0] <= self.y[-1]

    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[0, :]
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[:, 0]

    # Get average cell size; non-exact match needed because of possible
    # rounding problems
    acc = np.diff(x).mean() * 0.25

    if self.flip:
        (self.latidx,) = np.logical_and(
            self.y[::-1] + acc >= y.min(), self.y[::-1] <= y.max() + acc
        ).nonzero()
    else:
        (self.latidx,) = np.logical_and(
            self.y + acc >= y.min(), self.y <= y.max() + acc
        ).nonzero()
    # The x-index selection is identical in both orientations.
    (self.lonidx,) = np.logical_and(
        self.x + acc >= x.min(), self.x <= x.max() + acc
    ).nonzero()

    if len(self.lonidx) != len(x):
        logging.error("error in determining X coordinates in netcdf...")
        logging.error("model expects: " + str(x.min()) + " to " + str(x.max()))
        logging.error(
            "got coordinates netcdf: " + str(self.x.min()) + " to " + str(self.x.max())
        )
        logging.error(
            "got len from netcdf x: " + str(len(x)) + " expected " + str(len(self.lonidx))
        )
        raise ValueError("X coordinates in netcdf do not match model")

    if len(self.latidx) != len(y):
        logging.error("error in determining Y coordinates in netcdf...")
        logging.error("model expects: " + str(y.min()) + " to " + str(y.max()))
        logging.error(
            "got from netcdf: " + str(self.y.min()) + " to " + str(self.y.max())
        )
        logging.error(
            "got len from netcdf y: " + str(len(y)) + " expected " + str(len(self.latidx))
        )
        raise ValueError("Y coordinates in netcdf do not match model")

    for var in vars:
        try:
            self.alldat[var] = self.dataset.variables[var][self.fstep : self.maxsteps]
        except KeyError:  # was a bare except; a missing variable is skipped
            self.alldat.pop(var, None)
            logging.warning(
                "Variable " + var + " not found in netcdf file: " + netcdffile
            )
# NOTE(review): this chunk is cut from a larger script; `i` and `max_step`
# come from an enclosing loop that is not visible here — confirm context.
cmd = "Extending class: step "+str(i)+" from " + str(max_step)
print(cmd)
# Fill gaps in the class map with the majority value of a 0.5 degree window.
uniqueIDs = pcr.cover(uniqueIDs, pcr.windowmajority(uniqueIDs, 0.5))
# - use only cells within the landmask
uniqueIDs = pcr.ifthen(landmask, uniqueIDs)
pcr.report(uniqueIDs, "class_ids.map")

# cell area at 5 arc min resolution
cellArea = vos.readPCRmapClone(cellArea05minFile, cloneMapFileName, tmp_directory)
cellArea = pcr.ifthen(landmask, cellArea)

# get a sample cell for every id
# First keep, per id, the cells with the minimum x coordinate, then among
# those the one with the minimum y coordinate — one sample cell per id.
x_min_for_each_id = pcr.areaminimum(pcr.xcoordinate(pcr.boolean(1.0)), uniqueIDs)
sample_cells = pcr.xcoordinate(pcr.boolean(1.0)) == x_min_for_each_id
y_min_for_each_id = pcr.areaminimum(pcr.ycoordinate(sample_cells), uniqueIDs)
sample_cells = pcr.ycoordinate(sample_cells) == y_min_for_each_id
uniqueIDs_sample = pcr.ifthen(sample_cells, uniqueIDs)
# - save it to a pcraster map file
pcr.report(uniqueIDs_sample, "sample.ids")

# calculate the country values
index = 0 # for posCnt
for iYear in range(staYear,endYear+1):
    # time stamp and index for netcdf files:
    index = index + 1
    timeStamp = datetime.datetime(int(iYear), int(12), int(31), int(0))
    fulldate = '%4i-%02i-%02i' %(int(iYear), int(12), int(31))
    # NOTE(review): Python-2 print statement; this chunk is Python-2-only code.
    print fulldate
# Cell area at 5 arcmin resolution, resampled/clipped to the 5 min clone map.
cell_area_05min = vos.readPCRmapClone(cell_area_05min_file, clone_map_05min_file, \
                                      tmp_directory)

# 30 min cell ids
cell_ids_30min_file = "/data/hydroworld/others/irrigationZones/half_arc_degree/uniqueIds30min.nom.map"
cell_ids_30min = vos.readPCRmapClone(cell_ids_30min_file , clone_map_05min_file, \
                                     tmp_directory, \
                                     None, False, None, True)
cell_ids_30min = pcr.nominal(cell_ids_30min)

# reporting objects
# - for 5 arcmin resolution
latlonDict05min = {}
cloneMap = pcr.boolean(1.0)
# Latitudes are reversed so they run north -> south, matching pcr2numpy rows.
latlonDict05min['lat'] = np.unique(
    pcr.pcr2numpy(pcr.ycoordinate(cloneMap), vos.MV))[::-1]
latlonDict05min['lon'] = np.unique(
    pcr.pcr2numpy(pcr.xcoordinate(cloneMap), vos.MV))
report_netcdf_05min = outputNetCDF.OutputNetCDF(latlonDict05min)
# - for 30 arcmin resolution
# Derived from the 5 min axes: shift from the 5 min cell centre to the 30 min
# cell centre (2.5/60 deg half-cell plus a 0.25 deg offset), step 0.5 deg.
latlonDict30min = {}
latlonDict30min['lat'] = np.arange(
    np.round(latlonDict05min['lat'][0] + 2.5 / 60 - 0.25, 2),
    latlonDict05min['lat'][-1] - 2.5 / 60, -0.5)
latlonDict30min['lon'] = np.arange(
    np.round(latlonDict05min['lon'][0] - 2.5 / 60 + 0.25, 2),
    latlonDict05min['lon'][-1] + 2.5 / 60, 0.5)
report_netcdf_30min = outputNetCDF.OutputNetCDF(latlonDict30min)

# TODO: Make this module writes for CF convention (see Hessel's document)
# preparing the file at 5 arcmin resolution:
def initial(self):
    """
    *Required*

    Initial part of the model, executed only once. It reads all static model
    information (parameters) and sets-up the variables used in modelling.

    This function is required. The contents is free. However, in order to
    easily connect to other models it is advised to adhere to the directory
    structure used in the other models.

    All W3RA parameter maps follow a strict naming scheme (one map per HRU,
    HRU 2 files carry a "2" suffix), so they are loaded table-driven instead
    of via ~70 copy-pasted ``wf_readmap`` calls. The resulting attribute
    names and map paths are identical to the original code.
    """
    #: pcraster option to calculate with units or cells. Not really an issue
    #: in this model but always good to keep in mind.
    pcr.setglobaloption("unittrue")
    pcr.setglobaloption(
        "radians"
    )  # Needed as W3RA was originally written in matlab

    # Run/model configuration.
    self.timestepsecs = int(
        configget(self.config, "model", "timestepsecs", "86400")
    )
    self.reinit = int(configget(self.config, "run", "reinit", "0"))
    self.OverWriteInit = int(configget(self.config, "model", "OverWriteInit", "0"))
    self.UseETPdata = int(
        configget(self.config, "model", "UseETPdata", "1")
    )  # 1: Use ETP data, 0: Compute ETP from meteorological variables
    self.logger.debug("use DATA: " + str(self.UseETPdata))
    self.basetimestep = 86400
    self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"

    # Define here the W3RA mapstacks (best to read these via netcdf)
    # Each entry: (key in the [inputmapstacks] ini section, default path).
    mapstack_defaults = [
        ("TMAX", "/inmaps/TMAX"),
        ("TMIN", "/inmaps/TMIN"),
        ("TDAY", "/inmaps/TDAY"),
        ("EPOT", "/inmaps/EPOT"),
        ("PRECIP", "/inmaps/PRECIP"),
        ("RAD", "/inmaps/RAD"),
        ("ALBEDO", "/inmaps/ClimatologyMapFiles/ALBEDO/ALBEDO"),
        ("WINDSPEED", "/inmaps/WIND"),
        ("AIRPRESS", "/inmaps/PRES"),
    ]
    for key, default in mapstack_defaults:
        setattr(
            self,
            key + "_mapstack",
            self.Dir + configget(self.config, "inputmapstacks", key, default),
        )

    self.Altitude = pcr.readmap(self.Dir + "/staticmaps/wflow_dem")
    self.latitude = pcr.ycoordinate(pcr.boolean(self.Altitude))

    # Global (non-HRU) parameter maps: attribute X from staticmaps/x.map.
    for name in ("K_gw", "K_rout", "Sgref"):
        setattr(
            self,
            name,
            self.wf_readmap(
                os.path.join(self.Dir, "staticmaps/%s.map" % name.lower()),
                0.0,
                fail=True,
            ),
        )

    # Per-HRU parameter maps. Attribute "<name>1" is read from
    # staticmaps/<name.lower()>.map and "<name>2" from
    # staticmaps/<name.lower()>2.map.
    hru_params = [
        "alb_dry", "alb_wet", "beta", "cGsmax", "ER_frac_ref", "FdrainFC",
        "Fgw_conn", "Fhru", "SLA", "LAIref", "FsoilEmax", "fvegref_G",
        "FwaterE", "Gfrac_max", "hveg", "InitLoss", "LAImax", "PrefR",
        "S_sls", "S0FC", "SsFC", "SdFC", "Vc", "w0ref_alb", "Us0", "Ud0",
        "wslimU", "wdlimU", "w0limE", "Tgrow", "Tsenc",
    ]
    for name in hru_params:
        for hru, suffix in ((1, ""), (2, "2")):
            mapfile = os.path.join(
                self.Dir, "staticmaps/%s%s.map" % (name.lower(), suffix)
            )
            setattr(self, "%s%d" % (name, hru), self.wf_readmap(mapfile, 0.0, fail=True))

    self.wf_multparameters()

    # Static, for the computation of Aerodynamic conductance (3.7)
    self.fh1 = pcr.ln(813.0 / self.hveg1 - 5.45)
    self.fh2 = pcr.ln(813.0 / self.hveg2 - 5.45)
    self.ku2_1 = 0.305 / (self.fh1 * (self.fh1 + 2.3))
    self.ku2_2 = 0.305 / (self.fh2 * (self.fh2 + 2.3))

    self.logger.info("Starting Dynamic run...")
def main():
    """
    Environmental flow analysis per catchment.

    Reads monthly totalRunoff / gwRecharge / discharge from PCR-GLOBWB netCDF
    output, derives per-catchment discharge percentiles, and reports the
    fraction of groundwater recharge that must be reserved to sustain the
    environmental flow requirement (both as maps and as a CSV-like text file).

    NOTE: ported from Python 2 to Python 3 (print statements, ``<>``,
    ``xrange``, list-returning ``range``/``dict.values``, float array
    indices); the computation itself is unchanged.
    """
    # -initialization
    # MVs
    MV = -999.
    # minimum catchment size to process
    catchmentSizeLimit = 0.0
    # period of interest, start and end year
    startYear = 1961
    endYear = 2010
    # maps
    cloneMapFileName = '/data/hydroworld/PCRGLOBWB20/input30min/global/Global_CloneMap_30min.map'
    lddFileName = '/data/hydroworld/PCRGLOBWB20/input30min/routing/lddsound_30min.map'
    cellAreaFileName = '/data/hydroworld/PCRGLOBWB20/input30min/routing/cellarea30min.map'
    # set clone
    pcr.setclone(cloneMapFileName)
    # output
    outputPath = '/scratch/rens/reservedrecharge'
    percentileMapFileName = os.path.join(outputPath, 'q%03d_cumsec.map')
    textFileName = os.path.join(outputPath, 'groundwater_environmentalflow_%d.txt')
    fractionReservedRechargeMapFileName = os.path.join(outputPath, 'fraction_reserved_recharge%d.map')
    fractionMinimumReservedRechargeMapFileName = os.path.join(outputPath, 'minimum_fraction_reserved_recharge%d.map')
    # input
    inputPath = '/nfsarchive/edwin-emergency-backup-DO-NOT-DELETE/rapid/edwin/05min_runs_results/2015_04_27/non_natural_2015_04_27/global/netcdf/'
    # define data to be read from netCDF files
    ncData = {}
    variableName = 'totalRunoff'
    ncData[variableName] = {}
    ncData[variableName]['fileName'] = os.path.join(inputPath, 'totalRunoff_monthTot_output.nc')
    ncData[variableName]['fileRoot'] = os.path.join(outputPath, 'qloc')
    ncData[variableName]['annualAverage'] = pcr.scalar(0)
    variableName = 'gwRecharge'
    ncData[variableName] = {}
    ncData[variableName]['fileName'] = os.path.join(inputPath, 'gwRecharge_monthTot_output.nc')
    ncData[variableName]['fileRoot'] = os.path.join(outputPath, 'gwrec')
    ncData[variableName]['annualAverage'] = pcr.scalar(0)
    variableName = 'discharge'
    ncData[variableName] = {}
    # NOTE(review): discharge is accumulated from the totalRunoff file (see
    # the catchmenttotal below) — presumably intentional; confirm.
    ncData[variableName]['fileName'] = os.path.join(inputPath, 'totalRunoff_monthTot_output.nc')
    ncData[variableName]['fileRoot'] = os.path.join(outputPath, 'qc')
    ncData[variableName]['annualAverage'] = pcr.scalar(0)
    ncData[variableName]['mapStack'] = np.array([])
    # percents and environmental flow condition set as percentile
    # list(): Python 3 range objects cannot be appended to / sorted in place
    percents = list(range(10, 110, 10))
    environmentalFlowPercent = 10
    if environmentalFlowPercent not in percents:
        percents.append(environmentalFlowPercent)
        percents.sort()

    # -start
    # obtain attributes
    pcr.setclone(cloneMapFileName)
    cloneSpatialAttributes = spatialAttributes(cloneMapFileName)
    years = list(range(startYear, endYear + 1))
    # output path
    if not os.path.isdir(outputPath):
        os.makedirs(outputPath)
    os.chdir(outputPath)
    # compute catchments
    ldd = pcr.readmap(lddFileName)
    cellArea = pcr.readmap(cellAreaFileName)
    catchments = pcr.catchment(ldd, pcr.pit(ldd))
    fractionWater = pcr.scalar(0.0)  # temporary!
    lakeMask = pcr.boolean(0)  # temporary!
    pcr.report(catchments, os.path.join(outputPath, 'catchments.map'))
    maximumCatchmentID = int(pcr.cellvalue(pcr.mapmaximum(pcr.scalar(catchments)), 1)[0])
    # iterate over years
    weight = float(len(years)) ** -1
    for year in years:
        # -echo year
        print(' - processing year %d' % year)
        # -process data
        startDate = datetime.datetime(year, 1, 1)
        endDate = datetime.datetime(year, 12, 31)
        timeSteps = endDate.toordinal() - startDate.toordinal() + 1
        dynamicIncrement = 1
        for variableName in ncData.keys():
            # end='' mimics the Python-2 trailing-comma print
            print(' extracting %s' % variableName, end='')
            ncFileIn = ncData[variableName]['fileName']
            # -process data: extract the year's maps from the netCDF file
            pcrDataSet = pcrObject(variableName, ncData[variableName]['fileRoot'],
                ncFileIn, cloneSpatialAttributes, pcrVALUESCALE=pcr.Scalar,
                resamplingAllowed=True,
                dynamic=True, dynamicStart=startDate, dynamicEnd=endDate,
                dynamicIncrement=dynamicIncrement, ncDynamicDimension='time')
            pcrDataSet.initializeFileInfo()
            pcrDataSet.processFileInfo()
            # list(): dict views are not indexable under Python 3
            for fileInfo in list(pcrDataSet.fileProcessInfo.values())[0]:
                tempFileName = fileInfo[1]
                variableField = pcr.readmap(tempFileName)
                variableField = pcr.ifthen(pcr.defined(ldd), pcr.cover(variableField, 0))
                if variableName == 'discharge':
                    # convert monthly runoff totals to m3/s at the outlet
                    dayNumber = int(os.path.splitext(tempFileName)[1].strip('.'))
                    date = datetime.date(year, 1, 1) + datetime.timedelta(dayNumber - 1)
                    numberDays = calendar.monthrange(year, date.month)[1]
                    variableField = pcr.max(0, pcr.catchmenttotal(variableField * cellArea, ldd) / (numberDays * 24 * 3600))
                ncData[variableName]['annualAverage'] += weight * variableField
                if 'mapStack' in ncData[variableName].keys():
                    # stack the non-missing cells for later percentile analysis
                    tempArray = pcr2numpy(variableField, MV)
                    mask = tempArray != MV
                    if ncData[variableName]['mapStack'].size != 0:
                        ncData[variableName]['mapStack'] = np.vstack((ncData[variableName]['mapStack'], tempArray[mask]))
                    else:
                        ncData[variableName]['mapStack'] = tempArray[mask]
                    # NOTE(review): sized with mapStack.size as in the original;
                    # only the per-cell rows are filled below — confirm intent.
                    coordinates = np.zeros((ncData[variableName]['mapStack'].size, 2))
                    pcr.setglobaloption('unitcell')
                    tempArray = pcr2numpy(pcr.ycoordinate(pcr.boolean(1)) + 0.5, MV)
                    coordinates[:, 0] = tempArray[mask]
                    tempArray = pcr2numpy(pcr.xcoordinate(pcr.boolean(1)) + 0.5, MV)
                    coordinates[:, 1] = tempArray[mask]
                os.remove(tempFileName)
            # delete object
            pcrDataSet = None
            del pcrDataSet
        # close line on screen
        print()
    # report annual averages
    key = 'annualAverage'
    ncData['discharge'][key] /= 12
    for variableName in ncData.keys():
        ncData[variableName][key] = pcr.max(0, ncData[variableName][key])
        pcr.report(ncData[variableName][key],
                   os.path.join(outputPath, '%s_%s.map' % (variableName, key)))
    # remove aux.xml
    for tempFileName in os.listdir(outputPath):
        if 'aux.xml' in tempFileName:
            os.remove(tempFileName)
    # sort data: per cell (column), sort the timeseries values ascending
    print('sorting discharge data')
    variableName = 'discharge'
    key = 'mapStack'
    indices = np.zeros((ncData[variableName][key].shape), np.uint)
    for iCnt in range(ncData[variableName][key].shape[1]):
        indices[:, iCnt] = ncData[variableName][key][:, iCnt].argsort(kind='mergesort')
        ncData[variableName][key][:, iCnt] = ncData[variableName][key][:, iCnt][indices[:, iCnt]]
    # extract values for percentiles, with linear interpolation between ranks
    print('returning maps')
    for percent in percents:
        percentile = 0.01 * percent
        index0 = min(ncData[variableName][key].shape[0] - 1, int(percentile * ncData[variableName][key].shape[0]))
        index1 = min(ncData[variableName][key].shape[0] - 1, int(percentile * ncData[variableName][key].shape[0]) + 1)
        x0 = float(index0) / ncData[variableName][key].shape[0]
        x1 = float(index1) / ncData[variableName][key].shape[0]
        if x0 != x1:
            y = ncData[variableName][key][index0, :] + (percentile - x0) * \
                (ncData[variableName][key][index1, :] - ncData[variableName][key][index0, :]) / (x1 - x0)
        else:
            y = ncData[variableName][key][index0, :]
        # convert a slice of the stack into an array
        tempArray = np.ones((cloneSpatialAttributes.numberRows, cloneSpatialAttributes.numberCols)) * MV
        for iCnt in range(coordinates.shape[0]):
            # int(): float indices are invalid for numpy arrays under Python 3
            row = int(coordinates[iCnt, 0]) - 1
            col = int(coordinates[iCnt, 1]) - 1
            tempArray[row, col] = y[iCnt]
        variableField = numpy2pcr(pcr.Scalar, tempArray, MV)
        pcr.report(variableField, percentileMapFileName % percent)
        if percent == environmentalFlowPercent:
            ncData[variableName]['environmentalFlow'] = variableField
        tempArray = None
        variableField = None
        del tempArray, variableField
    # process environmental flow
    # initialize map of reserved recharge fraction
    fractionReservedRechargeMap = pcr.ifthen(ncData[variableName]['environmentalFlow'] < 0, pcr.scalar(0))
    fractionMinimumReservedRechargeMap = pcr.ifthen(ncData[variableName]['environmentalFlow'] < 0, pcr.scalar(0))
    textFile = open(textFileName % environmentalFlowPercent, 'w')
    hStr = 'Environmental flow analysis per basin, resulting in a map of renewable, exploitable recharge, for the %d%s quantile of discharge\n' % (environmentalFlowPercent, '%')
    hStr += 'Returns Q_%d/R, the fraction of reserved recharge needed to sustain fully the environental flow requirement defined as the %d percentile,\n' % (environmentalFlowPercent, environmentalFlowPercent)
    hStr += 'and Q*_%d/R, a reduced fraction that takes the availability of surface water into account\n' % environmentalFlowPercent
    textFile.write(hStr)
    print(hStr)
    # create header to display on screen and write to file
    # reported are: 1: ID, 2: Area, 3: average discharge, 4: environmental flow, 5: average recharge,
    # 6: Q_%d/Q, 7: Q_%d/R_Avg, 8: R_Avg/Q_Avg, 9: Q*_%d/R_Avg
    hStr = '%6s,%15s,%15s,%15s,%15s,%15s,%15s,%15s,%15s\n' % \
        ('ID', 'Area [km2]', 'Q_Avg [m3]', 'Q_%d [m3]' % environmentalFlowPercent,
         'R_Avg [m3]', 'Q_%d/Q_Avg [-]' % environmentalFlowPercent,
         'Q_%d/Q_Avg [-]' % environmentalFlowPercent, 'R_Avg/Q_Avg [-]',
         'Q*_%d/Q_Avg [-]' % environmentalFlowPercent)
    textFile.write(hStr)
    print(hStr)
    for catchment in range(1, maximumCatchmentID + 1):
        # create catchment mask and check whether it does not coincide with a lake
        catchmentMask = catchments == catchment
        catchmentSize = pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask, cellArea * 1.e-6)), 1)[0]
        key = 'annualAverage'
        variableName = 'discharge'
        # only process catchments with a positive discharge at the pit (ldd == 5)
        if bool(pcr.cellvalue(pcr.maptotal(pcr.ifthen((ldd == 5) & catchmentMask,
                pcr.scalar(ncData[variableName][key] > 0))), 1)[0]) and catchmentSize >= catchmentSizeLimit:
            # valid catchment, process
            # all volumes are in m3 per year
            key = 'annualAverage'
            catchmentAverageDischarge = pcr.cellvalue(pcr.mapmaximum(pcr.ifthen(catchmentMask & (ldd == 5),
                ncData[variableName][key])), 1)[0] * 365.25 * 3600 * 24
            variableName = 'gwRecharge'
            catchmentRecharge = pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask, ncData[variableName][key] *
                (1. - fractionWater) * cellArea)), 1)[0]
            variableName = 'totalRunoff'
            catchmentRunoff = pcr.cellvalue(pcr.maptotal(pcr.ifthen(catchmentMask, ncData[variableName][key] *
                cellArea)), 1)[0]
            key = 'environmentalFlow'
            variableName = 'discharge'
            catchmentEnvironmentalFlow = pcr.cellvalue(pcr.mapmaximum(pcr.ifthen(catchmentMask & (ldd == 5),
                ncData[variableName][key])), 1)[0] * 365.25 * 3600 * 24
            catchmentRunoff = max(catchmentRunoff, catchmentEnvironmentalFlow)
            if catchmentAverageDischarge > 0.:
                fractionEnvironmentalFlow = catchmentEnvironmentalFlow / catchmentAverageDischarge
                fractionGroundWaterContribution = catchmentRecharge / catchmentAverageDischarge
            else:
                fractionEnvironmentalFlow = 0.
                fractionGroundWaterContribution = 0.
            if catchmentRecharge > 0:
                fractionReservedRecharge = min(1, catchmentEnvironmentalFlow / catchmentRecharge)
            else:
                fractionReservedRecharge = 1.0
            fractionMinimumReservedRecharge = (fractionReservedRecharge + fractionGroundWaterContribution -
                fractionReservedRecharge * fractionGroundWaterContribution) * fractionReservedRecharge
            # echo to screen, and write to file and map
            wStr = '%6s,%15.1f,%15.6g,%15.6g,%15.6g,%15.6f,%15.6f,%15.6f,%15.6f\n' % \
                (catchment, catchmentSize, catchmentAverageDischarge, catchmentEnvironmentalFlow, catchmentRecharge,
                 fractionEnvironmentalFlow, fractionReservedRecharge, fractionGroundWaterContribution, fractionMinimumReservedRecharge)
            print(wStr)
            textFile.write(wStr)
            # update maps
            fractionReservedRechargeMap = pcr.ifthenelse(catchmentMask,
                pcr.scalar(fractionReservedRecharge), fractionReservedRechargeMap)
            fractionMinimumReservedRechargeMap = pcr.ifthenelse(catchmentMask,
                pcr.scalar(fractionMinimumReservedRecharge), fractionMinimumReservedRechargeMap)
    # -report map and close text file
    pcr.report(fractionReservedRechargeMap, fractionReservedRechargeMapFileName % environmentalFlowPercent)
    pcr.report(fractionMinimumReservedRechargeMap, fractionMinimumReservedRechargeMapFileName % environmentalFlowPercent)
    # close text file
    textFile.close()
    # finished
    print('all done!')
def __init__(
    self,
    netcdffile,
    logger,
    starttime,
    timesteps,
    EPSG="EPSG:4326",
    timestepsecs=86400,
    metadata=None,
    zlib=True,
    Format="NETCDF4",
    maxbuf=25,
    least_significant_digit=None,
):
    """
    Create a netCDF output writer for the current PCRaster clone map.

    :param netcdffile: path of the netCDF file to create
    :param logger: python logging object used for diagnostics
    :param starttime: datetime of the first timestep
    :param timesteps: total number of timesteps that will be written
    :param EPSG: coordinate reference system identifier (default "EPSG:4326")
    :param timestepsecs: length of one timestep in seconds (default 86400)
    :param metadata: optional dict with extra global netCDF attributes
                     (default None, i.e. no extra attributes)
    :param zlib: enable zlib compression in the netCDF file
    :param Format: netCDF file format (default "NETCDF4")
    :param maxbuf: maximum number of timesteps buffered in memory
    :param least_significant_digit: optional precision hint for compression
    """
    self.EPSG = EPSG
    self.zlib = zlib
    self.Format = Format
    self.least_significant_digit = least_significant_digit

    def date_range(start, end, timestepsecs):
        # Number of steps covers the whole period, both endpoints included.
        r = int(
            (end + dt.timedelta(seconds=timestepsecs) - start).total_seconds()
            / timestepsecs
        )
        return [start + dt.timedelta(seconds=(timestepsecs * i)) for i in range(r)]

    self.logger = logger
    # Do not allow a max buffer larger than the number of timesteps
    self.maxbuf = maxbuf if timesteps >= maxbuf else timesteps
    self.ncfile = netcdffile
    self.timesteps = timesteps
    # Cell-centre coordinates of the clone map define the netCDF x/y axes.
    # (The clone row/col/cellsize queries of the original were unused and
    # have been removed.)
    x = pcr.pcr2numpy(pcr.xcoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[0, :]
    y = pcr.pcr2numpy(pcr.ycoordinate(pcr.boolean(pcr.cover(1.0))), np.nan)[:, 0]

    # Shift one timestep as we output at the end
    # starttime = starttime + dt.timedelta(seconds=timestepsecs)
    end = starttime + dt.timedelta(seconds=timestepsecs * (self.timesteps - 1))

    timeList = date_range(starttime, end, timestepsecs)
    self.timestepbuffer = np.zeros((self.maxbuf, len(y), len(x)))
    self.bufflst = {}
    self.buffdirty = False

    # BUG FIX: "metadata={}" was a mutable default argument; normalized to
    # None here with the same effective behavior for callers.
    globmetadata.update(metadata or {})

    prepare_nc(
        self.ncfile,
        timeList,
        x,
        y,
        globmetadata,
        logger,
        Format=self.Format,
        EPSG=EPSG,
        zlib=self.zlib,
        least_significant_digit=self.least_significant_digit,
    )
def __init__(self, iniItems, landmask, spinUp):
    """Set up the meteo-forcing object from the ini configuration.

    Reads forcing file names, unit-conversion factors, variable names and
    reporting options from ``iniItems``; ``landmask`` limits the area of
    interest. ``spinUp`` is accepted for interface compatibility but is not
    used in this constructor.
    """
    object.__init__(self)

    self.cloneMap = iniItems.cloneMap
    self.tmpDir = iniItems.tmpDir
    self.inputDir = iniItems.globalOptions['inputDir']

    # landmask/area of interest
    # An explicit landmask map in the ini file overrides the one passed in.
    self.landmask = landmask
    if iniItems.globalOptions['landmask'] != "None":
        self.landmask = vos.readPCRmapClone(\
            iniItems.globalOptions['landmask'],
            self.cloneMap,self.tmpDir,self.inputDir)

    # option to ignore snow (temperature will be set to 25 deg C if this option is activated)
    self.ignore_snow = False
    if 'ignoreSnow' in list(iniItems.meteoOptions.keys( )) and\
       iniItems.meteoOptions['ignoreSnow'] == "True":
        self.ignore_snow = True

    self.preFileNC = iniItems.meteoOptions[
        'precipitationNC']  # starting from 19 Feb 2014, we only support netcdf input files
    self.tmpFileNC = iniItems.meteoOptions['temperatureNC']

    self.refETPotMethod = iniItems.meteoOptions['referenceETPotMethod']
    if self.refETPotMethod == 'Hamon':
        # NOTE(review): self.cloneMap holds the clone map as read from
        # iniItems — confirm pcr.ycoordinate accepts it in this form.
        self.latitudes = \
            pcr.ycoordinate(self.cloneMap)  # needed to calculate 'referenceETPot'
    if self.refETPotMethod == 'Input':
        self.etpFileNC = \
            iniItems.meteoOptions['refETPotFileNC']

    #-----------------------------------------------------------------------
    # NOTE: RvB 13/07/2016 Added correction constant and factor and variable name
    # to allow for easier use of netCDF climate input files
    # EHS 20/08/2016 modified for more flexibilities.
    # - meteo conversion factors
    self.preConst = 0.0
    self.preFactor = 1.0
    self.tmpConst = 0.0
    self.tmpFactor = 1.0
    self.refETPotConst = 0.0
    self.refETPotFactor = 1.0
    self.read_meteo_conversion_factors(iniItems.meteoOptions)
    # - variable names
    self.preVarName = 'precipitation'
    self.tmpVarName = 'temperature'
    self.refETPotVarName = 'evapotranspiration'
    self.read_meteo_variable_names(iniItems.meteoOptions)

    # daily time step
    self.usingDailyTimeStepForcingData = False
    if iniItems.timeStep == 1.0 and iniItems.timeStepUnit == "day":
        self.usingDailyTimeStepForcingData = True

    # forcing downscaling options:
    self.forcingDownscalingOptions(iniItems)

    # option to use netcdf files that are defined per year (one file for each year)
    self.precipitation_set_per_year = iniItems.meteoOptions[
        'precipitation_set_per_year'] == "True"
    self.temperature_set_per_year = iniItems.meteoOptions[
        'temperature_set_per_year'] == "True"
    self.refETPotFileNC_set_per_year = iniItems.meteoOptions[
        'refETPotFileNC_set_per_year'] == "True"

    # make the iniItems available for the other modules:
    self.iniItems = iniItems

    # Reporting is enabled only when ALL out*NC options are present in the
    # ini file; any missing key disables reporting entirely.
    self.report = True
    try:
        self.outDailyTotNC = iniItems.meteoOptions['outDailyTotNC'].split(
            ",")
        self.outMonthTotNC = iniItems.meteoOptions['outMonthTotNC'].split(
            ",")
        self.outMonthAvgNC = iniItems.meteoOptions['outMonthAvgNC'].split(
            ",")
        self.outMonthEndNC = iniItems.meteoOptions['outMonthEndNC'].split(
            ",")
        self.outAnnuaTotNC = iniItems.meteoOptions['outAnnuaTotNC'].split(
            ",")
        self.outAnnuaAvgNC = iniItems.meteoOptions['outAnnuaAvgNC'].split(
            ",")
        self.outAnnuaEndNC = iniItems.meteoOptions['outAnnuaEndNC'].split(
            ",")
    except:
        self.report = False
    if self.report == True:
        # daily output in netCDF files:
        self.outNCDir = iniItems.outNCDir
        self.netcdfObj = PCR2netCDF(iniItems)
        #
        if self.outDailyTotNC[0] != "None":
            for var in self.outDailyTotNC:
                # creating the netCDF files:
                self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
                                            str(var)+"_dailyTot.nc",\
                                            var,"undefined")
        # MONTHly output in netCDF files:
        # - cummulative
        if self.outMonthTotNC[0] != "None":
            for var in self.outMonthTotNC:
                # initiating monthlyVarTot (accumulator variable):
                vars(self)[var + 'MonthTot'] = None
                # creating the netCDF files:
                self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
                                            str(var)+"_monthTot.nc",\
                                            var,"undefined")
        # - average
        if self.outMonthAvgNC[0] != "None":
            for var in self.outMonthAvgNC:
                # initiating monthlyTotAvg (accumulator variable)
                vars(self)[var + 'MonthTot'] = None
                # initiating monthlyVarAvg:
                vars(self)[var + 'MonthAvg'] = None
                # creating the netCDF files:
                self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
                                            str(var)+"_monthAvg.nc",\
                                            var,"undefined")
        # - last day of the month
        if self.outMonthEndNC[0] != "None":
            for var in self.outMonthEndNC:
                # creating the netCDF files:
                self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
                                            str(var)+"_monthEnd.nc",\
                                            var,"undefined")
        # YEARly output in netCDF files:
        # - cummulative
        if self.outAnnuaTotNC[0] != "None":
            for var in self.outAnnuaTotNC:
                # initiating yearly accumulator variable:
                vars(self)[var + 'AnnuaTot'] = None
                # creating the netCDF files:
                self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
                                            str(var)+"_annuaTot.nc",\
                                            var,"undefined")
        # - average
        if self.outAnnuaAvgNC[0] != "None":
            for var in self.outAnnuaAvgNC:
                # initiating annualyVarAvg:
                vars(self)[var + 'AnnuaAvg'] = None
                # initiating annualyTotAvg (accumulator variable)
                vars(self)[var + 'AnnuaTot'] = None
                # creating the netCDF files:
                self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
                                            str(var)+"_annuaAvg.nc",\
                                            var,"undefined")
        # - last day of the year
        if self.outAnnuaEndNC[0] != "None":
            for var in self.outAnnuaEndNC:
                # creating the netCDF files:
                self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
                                            str(var)+"_annuaEnd.nc",\
                                            var,"undefined")
def getPCRcoords(PCRmap, missing_value_pcr=-999):
    """
    Get all vertices coordinates of a PCRaster map.

    Input:
    -----
    pcraster map (preferrably landmask)
    value for MV (optional, default at -999)

    Output:
    ------
    list of (x,y) coordinates of each polygon

    Each returned element is a list of the four corner tuples of one
    non-missing cell, ordered [lower-left, lower-right, upper-right,
    upper-left].
    """
    # Get corner coordinates as numpy arrays.
    # upper left coordinates
    pcr.setglobaloption("coorul")
    xcoord_pcr_ul_np = pcr.pcr2numpy(pcr.xcoordinate(PCRmap), missing_value_pcr)
    ycoord_pcr_ul_np = pcr.pcr2numpy(pcr.ycoordinate(PCRmap), missing_value_pcr)
    # lower right coordinates
    pcr.setglobaloption("coorlr")
    xcoord_pcr_lr_np = pcr.pcr2numpy(pcr.xcoordinate(PCRmap), missing_value_pcr)
    ycoord_pcr_lr_np = pcr.pcr2numpy(pcr.ycoordinate(PCRmap), missing_value_pcr)
    # restore the default centre convention: setglobaloption is a global
    # side effect and later pcr.xcoordinate/ycoordinate calls depend on it
    pcr.setglobaloption("coorcentre")

    # indices of the non-missing cells
    i, j = np.where(xcoord_pcr_lr_np != missing_value_pcr)

    # the remaining two corners are combinations of the ul/lr coordinates:
    # lower-left = (x_ul, y_lr), upper-right = (x_lr, y_ul)
    xcoord_pcr_lr_np_nonmiss = xcoord_pcr_lr_np[i, j]
    xcoord_pcr_ul_np_nonmiss = xcoord_pcr_ul_np[i, j]
    xcoord_pcr_ll_np_nonmiss = xcoord_pcr_ul_np[i, j]
    xcoord_pcr_ur_np_nonmiss = xcoord_pcr_lr_np[i, j]
    ycoord_pcr_lr_np_nonmiss = ycoord_pcr_lr_np[i, j]
    ycoord_pcr_ul_np_nonmiss = ycoord_pcr_ul_np[i, j]
    ycoord_pcr_ll_np_nonmiss = ycoord_pcr_lr_np[i, j]
    ycoord_pcr_ur_np_nonmiss = ycoord_pcr_ul_np[i, j]

    # corner (x, y) pairs; each zip is consumed exactly once below, so this
    # works both on Python 2 (list) and Python 3 (iterator) — the previous
    # version indexed the zip results, which fails on Python 3
    ll = zip(xcoord_pcr_ll_np_nonmiss, ycoord_pcr_ll_np_nonmiss)
    lr = zip(xcoord_pcr_lr_np_nonmiss, ycoord_pcr_lr_np_nonmiss)
    ur = zip(xcoord_pcr_ur_np_nonmiss, ycoord_pcr_ur_np_nonmiss)
    ul = zip(xcoord_pcr_ul_np_nonmiss, ycoord_pcr_ul_np_nonmiss)

    # wrap all cell coordinates into a list of lists
    # (one list per cell, with one tuple per cell corner)
    all_cell_coords_pcr = [list(corners) for corners in zip(ll, lr, ur, ul)]
    return all_cell_coords_pcr
def evaluateAllModelResults(self, globalCloneMapFileName,
                            catchmentClassFileName,
                            lddMapFileName,
                            cellAreaMapFileName,
                            pcrglobwb_output,
                            analysisOutputDir="",
                            tmpDir="/dev/shm/edwin_grdc_"):
    """Evaluate simulated discharge against the GRDC observation for every
    station in self.list_of_grdc_ids and write a per-station summary table.

    Parameters
    ----------
    globalCloneMapFileName : clone map defining the model grid
    catchmentClassFileName : nominal map of landmask regions, or None for a
                             single class everywhere
    lddMapFileName         : local drain direction map
    cellAreaMapFileName    : cell area map
    pcrglobwb_output       : model output information, passed through to
                             evaluateModelResultsToGRDC
    analysisOutputDir      : base directory for chart/ and table/ output
                             ("" writes chart/ and table/ in the cwd)
    tmpDir                 : scratch directory for intermediate files
    """
    # output directory for all analyses for all stations
    analysisOutputDir = str(analysisOutputDir)
    self.chartOutputDir = analysisOutputDir + "/chart/"
    self.tableOutputDir = analysisOutputDir + "/table/"
    if analysisOutputDir == "":
        self.chartOutputDir = "chart/"
    if analysisOutputDir == "":
        self.tableOutputDir = "table/"

    # make (and first clean) the chart and table directories
    os.system('rm -r ' + self.chartOutputDir + "*")
    os.system('rm -r ' + self.tableOutputDir + "*")
    os.makedirs(self.chartOutputDir)
    os.makedirs(self.tableOutputDir)

    # cloneMap for all pcraster operations
    pcr.setclone(globalCloneMapFileName)
    cloneMap = pcr.boolean(1)
    self.cell_size_in_arc_degree = vos.getMapAttributesALL(globalCloneMapFileName)['cellsize']

    lddMap = pcr.lddrepair(pcr.readmap(lddMapFileName))
    cellArea = pcr.scalar(pcr.readmap(cellAreaMapFileName))

    # The landMaskClass map contains the nominal classes for all landmask regions.
    landMaskClass = pcr.nominal(cloneMap)  # default: if catchmentClassFileName is not given
    if catchmentClassFileName != None:
        landMaskClass = pcr.nominal(pcr.readmap(catchmentClassFileName))

    # model catchment areas and coordinates
    catchmentAreaAll = pcr.catchmenttotal(cellArea, lddMap) / (1000*1000)  # unit: km2
    xCoordinate = pcr.xcoordinate(cloneMap)
    yCoordinate = pcr.ycoordinate(cloneMap)

    # NOTE(review): a leftover debug statement (print "Jaaaaaa") was removed here.

    for id in self.list_of_grdc_ids:
        logger.info("Evaluating simulated discharge to the grdc observation at "+str(self.attributeGRDC["id_from_grdc"][str(id)])+".")
        # identify model pixel
        self.identifyModelPixel(tmpDir, catchmentAreaAll, landMaskClass, xCoordinate, yCoordinate, str(id))
        # evaluate model results to GRDC data
        self.evaluateModelResultsToGRDC(str(id), pcrglobwb_output, catchmentClassFileName, tmpDir)

    # write the summary to a table
    # NOTE(review): no "/" is inserted before "summary.txt", so a non-empty
    # analysisOutputDir yields e.g. "outsummary.txt" — kept as-is for
    # backward compatibility, but verify this is intended.
    summary_file = analysisOutputDir + "summary.txt"
    logger.info("Writing the summary for all stations to the file: "+str(summary_file)+".")

    summary_file_handle = open(summary_file, "w")
    # write the header
    summary_file_handle.write(";".join(self.grdc_dict_keys) + "\n")
    # write one semicolon-separated row per station
    for id in self.list_of_grdc_ids:
        rowLine = ""
        for key in self.grdc_dict_keys:
            rowLine += str(self.attributeGRDC[key][str(id)]) + ";"
        rowLine = rowLine[0:-1] + "\n"
        summary_file_handle.write(rowLine)
    summary_file_handle.close()
def evaluateAllModelResults(self, globalCloneMapFileName,
                            catchmentClassFileName,
                            lddMapFileName,
                            cellAreaMapFileName,
                            pcrglobwb_output,
                            analysisOutputDir="",
                            tmpDir=None):
    """Evaluate simulated discharge against the GRDC observation for every
    station in self.list_of_grdc_ids, then write a summary table with one
    semicolon-separated row per station."""
    # temporary directory: default derived from self.tmpDir when not given
    if tmpDir is None:
        tmpDir = self.tmpDir + "/edwin_grdc_"

    # resolve the chart/table output directories for all stations
    analysisOutputDir = str(analysisOutputDir)
    if analysisOutputDir == "":
        self.chartOutputDir = "chart/"
        self.tableOutputDir = "table/"
    else:
        self.chartOutputDir = analysisOutputDir + "/chart/"
        self.tableOutputDir = analysisOutputDir + "/table/"

    # wipe and recreate both output directories
    for directory in (self.chartOutputDir, self.tableOutputDir):
        os.system('rm -r ' + directory + "*")
        os.makedirs(directory)

    # clone map for all pcraster operations
    pcr.setclone(globalCloneMapFileName)
    cloneMap = pcr.boolean(1)
    self.cell_size_in_arc_degree = vos.getMapAttributesALL(globalCloneMapFileName)['cellsize']

    lddMap = pcr.lddrepair(pcr.readmap(lddMapFileName))
    cellArea = pcr.scalar(pcr.readmap(cellAreaMapFileName))

    # nominal classes for all landmask regions; one single class by default
    if catchmentClassFileName != None:
        landMaskClass = pcr.nominal(pcr.readmap(catchmentClassFileName))
    else:
        landMaskClass = pcr.nominal(cloneMap)

    # model catchment areas (km2) and cell coordinates
    catchmentAreaAll = pcr.catchmenttotal(cellArea, lddMap) / (1000*1000)
    xCoordinate = pcr.xcoordinate(cloneMap)
    yCoordinate = pcr.ycoordinate(cloneMap)

    for station_id in self.list_of_grdc_ids:
        logger.info("Evaluating simulated discharge to the grdc observation at "+str(self.attributeGRDC["id_from_grdc"][str(station_id)])+".")
        # locate the model pixel for this station, then compare to GRDC data
        self.identifyModelPixel(tmpDir, catchmentAreaAll, landMaskClass, xCoordinate, yCoordinate, str(station_id))
        self.evaluateModelResultsToGRDC(str(station_id), pcrglobwb_output, catchmentClassFileName, tmpDir)

    # write the summary table for all stations
    summary_file = analysisOutputDir + "summary.txt"
    logger.info("Writing the summary for all stations to the file: "+str(summary_file)+".")
    summary_file_handle = open(summary_file, "w")
    # header line, then one row of attribute values per station
    summary_file_handle.write(";".join(self.grdc_dict_keys) + "\n")
    for station_id in self.list_of_grdc_ids:
        values = [str(self.attributeGRDC[key][str(station_id)]) for key in self.grdc_dict_keys]
        summary_file_handle.write(";".join(values) + "\n")
    summary_file_handle.close()
def __init__(self, iniItems, landmask, spinUp):
    """Initialise the meteo forcing object.

    Parameters
    ----------
    iniItems : configuration object with globalOptions/meteoOptions and
               timeStep/timeStepUnit attributes
    landmask : default landmask/area of interest; overridden when
               globalOptions['landmask'] is not the string "None"
    spinUp   : accepted for interface compatibility; not used in this method
    """
    object.__init__(self)

    self.cloneMap = iniItems.cloneMap
    self.tmpDir = iniItems.tmpDir
    self.inputDir = iniItems.globalOptions['inputDir']

    # landmask/area of interest
    self.landmask = landmask
    if iniItems.globalOptions['landmask'] != "None":
        self.landmask = vos.readPCRmapClone(
            iniItems.globalOptions['landmask'],
            self.cloneMap, self.tmpDir, self.inputDir)

    # starting from 19 Feb 2014, we only support netcdf input files
    self.preFileNC = iniItems.meteoOptions['precipitationNC']
    self.tmpFileNC = iniItems.meteoOptions['temperatureNC']

    self.refETPotMethod = iniItems.meteoOptions['referenceETPotMethod']
    if self.refETPotMethod == 'Hamon':
        # needed to calculate 'referenceETPot'
        self.latitudes = pcr.ycoordinate(self.cloneMap)
    if self.refETPotMethod == 'Input':
        self.etpFileNC = iniItems.meteoOptions['refETPotFileNC']

    # daily time step
    self.usingDailyTimeStepForcingData = False
    if iniItems.timeStep == 1.0 and iniItems.timeStepUnit == "day":
        self.usingDailyTimeStepForcingData = True

    # forcing downscaling options:
    self.forcingDownscalingOptions(iniItems)

    # reporting options: lists of variable names per output frequency.
    # Missing options disable reporting entirely (same best-effort behaviour
    # as before), but only the exceptions that a missing/odd config entry can
    # raise are caught now — the previous bare 'except:' also swallowed
    # KeyboardInterrupt and hid genuine bugs.
    self.report = True
    try:
        self.outDailyTotNC = iniItems.meteoOptions['outDailyTotNC'].split(",")
        self.outMonthTotNC = iniItems.meteoOptions['outMonthTotNC'].split(",")
        self.outMonthAvgNC = iniItems.meteoOptions['outMonthAvgNC'].split(",")
        self.outMonthEndNC = iniItems.meteoOptions['outMonthEndNC'].split(",")
        self.outAnnuaTotNC = iniItems.meteoOptions['outAnnuaTotNC'].split(",")
        self.outAnnuaAvgNC = iniItems.meteoOptions['outAnnuaAvgNC'].split(",")
        self.outAnnuaEndNC = iniItems.meteoOptions['outAnnuaEndNC'].split(",")
    except (KeyError, AttributeError):
        self.report = False

    if self.report:
        self.outNCDir = iniItems.outNCDir
        self.netcdfObj = PCR2netCDF(iniItems)
        # One entry per output frequency:
        # (file-name suffix, variable list, accumulator-attribute suffixes
        #  that must be initialised to None before the run starts)
        output_specs = [
            ('dailyTot', self.outDailyTotNC, []),                        # daily totals
            ('monthTot', self.outMonthTotNC, ['MonthTot']),              # monthly cumulative
            ('monthAvg', self.outMonthAvgNC, ['MonthTot', 'MonthAvg']),  # monthly average
            ('monthEnd', self.outMonthEndNC, []),                        # last day of the month
            ('annuaTot', self.outAnnuaTotNC, ['AnnuaTot']),              # yearly cumulative
            ('annuaAvg', self.outAnnuaAvgNC, ['AnnuaAvg', 'AnnuaTot']),  # yearly average
            ('annuaEnd', self.outAnnuaEndNC, []),                        # last day of the year
        ]
        for suffix, var_list, accumulators in output_specs:
            # a single "None" entry means this frequency is disabled
            if var_list[0] != "None":
                for var in var_list:
                    # initiate accumulator variables (e.g. <var>MonthTot = None)
                    for acc in accumulators:
                        vars(self)[var + acc] = None
                    # creating the netCDF file for this variable/frequency
                    self.netcdfObj.createNetCDF(
                        str(self.outNCDir) + "/" + str(var) + "_" + suffix + ".nc",
                        var, "undefined")