def cell_padding(input, output, radius=3):
    """Mitigates edge effect by growing an input raster map by radius cells

    Args
    ----
    input, output : str
        Names of GRASS raster map for input, and padded output
    radius : int
        Radius in which to expand region and grow raster

    Returns
    -------
    region : Region
        The computational region as it was before padding, so that it can be
        restored after the padded raster has been processed
    """
    region = Region()
    g.region(n=region.north + (region.nsres * radius),
             s=region.south - (region.nsres * radius),
             w=region.west - (region.ewres * radius),
             e=region.east + (region.ewres * radius))
    r.grow(input=input, output=output, radius=radius + 1, quiet=True)

    return region
def main(): g.message("Pocitam NDVI...") # nastavit region g.region(rast=options['tm4']) # vypocitat NDVI r.mapcalc('ndvi = float({y} - {x}) / ({y} + {x})'.format(x=options['tm3'], y=options['tm4']), overwrite=True) # r.reclass podporuje pouze datovy typ CELL r.mapcalc('temp1 = 100 * ndvi', overwrite=True) g.message("Reklasifikuji data...") # reklasifikovat data reclass_rules = """-100 thru 5 = 1 bez vegetace, vodni plochy 5 thru 35 = 2 plochy s minimalni vegetaci 35 thru 100 = 3 plochy pokryte vegetaci""" r.reclass(overwrite=True, rules='-', input='temp1', output='r_ndvi', stdin_=reclass_rules) # nastavit tabulku barev color_rules = """1 red 2 yellow 3 0 136 26""" r.colors(quiet=True, map='r_ndvi', rules='-', stdin_=color_rules) # vytiskout zakladni charakteristiku dat r.report(map='r_ndvi', units=['c', 'p', 'h'])
def main(): g.message("Pocitam NDVI...") # nastavit region g.region(rast=options['tm4']) # vypocitat NDVI r.mapcalc('ndvi = float({y} - {x}) / ({y} + {x})'.format(x=options['tm3'], y=options['tm4']), overwrite = True) # r.reclass podporuje pouze datovy typ CELL r.mapcalc('temp1 = 100 * ndvi', overwrite = True) g.message("Reklasifikuji data...") # reklasifikovat data reclass_rules = """-100 thru 5 = 1 bez vegetace, vodni plochy 5 thru 35 = 2 plochy s minimalni vegetaci 35 thru 100 = 3 plochy pokryte vegetaci""" r.reclass(overwrite = True, rules = '-', input = 'temp1', output = 'r_ndvi', stdin_ = reclass_rules) # nastavit tabulku barev color_rules = """1 red 2 yellow 3 0 136 26""" r.colors(quiet = True, map = 'r_ndvi', rules = '-', stdin_ = color_rules) # vytiskout zakladni charakteristiku dat r.report(map = 'r_ndvi', units = ['c', 'p', 'h'])
def main(): """ Creates a hydrologically correct MODFLOW grid that inlcudes minimum DEM elevations for all stream cells and mean elevations everywhere else """ """ dem = 'DEM' grid = 'grid_tmp' streams = 'streams_tmp' streams_MODFLOW = 'streams_tmp_MODFLOW' DEM_MODFLOW = 'DEM_coarse' resolution = 500 """ options, flags = gscript.parser() dem = options['dem'] grid = options['grid'] streams = options['streams'] #resolution = float(options['resolution']) streams_MODFLOW = options['streams_modflow'] DEM_MODFLOW = options['dem_modflow'] # Get number of rows and columns colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns']) colValues = np.array( gscript.vector_db_select(grid, layer=1)['values'].values()) cats = colValues[:, colNames == 'cat'].astype(int).squeeze() rows = colValues[:, colNames == 'row'].astype(int).squeeze() cols = colValues[:, colNames == 'col'].astype(int).squeeze() nRows = np.max(rows) nCols = np.max(cols) gscript.use_temp_region() # Set the region to capture only the channel g.region(raster=dem) v.to_rast(input=streams, output=streams_MODFLOW, use='val', value=1.0, type='line', overwrite=gscript.overwrite(), quiet=True) r.mapcalc('tmp' + " = " + streams_MODFLOW + " * " + dem, overwrite=True) g.rename(raster=('tmp', streams_MODFLOW), overwrite=True, quiet=True) g.region(vector=grid, rows=nRows, cols=nCols, quiet=True) r.resamp_stats(input=streams_MODFLOW, output=streams_MODFLOW, method='average', overwrite=gscript.overwrite(), quiet=True) r.resamp_stats(input=dem, output=DEM_MODFLOW, method='average', overwrite=gscript.overwrite(), quiet=True) r.patch(input=streams_MODFLOW + ',' + DEM_MODFLOW, output=DEM_MODFLOW, overwrite=True, quiet=True)
def start_up():
    """Prepare input raster and vector data"""
    # czech republic, resolution 1km
    g.region(flags="d", res='1000')
    r.random_surface(output='inputraster1')
    r.random_surface(output='inputraster2')
def reg_2deg(input='', size=2):
    # Get the region center of the input raster.
    # (Sketch of the apparent intent: the original left lat/lon as float()
    #  placeholders and passed an invalid 'input' option to g.region; here
    #  the center is derived from the region bounds via grass.script.region(),
    #  assuming `grass` is grass.script.)
    grass.run_command('g.region', raster=input)
    reg = grass.region()
    lat = (reg['n'] + reg['s']) / 2.
    lon = (reg['w'] + reg['e']) / 2.
    g.region(n=lat + size / 2., s=lat - size / 2.,
             w=lon - size / 2., e=lon + size / 2.,
             align=input, flags='ap')
def cleanup():
    gs.message("Deleting intermediate files...")

    for f in TMP_RAST:
        gs.run_command(
            "g.remove", type="raster", name=f, flags='bf', quiet=True)

    g.region(**current_reg)

    if mask_test:
        r.mask(original_mask, overwrite=True, quiet=True)
def main(): """ Creates a hydrologically correct MODFLOW grid that inlcudes minimum DEM elevations for all stream cells and mean elevations everywhere else """ """ dem = 'DEM' grid = 'grid_tmp' streams = 'streams_tmp' streams_MODFLOW = 'streams_tmp_MODFLOW' DEM_MODFLOW = 'DEM_coarse' resolution = 500 """ options, flags = gscript.parser() dem = options['dem'] grid = options['grid'] streams = options['streams'] #resolution = float(options['resolution']) streams_MODFLOW = options['streams_modflow'] DEM_MODFLOW = options['dem_modflow'] gscript.use_temp_region() # Set the region to capture only the channel g.region(raster=dem) v.to_rast(input=streams, output=streams_MODFLOW, use='val', value=1.0, type='line', overwrite=gscript.overwrite(), quiet=True) r.mapcalc('tmp' + " = " + streams_MODFLOW + " * " + dem, overwrite=True) g.rename(raster=('tmp', streams_MODFLOW), overwrite=True, quiet=True) g.region(raster=DEM_MODFLOW, quiet=True) print "ALTERED" r.resamp_stats(input=streams_MODFLOW, output=streams_MODFLOW, method='average', overwrite=gscript.overwrite(), quiet=True) r.resamp_stats(input=dem, output=DEM_MODFLOW, method='average', overwrite=gscript.overwrite(), quiet=True) r.patch(input=streams_MODFLOW + ',' + DEM_MODFLOW, output=DEM_MODFLOW, overwrite=True, quiet=True)
def compute_solar_irradiation(inputFile, outputFile, day_of_year, crs='32630'): # Define grass working set GRASS_GISDB = 'grassdata' GRASS_LOCATION = 'GEOPROCESSING' GRASS_MAPSET = 'PERMANENT' GRASS_ELEVATIONS_FILENAME = 'ELEVATIONS' os.environ.update(dict(GRASS_COMPRESS_NULLS='1')) # Clean previously processed data if os.path.isdir(GRASS_GISDB): shutil.rmtree(GRASS_GISDB) with Session(gisdb=GRASS_GISDB, location=GRASS_LOCATION, mapset=GRASS_MAPSET, create_opts='EPSG:32630') as ses: # Set project projection to match elevation raster projection g.proj(epsg=crs, flags='c') # Load raster file into working directory r.import_(input=inputFile, output=GRASS_ELEVATIONS_FILENAME, flags='o') # Set project region to match raster region g.region(raster=GRASS_ELEVATIONS_FILENAME, flags='s') # Calculate solar irradiation gscript.run_command('r.slope.aspect', elevation=GRASS_ELEVATIONS_FILENAME, slope='slope', aspect='aspect') gscript.run_command('r.sun', elevation=GRASS_ELEVATIONS_FILENAME, slope='slope', aspect='aspect', beam_rad='beam', step=1, day=day_of_year) # Get extraterrestrial irradiation from history metadata regex = re.compile(r'\d+\.\d+') output = gscript.read_command("r.info", flags="h", map=["beam"]) splits = str(output).split('\n') line = next(filter(lambda line: 'Extraterrestrial' in line, splits)) extraterrestrial_irradiance = float(regex.search(line)[0]) # Export generated results into a GeoTiff file if os.path.isfile(outputFile): os.remove(outputFile) r.out_gdal(input='beam', output=outputFile) return extraterrestrial_irradiance
def setRegion(parcelmap, betriebid):
    ## set region to parcel layer extent + buffer
    reg = Region()
    reg.vect(parcelmap.name)
    regbuffer = 100
    reg.north += regbuffer
    reg.east += regbuffer
    reg.south -= regbuffer
    reg.west -= regbuffer
    reg.set_current()
    # set_current() not working right now
    # so using g.region():
    g.region(n=str(reg.north), s=str(reg.south), w=str(reg.west),
             e=str(reg.east), res='2', flags='a', quiet=quiet)
    g.region(save='B' + betriebid, overwrite=True, quiet=quiet)
def worker(src):
    # Each job is a (window, raster name) pair: set the region to the window,
    # then read that raster back as a NumPy array.
    window, src = src
    window = dict(window)
    window['n'] = window.pop('north')
    window['s'] = window.pop('south')
    window['e'] = window.pop('east')
    window['w'] = window.pop('west')
    del window['top']
    del window['bottom']
    g.region(**window)

    with RasterRow(src) as rs:
        arr = np.asarray(rs)

    return arr
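# A minimal, hypothetical driver (not in the original source) for worker()
# above: it builds one window from the current pygrass Region and reads the
# raster 'elevation' back as a NumPy array. The map name 'elevation' is a
# placeholder; several such jobs could equally be mapped over a
# multiprocessing.Pool.
import numpy as np
from grass.pygrass.gis.region import Region
from grass.pygrass.modules.shortcuts import general as g
from grass.pygrass.raster import RasterRow

reg = Region()
window = {'north': reg.north, 'south': reg.south,
          'east': reg.east, 'west': reg.west,
          'top': reg.top, 'bottom': reg.bottom}
arr = worker((window.items(), 'elevation'))
print(arr.shape)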
def initGrassSetup(userDemDir, userid, lat, lon,
                   filename=options['demname'] + '.tif'):
    # redundant connections to grass
    r, g, gscript = connect2grass(userid)
    # r.in.gdal input=/home/justin/Documents/ucmi/geodata/zip/tempEPSG3857/tile.tif output=tile
    gscript.run_command(
        'r.in.gdal',
        input=userDemDir + filename,
        output=filename[:-4],
        overwrite=True,
    )
    g.region(raster=filename[:-4] + '@' + str(userid))
    g.region(flags='p')

    # remove old viewsheds
    g.remove(flags='fb', type='raster', pattern='viewshed*')
def firsttimeGRASS(infiles, adminfile, maskfile):
    """
    Run a maximum-likelihood unsupervised classification on the data

    infiles: list of raster files to import and process
    adminfile: vector file with the admin boundary used to set the region
    maskfile: raster file applied as a mask
    On the first run, all files are imported into GRASS.
    """
    from grass_session import Session
    from grass.script import core as gcore
    from grass.pygrass.modules.shortcuts import raster as r
    from grass.pygrass.modules.shortcuts import vector as v
    from grass.pygrass.modules.shortcuts import general as g
    from grass.pygrass.modules.shortcuts import imagery as i

    # create a new location from EPSG code (can also be a GeoTIFF or SHP or ... file)
    with Session(gisdb="/tmp", location="loc", create_opts="EPSG:4326"):
        # First run, needs to import the files and create a mask
        # Import admin boundary
        #v.import_(input=adminfile,output="admin",quiet=True,superquiet=True)
        gcore.parse_command("v.import", input=adminfile, output="admin",
                            quiet=True)
        # Set computational region to admin boundary
        g.region(flags="s", vector="admin", quiet=True)
        # Keep only file name for output
        outmf = maskfile.split("/")[-1]
        # Import mask file
        r.in_gdal(input=maskfile, output=outmf, quiet=True)
        # Apply mask
        r.mask(raster=outmf, maskcats="0", quiet=True)
        # Set computational resolution to mask pixel size
        g.region(flags="s", raster=outmf, quiet=True)
        # Import files
        for f in infiles:
            # Keep only file name for output
            outf = f.split("/")[-1]
            # Landsat files not in Geo lat long need reprojection on import
            #r.import_(input=f,output=outf,quiet=True)
            gcore.parse_command("r.import", input=f, output=outf, quiet=True)
            # Create group
            i.group(group="l8", subgroup="l8", input=outf, quiet=True)
def cell_padding(input, output, radius=3):
    """Mitigates edge effect by growing an input raster map by radius cells

    Parameters
    ----------
    input, output : str
        Names of GRASS raster map for input, and padded output.
    radius : int
        Radius in which to expand region and grow raster.

    Returns
    -------
    region : Region
        The expanded computational region, after growing by radius cells.
    """
    g.region(grow=radius)
    r.grow(input=input, output=output, radius=radius, quiet=True)
    region = Region()

    return region
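# A hypothetical usage sketch (not part of the original source) for the
# cell_padding() variants above: pad the region and raster, run a focal
# operation, then restore the previous region so the edge cells of the result
# are unaffected by the map boundary. The map names 'elevation', 'elev_padded'
# and 'elev_smooth' are placeholders.
from grass.pygrass.gis.region import Region
from grass.pygrass.modules.shortcuts import general as g
from grass.pygrass.modules.shortcuts import raster as r

original_region = Region()                      # remember the current region
cell_padding(input='elevation', output='elev_padded', radius=3)
r.neighbors(input='elev_padded', output='elev_smooth', size=7,
            method='average', overwrite=True)
original_region.write()                         # restore the original region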
def main(): """ Creates a hydrologically correct MODFLOW grid that inlcudes minimum DEM elevations for all stream cells and mean elevations everywhere else """ """ dem = 'DEM' grid = 'grid_tmp' streams = 'streams_tmp' streams_MODFLOW = 'streams_tmp_MODFLOW' DEM_MODFLOW = 'DEM_coarse' resolution = 500 """ options, flags = gscript.parser() dem = options['dem'] grid = options['grid'] streams = options['streams'] resolution = float(options['resolution']) streams_MODFLOW = options['streams_modflow'] DEM_MODFLOW = options['dem_modflow'] gscript.use_temp_region() g.region(raster=dem) g.region(vector=grid) v.to_rast(input=streams, output=streams_MODFLOW, use='val', value=1.0, type='line', overwrite=gscript.overwrite(), quiet=True) r.mapcalc(streams_MODFLOW + " = " + streams_MODFLOW + " * DEM", overwrite=True) g.region(res=resolution, quiet=True) r.resamp_stats(input=streams_MODFLOW, output=streams_MODFLOW, method='minimum', overwrite=gscript.overwrite(), quiet=True) r.resamp_stats(input=dem, output=DEM_MODFLOW, method='average', overwrite=gscript.overwrite(), quiet=True) r.patch(input=streams_MODFLOW + ',' + DEM_MODFLOW, output=DEM_MODFLOW, overwrite=True, quiet=True)
aspect = 'aspect'  # Topographic aspect
HRUs = 'HRUs'  # Hydrologic response units
gravity_reservoirs = 'gravity_reservoirs'  # Connect HRUs to MODFLOW grid
basin_mask = 'basin_mask'  # Mask out non-study-basin cells
pour_point = 'pour_point'  # Outlet pour point
bc_cell = 'bc_cell'  # Grid cell for MODFLOW b.c.

# Import DEM if required
# And perform the standard starting tasks.
# These take time, so skip if not needed
if Settings.DEM_input != '':
    # Import DEM and set region
    r.in_gdal(input=Settings.DEM_input, output=DEM_original_import,
              overwrite=True)
    g.region(raster=DEM_original_import)

# Build flow accumulation with only fully on-map flow
# Cell areas
r.cell_area(output=cellArea_meters2, units='m2', overwrite=True)
# Flow weights (e.g., precipitation)
# Test first if it is an existing raster; if not, import
rastersAll = np.array(
    list(set(list(gscript.parse_command('g.list', type='raster')))))
if Settings.flow_weights in rastersAll:
    # NOTE: Here, this might not necessarily be called "flow_weights"
    r.mapcalc(flow + ' = ' + cellArea_meters2 + ' * ' + Settings.flow_weights,
              overwrite=True)
else:
    r.in_gdal(input=Settings.flow_weights, output=flow_weights,
              overwrite=True)
margin_top = 2110
margin_bottom = 410
margin_left = 300
margin_right = 3700

# Existing full-extent DEMs
DEMs_fullextent = gscript.parse_command(
    'g.list', type='raster', pattern='DEM_fullextent_0??????').keys()
DEMs_fullextent = sorted(DEMs_fullextent)

# x and y values across basin and more
length_y_trimmed = margin_top - margin_bottom
length_x_trimmed = margin_right - margin_left

# Full-extent region: get x and y
g.region(raster=DEMs_fullextent[0])
try:
    r.mapcalc('x = x()')
    r.mapcalc('y = y()')
except:
    pass
g.region(flags='d')

# Set region to limited area
g.region(n=margin_top / 1000., s=margin_bottom / 1000.,
         w=margin_left / 1000., e=margin_right / 1000., res=0.001)
reg = gscript.region()
# region aligned to this map
map_for_define_region = 'Neotropic_Hansen_percenttreecoverd_2000_wgs84@PERMANENT'
# input vector with buffers
vector = 'buffers_5km_comm_data_neotro_checked_2020_d11_06'

# For each buffer
for i in buffer_index:
    print(i, comm_code[i], years[i])

    # select feature
    v.extract(input=vector, output='vector_cat', where='cat = ' + str(i + 1),
              flags='t', overwrite=True, quiet=True)
    # define region
    g.region(vector='vector_cat', align=map_for_define_region, flags='p')
    # use vector as a mask
    r.mask(vector='vector_cat', overwrite=True, quiet=True)

    # Cut maps
    # tree cover with zero where there was deforestation
    expr = comm_code[i] + '_treecover_GFW_2000_deforestation = if(Neotropical_Hansen_treecoverlossperyear_wgs84_2017@PERMANENT > 0 && ' + \
        'Neotropical_Hansen_treecoverlossperyear_wgs84_2017@PERMANENT < ' + str(years[i]) + ', 0, Neotropic_Hansen_percenttreecoverd_2000_wgs84@PERMANENT)'
    r.mapcalc(expr, overwrite=True)

    # thresholds for binary values of natural vegetation
    thresholds = [70, 80, 90]
    # loop to cut for each one and account for deforestation
    for tr in thresholds:
def compute_supply( base, recreation_spectrum, highest_spectrum, base_reclassification_rules, reclassified_base, reclassified_base_title, flow, flow_map_name, aggregation, ns_resolution, ew_resolution, print_only=False, flow_column_name=None, vector=None, supply_filename=None, use_filename=None, ): """ Algorithmic description of the "Contribution of Ecosysten Types" # FIXME ''' 1 B ← {0, .., m-1} : Set of aggregational boundaries 2 T ← {0, .., n-1} : Set of land cover types 3 WE ← 0 : Set of weighted extents 4 R ← 0 : Set of fractions 5 F ← 0 6 MASK ← HQR : High Quality Recreation 7 foreach {b} ⊆ B do : for each aggregational boundary 'b' 8 RB ← 0 9 foreach {t} ⊆ T do : for each Land Type 10 WEt ← Et * Wt : Weighted Extent = Extent(t) * Weight(t) 11 WE ← WE⋃{WEt} : Add to set of Weighted Extents 12 S ← ∑t∈WEt 13 foreach t ← T do 14 Rt ← WEt / ∑WE 15 R ← R⋃{Rt} 16 RB ← RB⋃{R} ''' # FIXME Parameters ---------- recreation_spectrum: Map scoring access to and quality of recreation highest_spectrum : Expected is a map of areas with highest recreational value (category 9 as per the report ... ) base : Base land types map for final zonal statistics. Specifically to ESTIMAP's recrceation mapping algorithm base_reclassification_rules : Reclassification rules for the input base map reclassified_base : Name for the reclassified base cover map reclassified_base_title : Title for the reclassified base map ecosystem_types : flow : Map of visits, derived from the mobility function, depicting the number of people living inside zones 0, 1, 2, 3. Used as a cover map for zonal statistics. flow_map_name : A name for the 'flow' map. This is required when the 'flow' input option is not defined by the user, yet some of the requested outputs required first the production of the 'flow' map. An example is the request for a supply table without requesting the 'flow' map itself. aggregation : ns_resolution : ew_resolution : statistics_filename : supply_filename : Name for CSV output file of the supply table use_filename : Name for CSV output file of the use table flow_column_name : Name for column to populate with 'flow' values vector : If 'vector' is given, a vector map of the 'flow' along with appropriate attributes will be produced. ? : Land cover class percentages in ROS9 (this is: relative percentage) output : Supply table (distribution of flow for each land cover class) Returns ------- This function produces a map to base the production of a supply table in form of CSV. Examples -------- """ # Inputs flow_in_base = flow + "_" + base base_scores = base + ".scores" # Define lists and dictionaries to hold intermediate data statistics_dictionary = {} weighted_extents = {} flows = [] # MASK areas of high quality recreation r.mask(raster=highest_spectrum, overwrite=True, quiet=True) # Reclassify land cover map to MAES ecosystem types r.reclass( input=base, rules=base_reclassification_rules, output=reclassified_base, quiet=True, ) # add to "remove_at_exit" after the reclassified maps! 
# Discard areas out of MASK copy_equation = EQUATION.format(result=reclassified_base, expression=reclassified_base) r.mapcalc(copy_equation, overwrite=True) # Count flow within each land cover category r.stats_zonal( base=base, flags="r", cover=flow_map_name, method="sum", output=flow_in_base, overwrite=True, quiet=True, ) # Set colors for "flow" map r.colors(map=flow_in_base, color=MOBILITY_COLORS, quiet=True) # Parse aggregation raster categories and labels categories = grass.parse_command("r.category", map=aggregation, delimiter="\t") for category in categories: # Intermediate names cells = highest_spectrum + ".cells" + "." + category remove_map_at_exit(cells) extent = highest_spectrum + ".extent" + "." + category remove_map_at_exit(extent) weighted = highest_spectrum + ".weighted" + "." + category remove_map_at_exit(weighted) fractions = base + ".fractions" + "." + category remove_map_at_exit(fractions) flow_category = "_flow_" + category flow = base + flow_category remove_map_at_exit(flow) flow_in_reclassified_base = reclassified_base + "_flow" flow_in_category = reclassified_base + flow_category flows.append(flow_in_category) # add to list for patching remove_map_at_exit(flow_in_category) # Output names msg = "Processing aggregation raster category: {r}" msg = msg.format(r=category) grass.debug(_(msg)) # g.message(_(msg)) # First, set region to extent of the aggregation map # and resolution to the one of the population map # Note the `-a` flag to g.region: ? # To safely modify the region: grass.use_temp_region() # FIXME g.region( raster=aggregation, nsres=ns_resolution, ewres=ew_resolution, flags="a", quiet=True, ) msg = "|! Computational resolution matched to {raster}" msg = msg.format(raster=aggregation) grass.debug(_(msg)) # Build MASK for current category & high quality recreation areas msg = "Setting category '{c}' of '{a}' as a MASK" grass.verbose(_(msg.format(c=category, a=aggregation))) masking = "if( {spectrum} == {highest_quality_category} && " masking += "{aggregation} == {category}, " masking += "1, null() )" masking = masking.format( spectrum=recreation_spectrum, highest_quality_category=HIGHEST_RECREATION_CATEGORY, aggregation=aggregation, category=category, ) masking_equation = EQUATION.format(result="MASK", expression=masking) grass.mapcalc(masking_equation, overwrite=True) # zoom to MASK g.region(zoom="MASK", nsres=ns_resolution, ewres=ew_resolution, quiet=True) # Count number of cells within each land category r.stats_zonal( flags="r", base=base, cover=highest_spectrum, method="count", output=cells, overwrite=True, quiet=True, ) cells_categories = grass.parse_command("r.category", map=cells, delimiter="\t") grass.debug(_("Cells: {c}".format(c=cells_categories))) # Build cell category and label rules for `r.category` cells_rules = "\n".join([ "{0}:{1}".format(key, value) for key, value in cells_categories.items() ]) # Discard areas out of MASK copy_equation = EQUATION.format(result=cells, expression=cells) r.mapcalc(copy_equation, overwrite=True) # Reassign cell category labels r.category(map=cells, rules="-", stdin=cells_rules, separator=":") # Compute extent of each land category extent_expression = "@{cells} * area()" extent_expression = extent_expression.format(cells=cells) extent_equation = EQUATION.format(result=extent, expression=extent_expression) r.mapcalc(extent_equation, overwrite=True) # Write extent figures as labels r.stats_zonal( flags="r", base=base, cover=extent, method="average", output=extent, overwrite=True, verbose=False, quiet=True, ) # Write 
land suitability scores as an ASCII file temporary_reclassified_base_map = temporary_filename( filename=reclassified_base) suitability_scores_as_labels = string_to_file( SUITABILITY_SCORES_LABELS, filename=temporary_reclassified_base_map) remove_files_at_exit(suitability_scores_as_labels) # Write scores as raster category labels r.reclass( input=base, output=base_scores, rules=suitability_scores_as_labels, overwrite=True, quiet=True, verbose=False, ) remove_map_at_exit(base_scores) # Compute weighted extents weighted_expression = "@{extent} * float(@{scores})" weighted_expression = weighted_expression.format(extent=extent, scores=base_scores) weighted_equation = EQUATION.format(result=weighted, expression=weighted_expression) r.mapcalc(weighted_equation, overwrite=True) # Write weighted extent figures as labels r.stats_zonal( flags="r", base=base, cover=weighted, method="average", output=weighted, overwrite=True, verbose=False, quiet=True, ) # Get weighted extents in a dictionary weighted_extents = grass.parse_command("r.category", map=weighted, delimiter="\t") # Compute the sum of all weighted extents and add to dictionary category_sum = sum([ float(x) if not math.isnan(float(x)) else 0 for x in weighted_extents.values() ]) weighted_extents["sum"] = category_sum # Create a map to hold fractions of each weighted extent to the sum # See also: # https://grasswiki.osgeo.org/wiki/LANDSAT#Hint:_Minimal_disk_space_copies r.reclass( input=base, output=fractions, rules="-", stdin="*=*", verbose=False, quiet=True, ) # Compute weighted fractions of land types fraction_category_label = { key: float(value) / weighted_extents["sum"] for (key, value) in weighted_extents.iteritems() if key is not "sum" } # Build fraction category and label rules for `r.category` fraction_rules = "\n".join([ "{0}:{1}".format(key, value) for key, value in fraction_category_label.items() ]) # Set rules r.category(map=fractions, rules="-", stdin=fraction_rules, separator=":") # Assert that sum of fractions is ~1 fraction_categories = grass.parse_command("r.category", map=fractions, delimiter="\t") fractions_sum = sum([ float(x) if not math.isnan(float(x)) else 0 for x in fraction_categories.values() ]) msg = "Fractions: {f}".format(f=fraction_categories) grass.debug(_(msg)) # g.message(_("Sum: {:.17g}".format(fractions_sum))) assert abs(fractions_sum - 1) < 1.0e-6, "Sum of fractions is != 1" # Compute flow flow_expression = "@{fractions} * @{flow}" flow_expression = flow_expression.format(fractions=fractions, flow=flow_in_base) flow_equation = EQUATION.format(result=flow, expression=flow_expression) r.mapcalc(flow_equation, overwrite=True) # Write flow figures as raster category labels r.stats_zonal( base=reclassified_base, flags="r", cover=flow, method="sum", output=flow_in_category, overwrite=True, verbose=False, quiet=True, ) # Parse flow categories and labels flow_categories = grass.parse_command("r.category", map=flow_in_category, delimiter="\t") grass.debug(_("Flow: {c}".format(c=flow_categories))) # Build flow category and label rules for `r.category` flow_rules = "\n".join([ "{0}:{1}".format(key, value) for key, value in flow_categories.items() ]) # Discard areas out of MASK # Check here again! # Output patch of all flow maps? 
copy_equation = EQUATION.format(result=flow_in_category, expression=flow_in_category) r.mapcalc(copy_equation, overwrite=True) # Reassign cell category labels r.category(map=flow_in_category, rules="-", stdin=flow_rules, separator=":") # Update title reclassified_base_title += " " + category r.support(flow_in_category, title=reclassified_base_title) # debugging # r.report( # flags='hn', # map=(flow_in_category), # units=('k','c','p'), # ) if print_only: r.stats( input=(flow_in_category), output="-", flags="nacpl", separator=COMMA, quiet=True, ) if not print_only: if flow_column_name: flow_column_prefix = flow_column_name + category else: flow_column_name = "flow" flow_column_prefix = flow_column_name + category # Produce vector map(s) if vector: # The following is wrong # update_vector(vector=vector, # raster=flow_in_category, # methods=METHODS, # column_prefix=flow_column_prefix) # What can be done? # Maybe update columns of an existing map from the columns of # the following vectorised raster map(s) # ? raster_to_vector(raster=flow_in_category, vector=flow_in_category, type="area") # get statistics dictionary = get_raster_statistics( map_one=aggregation, # reclassified_base map_two=flow_in_category, separator="|", flags="nlcap", ) # merge 'dictionary' with global 'statistics_dictionary' statistics_dictionary = merge_two_dictionaries( statistics_dictionary, dictionary) # It is important to remove the MASK! r.mask(flags="r", quiet=True) # FIXME # Add "reclassified_base" map to "remove_at_exit" here, so as to be after # all reclassified maps that derive from it # remove the map 'reclassified_base' # g.remove(flags='f', type='raster', name=reclassified_base, quiet=True) # remove_map_at_exit(reclassified_base) if not print_only: r.patch(flags="", input=flows, output=flow_in_reclassified_base, quiet=True) if vector: # Patch all flow vector maps in one v.patch( flags="e", input=flows, output=flow_in_reclassified_base, overwrite=True, quiet=True, ) # export to csv if supply_filename: supply_filename += CSV_EXTENSION nested_dictionary_to_csv(supply_filename, statistics_dictionary) if use_filename: use_filename += CSV_EXTENSION uses = compile_use_table(statistics_dictionary) dictionary_to_csv(use_filename, uses) # Maybe return list of flow maps? Requires unique flow map names return flows
## for testing and debugging I strongly suggest using this resource:
## http://grasswiki.osgeo.org/wiki/GRASS_and_Python
## usage: python importraster.py input=geoTiff output=raster
## description: Creates a raster file and sets the g.region for
##              concurrent raster calculations
#######################################################################
import os
import sys
import tempfile

from grass.pygrass.modules.shortcuts import general as g
from grass.pygrass.modules.shortcuts import raster as r
from grass.pygrass.modules import Module

# python importraster.py input=tiff output=raster
for arg in sys.argv:
    mySplit = arg.split('=')
    if len(mySplit) > 1:
        command = mySplit[0]
        value = mySplit[1]
        if command == "input":
            myInput = value
        if command == "output":
            myOutput = value

r.external(input=myInput, output=myOutput)
g.region(rast=myOutput)
import grass.script as gscript
from grass.pygrass.modules.shortcuts import general as g
from grass.pygrass.modules.shortcuts import raster as r
from grass.pygrass.modules.shortcuts import vector as v
#from grass.pygrass.modules.shortcuts import temporal as t
from grass.pygrass.modules.grid import GridModule

file = '/appl/data/geo/mml/dem10m/2019/W3/W33/W3331.tif'
grassfile = 'W3331'
grasscontoursfile = 'W3331_contours'
contoursfile = "/scratch/project_2000599/grass/output/V4132.gpkg"
cpus = 4

# Register external GeoTIFF in current mapset:
r.external(input=file, output=grassfile, flags="e", overwrite=True)

# Set GRASS region
g.region(raster=grassfile)

# Perform GRASS analysis, here calculate contours from DEM,
# parallelization with GridModule
region = gscript.region()
width = region['cols'] // 2 + 1
height = region['rows'] // 2 + 1
grd = GridModule('r.contour',
                 width=width, height=height, overlap=20, processes=cpus,
                 input=grassfile, output=grasscontoursfile,
                 minlevel=200, maxlevel=800,
def main(): """ Builds river reaches for input to the USGS hydrologic model, GSFLOW. These reaches link the PRMS stream segments to the MODFLOW grid cells. """ ################## # OPTION PARSING # ################## options, flags = gscript.parser() segments = options['segment_input'] grid = options['grid_input'] reaches = options['output'] elevation = options['elevation'] Smin = options['s_min'] h_stream = options['h_stream'] x1 = options['upstream_easting_column_seg'] y1 = options['upstream_northing_column_seg'] x2 = options['downstream_easting_column_seg'] y2 = options['downstream_northing_column_seg'] tostream = options['tostream_cat_column_seg'] # Hydraulic paramters STRTHICK = options['strthick'] STRHC1 = options['strhc1'] THTS = options['thts'] THTI = options['thti'] EPS = options['eps'] UHC = options['uhc'] # Build reach maps by overlaying segments on grid if len(gscript.find_file(segments, element='vector')['name']) > 0: v.extract(input=segments, output='GSFLOW_TEMP__', type='line', quiet=True, overwrite=True) v.overlay(ainput='GSFLOW_TEMP__', atype='line', binput=grid, output=reaches, operator='and', overwrite=gscript.overwrite(), quiet=True) g.remove(type='vector', name='GSFLOW_TEMP__', quiet=True, flags='f') else: gscript.fatal('No vector file "' + segments + '" found.') # Start editing database table reachesTopo = VectorTopo(reaches) reachesTopo.open('rw') # Rename a,b columns reachesTopo.table.columns.rename('a_' + x1, 'x1') reachesTopo.table.columns.rename('a_' + x2, 'x2') reachesTopo.table.columns.rename('a_' + y1, 'y1') reachesTopo.table.columns.rename('a_' + y2, 'y2') reachesTopo.table.columns.rename('a_NSEG', 'NSEG') reachesTopo.table.columns.rename('a_ISEG', 'ISEG') reachesTopo.table.columns.rename('a_stream_type', 'stream_type') reachesTopo.table.columns.rename('a_type_code', 'type_code') reachesTopo.table.columns.rename('a_cat', 'rnum_cat') reachesTopo.table.columns.rename('a_' + tostream, 'tostream') reachesTopo.table.columns.rename('a_id', 'segment_id') reachesTopo.table.columns.rename('a_OUTSEG', 'OUTSEG') reachesTopo.table.columns.rename('b_row', 'row') reachesTopo.table.columns.rename('b_col', 'col') reachesTopo.table.columns.rename('b_id', 'cell_id') # Drop unnecessary columns cols = reachesTopo.table.columns.names() for col in cols: if (col[:2] == 'a_') or (col[:2] == 'b_'): reachesTopo.table.columns.drop(col) # Add new columns to 'reaches' reachesTopo.table.columns.add('KRCH', 'integer') reachesTopo.table.columns.add('IRCH', 'integer') reachesTopo.table.columns.add('JRCH', 'integer') reachesTopo.table.columns.add('IREACH', 'integer') reachesTopo.table.columns.add('RCHLEN', 'double precision') reachesTopo.table.columns.add('STRTOP', 'double precision') reachesTopo.table.columns.add('SLOPE', 'double precision') reachesTopo.table.columns.add('STRTHICK', 'double precision') reachesTopo.table.columns.add('STRHC1', 'double precision') reachesTopo.table.columns.add('THTS', 'double precision') reachesTopo.table.columns.add('THTI', 'double precision') reachesTopo.table.columns.add('EPS', 'double precision') reachesTopo.table.columns.add('UHC', 'double precision') reachesTopo.table.columns.add('xr1', 'double precision') reachesTopo.table.columns.add('xr2', 'double precision') reachesTopo.table.columns.add('yr1', 'double precision') reachesTopo.table.columns.add('yr2', 'double precision') # Commit columns before editing (necessary?) 
reachesTopo.table.conn.commit() reachesTopo.close() # Update some columns that can be done now reachesTopo.open('rw') colNames = np.array(gscript.vector_db_select(reaches, layer=1)['columns']) colValues = np.array( gscript.vector_db_select(reaches, layer=1)['values'].values()) cats = colValues[:, colNames == 'cat'].astype(int).squeeze() nseg = np.arange(1, len(cats) + 1) nseg_cats = [] for i in range(len(cats)): nseg_cats.append((nseg[i], cats[i])) cur = reachesTopo.table.conn.cursor() # Hydrogeologic properties cur.execute("update " + reaches + " set STRTHICK=" + str(STRTHICK)) cur.execute("update " + reaches + " set STRHC1=" + str(STRHC1)) cur.execute("update " + reaches + " set THTS=" + str(THTS)) cur.execute("update " + reaches + " set THTI=" + str(THTI)) cur.execute("update " + reaches + " set EPS=" + str(EPS)) cur.execute("update " + reaches + " set UHC=" + str(UHC)) # Grid properties cur.execute("update " + reaches + " set KRCH=1") # Top layer: unchangable cur.executemany("update " + reaches + " set IRCH=? where row=?", nseg_cats) cur.executemany("update " + reaches + " set JRCH=? where col=?", nseg_cats) reachesTopo.table.conn.commit() reachesTopo.close() v.to_db(map=reaches, columns='RCHLEN', option='length', quiet=True) # Still to go after these: # STRTOP (added with slope) # IREACH (whole next section dedicated to this) # SLOPE (need z_start and z_end) # Now, the light stuff is over: time to build the reach order v.to_db(map=reaches, option='start', columns='xr1,yr1') v.to_db(map=reaches, option='end', columns='xr2,yr2') # Now just sort by category, find which stream has the same xr1 and yr1 as # x1 and y1 (or a_x1, a_y1) and then find where its endpoint matches another # starting point and move down the line. # v.db.select reaches col=cat,a_id,xr1,xr2 where="a_x1 = xr1" # First, get the starting coordinates of each stream segment # and a set of river ID's (ordered from 1...N) colNames = np.array(gscript.vector_db_select(segments, layer=1)['columns']) colValues = np.array( gscript.vector_db_select(segments, layer=1)['values'].values()) number_of_segments = colValues.shape[0] segment_x1s = colValues[:, colNames == 'x1'].astype(float).squeeze() segment_y1s = colValues[:, colNames == 'y1'].astype(float).squeeze() segment_ids = colValues[:, colNames == 'id'].astype(float).squeeze() # Then move back to the reaches map to produce the ordering colNames = np.array(gscript.vector_db_select(reaches, layer=1)['columns']) colValues = np.array( gscript.vector_db_select(reaches, layer=1)['values'].values()) reach_cats = colValues[:, colNames == 'cat'].astype(int).squeeze() reach_x1s = colValues[:, colNames == 'xr1'].astype(float).squeeze() reach_y1s = colValues[:, colNames == 'yr1'].astype(float).squeeze() reach_x2s = colValues[:, colNames == 'xr2'].astype(float).squeeze() reach_y2s = colValues[:, colNames == 'yr2'].astype(float).squeeze() segment_ids__reach = colValues[:, colNames == 'segment_id'].astype( float).squeeze() for segment_id in segment_ids: reach_order_cats = [] downstream_directed = [] ssel = segment_ids == segment_id rsel = segment_ids__reach == segment_id # selector # Find first segment: x1y1 first here, but not necessarily later downstream_directed.append(1) _x_match = reach_x1s[rsel] == segment_x1s[ssel] _y_match = reach_y1s[rsel] == segment_y1s[ssel] _i_match = _x_match * _y_match x1y1 = True # false if x2y2 # Find cat _cat = int(reach_cats[rsel][_x_match * _y_match]) reach_order_cats.append(_cat) # Get end of reach = start of next one reach_x_end = 
float(reach_x2s[reach_cats == _cat]) reach_y_end = float(reach_y2s[reach_cats == _cat]) while _i_match.any(): _x_match = reach_x1s[rsel] == reach_x_end _y_match = reach_y1s[rsel] == reach_y_end _i_match = _x_match * _y_match if _i_match.any(): _cat = int(reach_cats[rsel][_x_match * _y_match]) reach_x_end = float(reach_x2s[reach_cats == _cat]) reach_y_end = float(reach_y2s[reach_cats == _cat]) reach_order_cats.append(_cat) print len(reach_order_cats), len(reach_cats[rsel]) # Reach order to database table reach_number__reach_order_cats = [] for i in range(len(reach_order_cats)): reach_number__reach_order_cats.append((i + 1, reach_order_cats[i])) reachesTopo = VectorTopo(reaches) reachesTopo.open('rw') cur = reachesTopo.table.conn.cursor() cur.executemany("update " + reaches + " set IREACH=? where cat=?", reach_number__reach_order_cats) reachesTopo.table.conn.commit() reachesTopo.close() # TOP AND BOTTOM ARE OUT OF ORDER: SOME SEGS ARE BACKWARDS. UGH!!!! # NEED TO GET THEM IN ORDER TO GET THE Z VALUES AT START AND END # 2018.10.01: Updating this to use the computational region for the DEM g.region(raster=elevation) # Compute slope and starting elevations from the elevations at the start and # end of the reaches and the length of each reach] gscript.message('Obtaining elevation values from raster: may take time.') v.db_addcolumn(map=reaches, columns='zr1 double precision, zr2 double precision') zr1 = [] zr2 = [] for i in range(len(reach_cats)): _x = reach_x1s[i] _y = reach_y1s[i] #print _x, _y _z = float( gscript.parse_command('r.what', map=elevation, coordinates=str(_x) + ',' + str(_y)).keys()[0].split('|')[-1]) zr1.append(_z) _x = reach_x2s[i] _y = reach_y2s[i] _z = float( gscript.parse_command('r.what', map=elevation, coordinates=str(_x) + ',' + str(_y)).keys()[0].split('|')[-1]) zr2.append(_z) zr1_cats = [] zr2_cats = [] for i in range(len(reach_cats)): zr1_cats.append((zr1[i], reach_cats[i])) zr2_cats.append((zr2[i], reach_cats[i])) reachesTopo = VectorTopo(reaches) reachesTopo.open('rw') cur = reachesTopo.table.conn.cursor() cur.executemany("update " + reaches + " set zr1=? where cat=?", zr1_cats) cur.executemany("update " + reaches + " set zr2=? where cat=?", zr2_cats) reachesTopo.table.conn.commit() reachesTopo.close() # Use these to create slope -- backwards possible on DEM! v.db_update(map=reaches, column='SLOPE', value='(zr1 - zr2)/RCHLEN') v.db_update(map=reaches, column='SLOPE', value=Smin, where='SLOPE <= ' + str(Smin)) # srtm_local_filled_grid = srtm_local_filled @ 200m (i.e. current grid) # resolution # r.to.vect in=srtm_local_filled_grid out=srtm_local_filled_grid col=z type=area --o# # NOT SURE IF IT IS BEST TO USE MEAN ELEVATION OR TOP ELEVATION!!!!!!!!!!!!!!!!!!!!!!! v.db_addcolumn(map=reaches, columns='z_topo_mean double precision') v.what_rast(map=reaches, raster=elevation, column='z_topo_mean') #, query_column='z') v.db_update(map=reaches, column='STRTOP', value='z_topo_mean -' + str(h_stream), quiet=True)
## END USER SETTINGS

import os

import grass.script as gscript
from grass.pygrass.modules import Module
from grass.pygrass.modules.shortcuts import raster as r, vector as v, general as g, display as d

params['username'] += username
params['password'] += password
dsn = params['dsn'] + ' user=%s password=%s' % (username, password)

g.mapset(flags='c', mapset='Gemeinde_' + bfsnum)

v.in_ogr(dsn=dsn, layer=params['layers']['gemeinde'], output='region',
         where='bfs_nummer = 2461', overwrite=True)
g.region(vect='region', flags='a', res='2')
g.region(flags='p')

v.in_ogr(dsn=dsn, layer=params['layers']['fieldblocks'],
         output='fieldblocks', flags='r')

rinwcs = Module("r.in.wcs")
rinwcs.inputs.url = params['url']
rinwcs.inputs.username = username
rinwcs.inputs.password = password
rinwcs(coverage=params['coverages']['elevation'])
rinwcs(coverage=params['coverages']['rfactor'])
rinwcs(coverage=params['coverages']['kfactor'])

rsoillossbare = Module("r.soilloss.bare")
rsoillossbare.inputs.flowaccmethod = 'r.terraflow'
def main(): # options and flags options, flags = gs.parser() input_raster = options["input"] minradius = int(options["minradius"]) maxradius = int(options["maxradius"]) steps = int(options["steps"]) output_raster = options["output"] region = Region() res = np.mean([region.nsres, region.ewres]) # some checks if "@" in output_raster: output_raster = output_raster.split("@")[0] if maxradius <= minradius: gs.fatal("maxradius must be greater than minradius") if steps < 2: gs.fatal("steps must be greater than 1") # calculate radi for generalization radi = np.logspace(np.log(minradius), np.log(maxradius), steps, base=np.exp(1), dtype=np.int) radi = np.unique(radi) sizes = radi * 2 + 1 # multiscale calculation ztpi_maps = list() for step, (radius, size) in enumerate(zip(radi[::-1], sizes[::-1])): gs.message( "Calculating the TPI at radius {radius}".format(radius=radius)) # generalize the dem step_res = res * size step_res_pretty = str(step_res).replace(".", "_") generalized_dem = gs.tempname(4) if size > 15: step_dem = gs.tempname(4) gg.region(res=str(step_res)) gr.resamp_stats( input=input_raster, output=step_dem, method="average", flags="w", ) gr.resamp_rst( input=step_dem, ew_res=res, ns_res=res, elevation=generalized_dem, quiet=True, ) region.write() gg.remove(type="raster", name=step_dem, flags="f", quiet=True) else: gr.neighbors(input=input_raster, output=generalized_dem, size=size) # calculate the tpi tpi = gs.tempname(4) gr.mapcalc(expression="{x} = {a} - {b}".format( x=tpi, a=input_raster, b=generalized_dem)) gg.remove(type="raster", name=generalized_dem, flags="f", quiet=True) # standardize the tpi raster_stats = gr.univar(map=tpi, flags="g", stdout_=PIPE).outputs.stdout raster_stats = parse_key_val(raster_stats) tpi_mean = float(raster_stats["mean"]) tpi_std = float(raster_stats["stddev"]) ztpi = gs.tempname(4) ztpi_maps.append(ztpi) RAST_REMOVE.append(ztpi) gr.mapcalc(expression="{x} = ({a} - {mean})/{std}".format( x=ztpi, a=tpi, mean=tpi_mean, std=tpi_std)) gg.remove(type="raster", name=tpi, flags="f", quiet=True) # integrate if step > 1: tpi_updated2 = gs.tempname(4) gr.mapcalc("{x} = if(abs({a}) > abs({b}), {a}, {b})".format( a=ztpi_maps[step], b=tpi_updated1, x=tpi_updated2)) RAST_REMOVE.append(tpi_updated2) tpi_updated1 = tpi_updated2 else: tpi_updated1 = ztpi_maps[0] RAST_REMOVE.pop() gg.rename(raster=(tpi_updated2, output_raster), quiet=True) # set color theme with RasterRow(output_raster) as src: color_rules = """{minv} blue -1 0:34:198 0 255:255:255 1 255:0:0 {maxv} 110:15:0 """ color_rules = color_rules.format(minv=src.info.min, maxv=src.info.max) gr.colors(map=output_raster, rules="-", stdin_=color_rules, quiet=True)
margin_bottom = 410 margin_left = 300 margin_right = 3700 # x and y values across basin and more #sourcedir = '/media/awickert/Elements/Fluvial 2015/151109_MC_IW_01/Processed/' #sourcedir = '/media/awickert/data3/TerraceExperiment/Fluvial 2015/151109_MC_IW_01/Processed/' #sourcedirs = sorted(next(os.walk('/media/awickert/data3/TerraceExperiment/Fluvial 2015/'))[1]) #sourcedirs = sorted(glob.glob('/data3/TerraceExperiment/Forgotten/*/Processed/')) sourcedirs = sorted(glob.glob('/data3/TerraceExperiment/Fluvial 2015/*/Processed/')) length_y_trimmed = margin_top - margin_bottom length_x_trimmed = margin_right - margin_left g.region(w=margin_left/1000., e=margin_right/1000., s=margin_bottom/1000., n=margin_top/1000., res=0.001, flags='s') # Maps of x and y g.region(w=0, s=0, e=int(np.floor(margin_right*1.5))/1000., n=int(np.floor(margin_top*1.5))/1000.) try: r.mapcalc('x = x()') r.mapcalc('y = y()') except: pass g.region(flags='d') errordirs = [] errorfiles = [] for sourcedir in sourcedirs: DATpaths = sorted(glob.glob(sourcedir+'*.DAT')) for DATpath in DATpaths:
def main(): """ Import any raster or vector data set and add its attribute to a GSFLOW data object """ ################## # OPTION PARSING # ################## options, flags = gscript.parser() # Parsing if options['attrtype'] == 'int': attrtype = 'integer' elif options['attrtype'] == 'float': attrtype = 'double precision' elif options['attrtype'] == 'string': attrtype = 'varchar' else: attrtype = '' ######################################## # PROCESS AND UPLOAD TO DATABASE TABLE # ######################################## if options['vector_area'] is not '': gscript.use_temp_region() g.region(vector=options['map'], res=options['dxy']) v.to_rast(input=options['vector_area'], output='tmp___tmp', use='attr', attribute_column=options['from_column'], quiet=True, overwrite=True) try: gscript.message("Checking for existing column to overwrite") v.db_dropcolumn(map=options['map'], columns=options['column'], quiet=True) except: pass if attrtype is 'double precision': try: gscript.message("Checking for existing column to overwrite") v.db_dropcolumn(map=options['map'], columns='tmp_average', quiet=True) except: pass v.rast_stats(map=options['map'], raster='tmp___tmp', column_prefix='tmp', method='average', flags='c', quiet=True) g.remove(type='raster', name='tmp___tmp', flags='f', quiet=True) v.db_renamecolumn(map=options['map'], column=['tmp_average', options['column']], quiet=True) else: try: v.db_addcolumn(map=options['map'], columns=options['column'] + ' ' + attrtype, quiet=True) except: pass gscript.run_command('v.distance', from_=options['map'], to=options['vector_area'], upload='to_attr', to_column=options['from_column'], column=options['column'], quiet=True) elif options['vector_points'] is not '': try: gscript.message("Checking for existing column to overwrite") v.db_dropcolumn(map=options['map'], columns=options['column'], quiet=True) v.db_addcolumn(map=options['map'], columns=options['column'] + ' ' + attrtype, quiet=True) except: pass gscript.run_command('v.distance', from_=options['map'], to=options['vector_points'], upload='to_attr', to_column=options['from_column'], column=options['column'], quiet=True) elif options['raster'] is not '': try: gscript.message("Checking for existing column to overwrite") v.db_dropcolumn(map=options['map'], columns=options['column'], quiet=True) except: pass v.rast_stats(map=options['map'], raster=options['raster'], column_prefix='tmp', method='average', flags='c', quiet=True) v.db_renamecolumn(map=options['map'], column=['tmp_average', options['column']], quiet=True) gscript.message("Done.")
def main(): """ Builds a grid for the MODFLOW component of the USGS hydrologic model, GSFLOW. """ options, flags = gscript.parser() basin = options['basin'] pp = options['pour_point'] raster_input = options['raster_input'] dx = options['dx'] dy = options['dy'] grid = options['output'] mask = options['mask_output'] bc_cell = options['bc_cell'] # basin='basins_tmp_onebasin'; pp='pp_tmp'; raster_input='DEM'; raster_output='DEM_coarse'; dx=dy='500'; grid='grid_tmp'; mask='mask_tmp' """ # Fatal if raster input and output are not both set _lena0 = (len(raster_input) == 0) _lenb0 = (len(raster_output) == 0) if _lena0 + _lenb0 == 1: gscript.fatal("You must set both raster input and output, or neither.") """ # Fatal if bc_cell set but mask and grid are false if bc_cell != '': if (mask == '') or (pp == ''): gscript.fatal( 'Mask and pour point must be set to define b.c. cell') # Create grid -- overlaps DEM, three cells of padding gscript.use_temp_region() reg = gscript.region() reg_grid_edges_sn = np.linspace(reg['s'], reg['n'], reg['rows']) reg_grid_edges_we = np.linspace(reg['w'], reg['e'], reg['cols']) g.region(vector=basin, ewres=dx, nsres=dy) regnew = gscript.region() # Use a grid ratio -- don't match exactly the desired MODFLOW resolution grid_ratio_ns = np.round(regnew['nsres'] / reg['nsres']) grid_ratio_ew = np.round(regnew['ewres'] / reg['ewres']) # Get S, W, and then move the unit number of grid cells over to get N and E # and include 3 cells of padding around the whole watershed _s_dist = np.abs(reg_grid_edges_sn - (regnew['s'] - 3. * regnew['nsres'])) _s_idx = np.where(_s_dist == np.min(_s_dist))[0][0] _s = float(reg_grid_edges_sn[_s_idx]) _n_grid = np.arange(_s, reg['n'] + 3 * grid_ratio_ns * reg['nsres'], grid_ratio_ns * reg['nsres']) _n_dist = np.abs(_n_grid - (regnew['n'] + 3. * regnew['nsres'])) _n_idx = np.where(_n_dist == np.min(_n_dist))[0][0] _n = float(_n_grid[_n_idx]) _w_dist = np.abs(reg_grid_edges_we - (regnew['w'] - 3. * regnew['ewres'])) _w_idx = np.where(_w_dist == np.min(_w_dist))[0][0] _w = float(reg_grid_edges_we[_w_idx]) _e_grid = np.arange(_w, reg['e'] + 3 * grid_ratio_ew * reg['ewres'], grid_ratio_ew * reg['ewres']) _e_dist = np.abs(_e_grid - (regnew['e'] + 3. * regnew['ewres'])) _e_idx = np.where(_e_dist == np.min(_e_dist))[0][0] _e = float(_e_grid[_e_idx]) # Finally make the region g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns * reg['nsres']), ewres=str(grid_ratio_ew * reg['ewres'])) # And then make the grid v.mkgrid(map=grid, overwrite=gscript.overwrite()) # Cell numbers (row, column, continuous ID) v.db_addcolumn(map=grid, columns='id int', quiet=True) colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns']) colValues = np.array( gscript.vector_db_select(grid, layer=1)['values'].values()) cats = colValues[:, colNames == 'cat'].astype(int).squeeze() rows = colValues[:, colNames == 'row'].astype(int).squeeze() cols = colValues[:, colNames == 'col'].astype(int).squeeze() nrows = np.max(rows) ncols = np.max(cols) cats = np.ravel([cats]) _id = np.ravel([ncols * (rows - 1) + cols]) _id_cat = [] for i in range(len(_id)): _id_cat.append((_id[i], cats[i])) gridTopo = VectorTopo(grid) gridTopo.open('rw') cur = gridTopo.table.conn.cursor() cur.executemany("update " + grid + " set id=? 
where cat=?", _id_cat) gridTopo.table.conn.commit() gridTopo.close() # Cell area v.db_addcolumn(map=grid, columns='area_m2', quiet=True) v.to_db(map=grid, option='area', units='meters', columns='area_m2', quiet=True) # Basin mask if len(mask) > 0: # Fine resolution region: g.region(n=reg['n'], s=reg['s'], w=reg['w'], e=reg['e'], nsres=reg['nsres'], ewres=reg['ewres']) # Rasterize basin v.to_rast(input=basin, output=mask, use='val', value=1, overwrite=gscript.overwrite(), quiet=True) # Coarse resolution region: g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns * reg['nsres']), ewres=str(grid_ratio_ew * reg['ewres'])) r.resamp_stats(input=mask, output=mask, method='sum', overwrite=True, quiet=True) r.mapcalc('tmp' + ' = ' + mask + ' > 0', overwrite=True, quiet=True) g.rename(raster=('tmp', mask), overwrite=True, quiet=True) r.null(map=mask, null=0, quiet=True) # Add mask location (1 vs 0) in the MODFLOW grid v.db_addcolumn(map=grid, columns='basinmask double precision', quiet=True) v.what_rast(map=grid, type='centroid', raster=mask, column='basinmask') """ # Resampled raster if len(raster_output) > 0: r.resamp_stats(input=raster_input, output=raster_output, method='average', overwrite=gscript.overwrite(), quiet=True) """ # Pour point if len(pp) > 0: v.db_addcolumn(map=pp, columns=('row integer', 'col integer'), quiet=True) v.build(map=pp, quiet=True) v.what_vect(map=pp, query_map=grid, column='row', query_column='row', quiet=True) v.what_vect(map=pp, query_map=grid, column='col', query_column='col', quiet=True) # Next point downstream of the pour point # Requires pp (always) and mask (sometimes) # Dependency set above w/ gscript.fatal if len(bc_cell) > 0: ########## NEED TO USE TRUE TEMPORARY FILE ########## # May not work with dx != dy! 
v.to_rast(input=pp, output='tmp', use='val', value=1, overwrite=True) r.buffer(input='tmp', output='tmp', distances=float(dx) * 1.5, overwrite=True) r.mapcalc('tmp2 = if(tmp==2,1,null()) * ' + raster_input, overwrite=True) g.rename(raster=('tmp2', 'tmp'), overwrite=True, quiet=True) #r.mapcalc('tmp = if(isnull('+raster_input+',0,(tmp == 2)))', overwrite=True) #g.region(rast='tmp') #r.null(map=raster_input, r.drain(input=raster_input, start_points=pp, output='tmp2', overwrite=True) r.mapcalc('tmp3 = tmp2 * tmp', overwrite=True, quiet=True) g.rename(raster=('tmp3', 'tmp'), overwrite=True, quiet=True) #r.null(map='tmp', setnull=0) # Not necessary: center point removed above r.to_vect(input='tmp', output=bc_cell, type='point', column='z', overwrite=gscript.overwrite(), quiet=True) v.db_addcolumn(map=bc_cell, columns=('row integer', 'col integer', 'x double precision', 'y double precision'), quiet=True) v.build(map=bc_cell, quiet=True) v.what_vect(map=bc_cell, query_map=grid, column='row', \ query_column='row', quiet=True) v.what_vect(map=bc_cell, query_map=grid, column='col', \ query_column='col', quiet=True) v.to_db(map=bc_cell, option='coor', columns=('x,y')) # Find out if this is diagonal: finite difference works only N-S, W-E colNames = np.array(gscript.vector_db_select(pp, layer=1)['columns']) colValues = np.array( gscript.vector_db_select(pp, layer=1)['values'].values()) pp_row = int(colValues[:, colNames == 'row'].astype(int).squeeze()) pp_col = int(colValues[:, colNames == 'col'].astype(int).squeeze()) colNames = np.array( gscript.vector_db_select(bc_cell, layer=1)['columns']) colValues = np.array( gscript.vector_db_select(bc_cell, layer=1)['values'].values()) bc_row = int(colValues[:, colNames == 'row'].astype(int).squeeze()) bc_col = int(colValues[:, colNames == 'col'].astype(int).squeeze()) # Also get x and y while we are at it: may be needed later bc_x = float(colValues[:, colNames == 'x'].astype(float).squeeze()) bc_y = float(colValues[:, colNames == 'y'].astype(float).squeeze()) if (bc_row != pp_row) and (bc_col != pp_col): # If not diagonal, two possible locations that are adjacent # to the pour point _col1, _row1 = str(bc_col), str(pp_row) _col2, _row2 = str(pp_col), str(bc_row) # Check if either of these is covered by the basin mask _ismask_1 = gscript.vector_db_select(grid, layer=1, where='(row == ' + _row1 + ') AND (col ==' + _col1 + ')', columns='basinmask') _ismask_1 = int(_ismask_1['values'].values()[0][0]) _ismask_2 = gscript.vector_db_select(grid, layer=1, where='(row == ' + _row2 + ') AND (col ==' + _col2 + ')', columns='basinmask') _ismask_2 = int(_ismask_2['values'].values()[0][0]) # If both covered by mask, error if _ismask_1 and _ismask_2: gscript.fatal( 'All possible b.c. cells covered by basin mask.\n\ Contact the developer: awickert (at) umn(.)edu') # Otherwise, those that keep those that are not covered by basin # mask and set ... # ... wait, do we want the point that touches as few interior # cells as possible? # maybe just try setting both and seeing what happens for now! else: # Get dx and dy dx = gscript.region()['ewres'] dy = gscript.region()['nsres'] # Build tool to handle multiple b.c. cells? 
bcvect = vector.Vector(bc_cell) bcvect.open('rw') _cat_i = 2 if not _ismask_1: # _x should always be bc_x, but writing generalized code _x = bc_x + dx * (int(_col1) - bc_col) # col 1 at w edge _y = bc_y - dy * (int(_row1) - bc_row) # row 1 at n edge point0 = Point(_x, _y) bcvect.write( point0, cat=_cat_i, attrs=(None, _row1, _col1, _x, _y), ) bcvect.table.conn.commit() _cat_i += 1 if not _ismask_2: # _y should always be bc_y, but writing generalized code _x = bc_x + dx * (int(_col2) - bc_col) # col 1 at w edge _y = bc_y - dy * (int(_row2) - bc_row) # row 1 at n edge point0 = Point(_x, _y) bcvect.write( point0, cat=_cat_i, attrs=(None, _row2, _col2, _x, _y), ) bcvect.table.conn.commit() # Build database table and vector geometry bcvect.build() bcvect.close() g.region(n=reg['n'], s=reg['s'], w=reg['w'], e=reg['e'], nsres=reg['nsres'], ewres=reg['ewres'])
def main(): """ Builds a grid for the MODFLOW component of the USGS hydrologic model, GSFLOW. """ options, flags = gscript.parser() basin = options["basin"] pp = options["pour_point"] raster_input = options["raster_input"] dx = options["dx"] dy = options["dy"] grid = options["output"] mask = options["mask_output"] bc_cell = options["bc_cell"] # basin='basins_tmp_onebasin'; pp='pp_tmp'; raster_input='DEM'; raster_output='DEM_coarse'; dx=dy='500'; grid='grid_tmp'; mask='mask_tmp' """ # Fatal if raster input and output are not both set _lena0 = (len(raster_input) == 0) _lenb0 = (len(raster_output) == 0) if _lena0 + _lenb0 == 1: gscript.fatal("You must set both raster input and output, or neither.") """ # Fatal if bc_cell set but mask and grid are false if bc_cell != "": if (mask == "") or (pp == ""): gscript.fatal( "Mask and pour point must be set to define b.c. cell") # Create grid -- overlaps DEM, three cells of padding g.region(raster=raster_input, ewres=dx, nsres=dy) gscript.use_temp_region() reg = gscript.region() reg_grid_edges_sn = np.linspace(reg["s"], reg["n"], reg["rows"]) reg_grid_edges_we = np.linspace(reg["w"], reg["e"], reg["cols"]) g.region(vector=basin, ewres=dx, nsres=dy) regnew = gscript.region() # Use a grid ratio -- don't match exactly the desired MODFLOW resolution grid_ratio_ns = np.round(regnew["nsres"] / reg["nsres"]) grid_ratio_ew = np.round(regnew["ewres"] / reg["ewres"]) # Get S, W, and then move the unit number of grid cells over to get N and E # and include 3 cells of padding around the whole watershed _s_dist = np.abs(reg_grid_edges_sn - (regnew["s"] - 3.0 * regnew["nsres"])) _s_idx = np.where(_s_dist == np.min(_s_dist))[0][0] _s = float(reg_grid_edges_sn[_s_idx]) _n_grid = np.arange(_s, reg["n"] + 3 * grid_ratio_ns * reg["nsres"], grid_ratio_ns * reg["nsres"]) _n_dist = np.abs(_n_grid - (regnew["n"] + 3.0 * regnew["nsres"])) _n_idx = np.where(_n_dist == np.min(_n_dist))[0][0] _n = float(_n_grid[_n_idx]) _w_dist = np.abs(reg_grid_edges_we - (regnew["w"] - 3.0 * regnew["ewres"])) _w_idx = np.where(_w_dist == np.min(_w_dist))[0][0] _w = float(reg_grid_edges_we[_w_idx]) _e_grid = np.arange(_w, reg["e"] + 3 * grid_ratio_ew * reg["ewres"], grid_ratio_ew * reg["ewres"]) _e_dist = np.abs(_e_grid - (regnew["e"] + 3.0 * regnew["ewres"])) _e_idx = np.where(_e_dist == np.min(_e_dist))[0][0] _e = float(_e_grid[_e_idx]) # Finally make the region g.region( w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns * reg["nsres"]), ewres=str(grid_ratio_ew * reg["ewres"]), ) # And then make the grid v.mkgrid(map=grid, overwrite=gscript.overwrite()) # Cell numbers (row, column, continuous ID) v.db_addcolumn(map=grid, columns="id int", quiet=True) colNames = np.array(gscript.vector_db_select(grid, layer=1)["columns"]) colValues = np.array( gscript.vector_db_select(grid, layer=1)["values"].values()) cats = colValues[:, colNames == "cat"].astype(int).squeeze() rows = colValues[:, colNames == "row"].astype(int).squeeze() cols = colValues[:, colNames == "col"].astype(int).squeeze() nrows = np.max(rows) ncols = np.max(cols) cats = np.ravel([cats]) _id = np.ravel([ncols * (rows - 1) + cols]) _id_cat = [] for i in range(len(_id)): _id_cat.append((_id[i], cats[i])) gridTopo = VectorTopo(grid) gridTopo.open("rw") cur = gridTopo.table.conn.cursor() cur.executemany("update " + grid + " set id=? 
where cat=?", _id_cat) gridTopo.table.conn.commit() gridTopo.close() # Cell area v.db_addcolumn(map=grid, columns="area_m2 double precision", quiet=True) v.to_db(map=grid, option="area", units="meters", columns="area_m2", quiet=True) # Basin mask if len(mask) > 0: # Fine resolution region: g.region( n=reg["n"], s=reg["s"], w=reg["w"], e=reg["e"], nsres=reg["nsres"], ewres=reg["ewres"], ) # Rasterize basin v.to_rast( input=basin, output=mask, use="val", value=1, overwrite=gscript.overwrite(), quiet=True, ) # Coarse resolution region: g.region( w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns * reg["nsres"]), ewres=str(grid_ratio_ew * reg["ewres"]), ) r.resamp_stats(input=mask, output=mask, method="sum", overwrite=True, quiet=True) r.mapcalc("tmp" + " = " + mask + " > 0", overwrite=True, quiet=True) g.rename(raster=("tmp", mask), overwrite=True, quiet=True) r.null(map=mask, null=0, quiet=True) # Add mask location (1 vs 0) in the MODFLOW grid v.db_addcolumn(map=grid, columns="basinmask double precision", quiet=True) v.what_rast(map=grid, type="centroid", raster=mask, column="basinmask") """ # Resampled raster if len(raster_output) > 0: r.resamp_stats(input=raster_input, output=raster_output, method='average', overwrite=gscript.overwrite(), quiet=True) """ # Pour point if len(pp) > 0: v.db_addcolumn(map=pp, columns=("row integer", "col integer"), quiet=True) v.build(map=pp, quiet=True) v.what_vect(map=pp, query_map=grid, column="row", query_column="row", quiet=True) v.what_vect(map=pp, query_map=grid, column="col", query_column="col", quiet=True) # Next point downstream of the pour point # Requires pp (always) and mask (sometimes) # Dependency set above w/ gscript.fatal # g.region(raster='DEM') # dx = gscript.region()['ewres'] # dy = gscript.region()['nsres'] if len(bc_cell) > 0: ########## NEED TO USE TRUE TEMPORARY FILE ########## # May not work with dx != dy! 
v.to_rast(input=pp, output="tmp", use="val", value=1, overwrite=True) r.buffer(input="tmp", output="tmp", distances=float(dx) * 1.5, overwrite=True) r.mapcalc("tmp2 = if(tmp==2,1,null()) * " + raster_input, overwrite=True) # r.mapcalc('tmp = if(isnull('+raster_input+',0,(tmp == 2)))', overwrite=True) # g.region(rast='tmp') # r.null(map=raster_input, # g.region(raster=raster_input) # r.resample(input=raster_input, output='tmp3', overwrite=True) r.resamp_stats(input=raster_input, output="tmp3", method="minimum", overwrite=True) r.drain(input="tmp3", start_points=pp, output="tmp", overwrite=True) # g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns*reg['nsres']), ewres=str(grid_ratio_ew*reg['ewres'])) # r.resamp_stats(input='tmp2', output='tmp3', overwrite=True) # g.rename(raster=('tmp3','tmp2'), overwrite=True, quiet=True) r.mapcalc("tmp3 = tmp2 * tmp", overwrite=True, quiet=True) g.rename(raster=("tmp3", "tmp"), overwrite=True, quiet=True) # r.null(map='tmp', setnull=0) # Not necessary: center point removed above r.to_vect( input="tmp", output=bc_cell, type="point", column="z", overwrite=gscript.overwrite(), quiet=True, ) v.db_addcolumn( map=bc_cell, columns=( "row integer", "col integer", "x double precision", "y double precision", ), quiet=True, ) v.build(map=bc_cell, quiet=True) v.what_vect(map=bc_cell, query_map=grid, column="row", query_column="row", quiet=True) v.what_vect(map=bc_cell, query_map=grid, column="col", query_column="col", quiet=True) v.to_db(map=bc_cell, option="coor", columns=("x,y")) # Of the candidates, the pour point is the closest one # v.db_addcolumn(map=bc_cell, columns=('dist_to_pp double precision'), quiet=True) # v.distance(from_=bc_cell, to=pp, upload='dist', column='dist_to_pp') # Find out if this is diagonal: finite difference works only N-S, W-E colNames = np.array(gscript.vector_db_select(pp, layer=1)["columns"]) colValues = np.array( gscript.vector_db_select(pp, layer=1)["values"].values()) pp_row = colValues[:, colNames == "row"].astype(int).squeeze() pp_col = colValues[:, colNames == "col"].astype(int).squeeze() colNames = np.array( gscript.vector_db_select(bc_cell, layer=1)["columns"]) colValues = np.array( gscript.vector_db_select(bc_cell, layer=1)["values"].values()) bc_row = colValues[:, colNames == "row"].astype(int).squeeze() bc_col = colValues[:, colNames == "col"].astype(int).squeeze() # Also get x and y while we are at it: may be needed later bc_x = colValues[:, colNames == "x"].astype(float).squeeze() bc_y = colValues[:, colNames == "y"].astype(float).squeeze() if (bc_row != pp_row).all() and (bc_col != pp_col).all(): if bc_row.ndim > 0: if len(bc_row) > 1: for i in range(len(bc_row)): """ UNTESTED!!!! And probably unimportant -- having 2 cells with river going through them is most likely going to happen with two adjacent cells -- so a side and a corner """ _col1, _row1 = str(bc_col[i]), str(pp_row[i]) _col2, _row2 = str(pp_col[i]), str(bc_row[i]) # Check if either of these is covered by the basin mask _ismask_1 = gscript.vector_db_select( grid, layer=1, where="(row == " + _row1 + ") AND (col ==" + _col1 + ")", columns="basinmask", ) _ismask_1 = int(_ismask_1["values"].values()[0][0]) _ismask_2 = gscript.vector_db_select( grid, layer=1, where="(row == " + _row2 + ") AND (col ==" + _col2 + ")", columns="basinmask", ) _ismask_2 = int(_ismask_2["values"].values()[0][0]) # check if either of these is the other point """ NOT DOING THIS YET -- HAVEN'T THOUGHT THROUGH IF ACTUALLY NECESSARY. 
(And this is an edge case anyway) """ # If both covered by mask, error if _ismask_1 and _ismask_2: gscript.fatal( "All possible b.c. cells covered by basin mask.\n\ Contact the developer: awickert (at) umn(.)edu" ) # If not diagonal, two possible locations that are adjacent # to the pour point _col1, _row1 = str(bc_col), str(pp_row) _col2, _row2 = str(pp_col), str(bc_row) # Check if either of these is covered by the basin mask _ismask_1 = gscript.vector_db_select( grid, layer=1, where="(row == " + _row1 + ") AND (col ==" + _col1 + ")", columns="basinmask", ) _ismask_1 = int(_ismask_1["values"].values()[0][0]) _ismask_2 = gscript.vector_db_select( grid, layer=1, where="(row == " + _row2 + ") AND (col ==" + _col2 + ")", columns="basinmask", ) _ismask_2 = int(_ismask_2["values"].values()[0][0]) # If both covered by mask, error if _ismask_1 and _ismask_2: gscript.fatal( "All possible b.c. cells covered by basin mask.\n\ Contact the developer: awickert (at) umn(.)edu") # Otherwise, those that keep those that are not covered by basin # mask and set ... # ... wait, do we want the point that touches as few interior # cells as possible? # maybe just try setting both and seeing what happens for now! else: # Get dx and dy # dx = gscript.region()['ewres'] # dy = gscript.region()['nsres'] # Build tool to handle multiple b.c. cells? bcvect = vector.Vector(bc_cell) bcvect.open("rw") _cat_i = 2 if _ismask_1 != 0: # _x should always be bc_x, but writing generalized code _x = bc_x + float(dx) * (int(_col1) - bc_col ) # col 1 at w edge _y = bc_y - float(dy) * (int(_row1) - bc_row ) # row 1 at n edge point0 = Point(_x, _y) bcvect.write( point0, cat=_cat_i, attrs=(None, _row1, _col1, _x, _y), ) bcvect.table.conn.commit() _cat_i += 1 if _ismask_2 != 0: # _y should always be bc_y, but writing generalized code _x = bc_x + float(dx) * (int(_col2) - bc_col ) # col 1 at w edge _y = bc_y - float(dy) * (int(_row2) - bc_row ) # row 1 at n edge point0 = Point(_x, _y) bcvect.write( point0, cat=_cat_i, attrs=(None, _row2, _col2, _x, _y), ) bcvect.table.conn.commit() # Build database table and vector geometry bcvect.build() bcvect.close() g.region( n=reg["n"], s=reg["s"], w=reg["w"], e=reg["e"], nsres=reg["nsres"], ewres=reg["ewres"], )
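Several of these scripts pull attribute tables into numpy with gscript.vector_db_select and boolean column masks. A small helper sketch of that recurring pattern (the function name is invented); wrapping values() in list() keeps it working on Python 3, where dict views are not subscriptable:

import numpy as np
import grass.script as gscript

def attr_array(vect, column, dtype=float):
    """Return one attribute column of a vector map as a numpy array."""
    sel = gscript.vector_db_select(vect, layer=1)
    names = np.array(sel['columns'])
    values = np.array(list(sel['values'].values()))
    return values[:, names == column].astype(dtype).squeeze()

# e.g. rows = attr_array('grid_tmp', 'row', dtype=int)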
def main(): """ Import any raster or vector data set and add its attribute to a GSFLOW data object """ ################## # OPTION PARSING # ################## options, flags = gscript.parser() # Parsing if options["attrtype"] == "int": attrtype = "integer" elif options["attrtype"] == "float": attrtype = "double precision" elif options["attrtype"] == "string": attrtype = "varchar" else: attrtype = "" ######################################## # PROCESS AND UPLOAD TO DATABASE TABLE # ######################################## if options["vector_area"] is not "": gscript.use_temp_region() g.region(vector=options["map"], res=options["dxy"]) v.to_rast( input=options["vector_area"], output="tmp___tmp", use="attr", attribute_column=options["from_column"], quiet=True, overwrite=True, ) try: gscript.message("Checking for existing column to overwrite") v.db_dropcolumn(map=options["map"], columns=options["column"], quiet=True) except: pass if attrtype is "double precision": try: gscript.message("Checking for existing column to overwrite") v.db_dropcolumn(map=options["map"], columns="tmp_average", quiet=True) except: pass v.rast_stats( map=options["map"], raster="tmp___tmp", column_prefix="tmp", method="average", flags="c", quiet=True, ) g.remove(type="raster", name="tmp___tmp", flags="f", quiet=True) v.db_renamecolumn( map=options["map"], column=["tmp_average", options["column"]], quiet=True, ) else: try: v.db_addcolumn( map=options["map"], columns=options["column"] + " " + attrtype, quiet=True, ) except: pass gscript.run_command( "v.distance", from_=options["map"], to=options["vector_area"], upload="to_attr", to_column=options["from_column"], column=options["column"], quiet=True, ) elif options["vector_points"] is not "": try: gscript.message("Checking for existing column to overwrite") v.db_dropcolumn(map=options["map"], columns=options["column"], quiet=True) v.db_addcolumn( map=options["map"], columns=options["column"] + " " + attrtype, quiet=True, ) except: pass gscript.run_command( "v.distance", from_=options["map"], to=options["vector_points"], upload="to_attr", to_column=options["from_column"], column=options["column"], quiet=True, ) elif options["raster"] is not "": try: gscript.message("Checking for existing column to overwrite") v.db_dropcolumn(map=options["map"], columns=options["column"], quiet=True) except: pass v.rast_stats( map=options["map"], raster=options["raster"], column_prefix="tmp", method="average", flags="c", quiet=True, ) v.db_renamecolumn(map=options["map"], column=["tmp_average", options["column"]], quiet=True) gscript.message("Done.")
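A condensed sketch of the raster branch above: drop any stale column, upload the zonal mean with v.rast.stats, then rename the prefixed result (the example map and column names are assumptions). Note that option strings are more robustly compared with != "" than with is not "", which relies on CPython interning the empty string.

from grass.pygrass.modules.shortcuts import vector as v

def upload_raster_average(vect, rast, column):
    """Attach the zonal mean of `rast` to the attribute table of `vect`."""
    try:
        v.db_dropcolumn(map=vect, columns=column, quiet=True)
    except Exception:
        pass
    v.rast_stats(map=vect, raster=rast, column_prefix='tmp',
                 method='average', flags='c', quiet=True)
    v.db_renamecolumn(map=vect, column=['tmp_average', column], quiet=True)

# e.g. upload_raster_average('grid_tmp', 'DEM', 'elev_mean')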
def main(): """ Builds a grid for the MODFLOW component of the USGS hydrologic model, GSFLOW. """ options, flags = gscript.parser() basin = options['basin'] pp = options['pour_point'] raster_input = options['raster_input'] dx = options['dx'] dy = options['dy'] grid = options['output'] mask = options['mask_output'] bc_cell = options['bc_cell'] # basin='basins_tmp_onebasin'; pp='pp_tmp'; raster_input='DEM'; raster_output='DEM_coarse'; dx=dy='500'; grid='grid_tmp'; mask='mask_tmp' """ # Fatal if raster input and output are not both set _lena0 = (len(raster_input) == 0) _lenb0 = (len(raster_output) == 0) if _lena0 + _lenb0 == 1: grass.fatal("You must set both raster input and output, or neither.") """ # Create grid -- overlaps DEM, one cell of padding gscript.use_temp_region() reg = gscript.region() reg_grid_edges_sn = np.linspace(reg['s'], reg['n'], reg['rows']) reg_grid_edges_we = np.linspace(reg['w'], reg['e'], reg['cols']) g.region(vector=basin, ewres=dx, nsres=dy) regnew = gscript.region() # Use a grid ratio -- don't match exactly the desired MODFLOW resolution grid_ratio_ns = np.round(regnew['nsres'] / reg['nsres']) grid_ratio_ew = np.round(regnew['ewres'] / reg['ewres']) # Get S, W, and then move the unit number of grid cells over to get N and E # and include 3 cells of padding around the whole watershed _s_dist = np.abs(reg_grid_edges_sn - (regnew['s'] - 3. * regnew['nsres'])) _s_idx = np.where(_s_dist == np.min(_s_dist))[0][0] _s = float(reg_grid_edges_sn[_s_idx]) _n_grid = np.arange(_s, reg['n'] + 3 * grid_ratio_ns * reg['nsres'], grid_ratio_ns * reg['nsres']) _n_dist = np.abs(_n_grid - (regnew['n'] + 3. * regnew['nsres'])) _n_idx = np.where(_n_dist == np.min(_n_dist))[0][0] _n = float(_n_grid[_n_idx]) _w_dist = np.abs(reg_grid_edges_we - (regnew['w'] - 3. * regnew['ewres'])) _w_idx = np.where(_w_dist == np.min(_w_dist))[0][0] _w = float(reg_grid_edges_we[_w_idx]) _e_grid = np.arange(_w, reg['e'] + 3 * grid_ratio_ew * reg['ewres'], grid_ratio_ew * reg['ewres']) _e_dist = np.abs(_e_grid - (regnew['e'] + 3. * regnew['ewres'])) _e_idx = np.where(_e_dist == np.min(_e_dist))[0][0] _e = float(_e_grid[_e_idx]) # Finally make the region g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns * reg['nsres']), ewres=str(grid_ratio_ew * reg['ewres'])) # And then make the grid v.mkgrid(map=grid, overwrite=gscript.overwrite()) # Cell numbers (row, column, continuous ID) v.db_addcolumn(map=grid, columns='id int', quiet=True) colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns']) colValues = np.array( gscript.vector_db_select(grid, layer=1)['values'].values()) cats = colValues[:, colNames == 'cat'].astype(int).squeeze() rows = colValues[:, colNames == 'row'].astype(int).squeeze() cols = colValues[:, colNames == 'col'].astype(int).squeeze() nrows = np.max(rows) ncols = np.max(cols) cats = np.ravel([cats]) _id = np.ravel([ncols * (rows - 1) + cols]) _id_cat = [] for i in range(len(_id)): _id_cat.append((_id[i], cats[i])) gridTopo = VectorTopo(grid) gridTopo.open('rw') cur = gridTopo.table.conn.cursor() cur.executemany("update " + grid + " set id=? 
where cat=?", _id_cat) gridTopo.table.conn.commit() gridTopo.close() # Cell area v.db_addcolumn(map=grid, columns='area_m2', quiet=True) v.to_db(map=grid, option='area', units='meters', columns='area_m2', quiet=True) # Basin mask if len(mask) > 0: # Fine resolution region: g.region(n=reg['n'], s=reg['s'], w=reg['w'], e=reg['e'], nsres=reg['nsres'], ewres=reg['ewres']) # Rasterize basin v.to_rast(input=basin, output=mask, use='val', value=1, overwrite=gscript.overwrite(), quiet=True) # Coarse resolution region: g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns * reg['nsres']), ewres=str(grid_ratio_ew * reg['ewres'])) r.resamp_stats(input=mask, output=mask, method='sum', overwrite=True, quiet=True) r.mapcalc(mask + ' = ' + mask + ' > 0', overwrite=True, quiet=True) """ # Resampled raster if len(raster_output) > 0: r.resamp_stats(input=raster_input, output=raster_output, method='average', overwrite=gscript.overwrite(), quiet=True) """ # Pour point if len(pp) > 0: v.db_addcolumn(map=pp, columns=('row integer', 'col integer'), quiet=True) v.build(map=pp, quiet=True) v.what_vect(map=pp, query_map=grid, column='row', query_column='row', quiet=True) v.what_vect(map=pp, query_map=grid, column='col', query_column='col', quiet=True) # Next point downstream of the pour point if len(bc_cell) > 0: ########## NEED TO USE TRUE TEMPORARY FILE ########## # May not work with dx != dy! v.to_rast(input=pp, output='tmp', use='val', value=1, overwrite=True) r.buffer(input='tmp', output='tmp', distances=float(dx) * 1.5, overwrite=True) r.mapcalc('tmp = (tmp == 2) * ' + raster_input, overwrite=True) r.drain(input=raster_input, start_points=pp, output='tmp2', overwrite=True) r.mapcalc('tmp = tmp2 * tmp', overwrite=True) r.null(map='tmp', setnull=0) r.to_vect(input='tmp', output=bc_cell, type='point', column='z', overwrite=gscript.overwrite(), quiet=True) v.db_addcolumn(map=bc_cell, columns=('row integer', 'col integer'), quiet=True) v.build(map=bc_cell, quiet=True) v.what_vect(map=bc_cell, query_map=grid, column='row', \ query_column='row', quiet=True) v.what_vect(map=bc_cell, query_map=grid, column='col', \ query_column='col', quiet=True) g.region(n=reg['n'], s=reg['s'], w=reg['w'], e=reg['e'], nsres=reg['nsres'], ewres=reg['ewres'])
def rasterizeCs(reso):
    g.region(res=reso)
    v.to_rast(input=csmap, output='cr', use='attr', attribute_column=fcol,
              overwrite=True)
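A hypothetical usage sketch for the helper above; csmap and fcol are assumed to be module-level names holding the input vector map and the attribute column to burn into the raster:

csmap = 'cross_sections'   # name of an existing vector map (assumed)
fcol = 'value'             # numeric attribute column to rasterize (assumed)

rasterizeCs(10)            # rasterize at 10 map-unit resolution into 'cr'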
# x and y values across basin and more #sourcedir = '/media/awickert/Elements/Fluvial 2015/151109_MC_IW_01/Processed/' #sourcedir = '/media/awickert/data3/TerraceExperiment/Fluvial 2015/151109_MC_IW_01/Processed/' #sourcedirs = sorted(next(os.walk('/media/awickert/data3/TerraceExperiment/Fluvial 2015/'))[1]) #sourcedirs = sorted(glob.glob('/data3/TerraceExperiment/Forgotten/*/Processed/')) sourcedirs = sorted( glob.glob('/data3/TerraceExperiment/Fluvial 2015/*/Processed/')) length_y_trimmed = margin_top - margin_bottom length_x_trimmed = margin_right - margin_left g.region(w=margin_left / 1000., e=margin_right / 1000., s=margin_bottom / 1000., n=margin_top / 1000., res=0.001, flags='s') # Maps of x and y g.region(w=0, s=0, e=int(np.floor(margin_right * 1.5)) / 1000., n=int(np.floor(margin_top * 1.5)) / 1000.) try: r.mapcalc('x = x()') r.mapcalc('y = y()') except: pass g.region(flags='d')
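The block above switches the computational region several times and finally falls back to the default region with flags='d'. A sketch of the temporary-region helpers from grass.script, which restore whatever region was active before the script ran (the extent and resolution are example values):

import grass.script as gscript
from grass.pygrass.modules.shortcuts import general as g, raster as r

gscript.use_temp_region()                       # region changes stay local to this script
g.region(w=0, s=0, e=2.0, n=1.5, res=0.001)     # example extent in map units (assumed)
r.mapcalc('x = x()', overwrite=True)            # raster of cell-centre eastings
r.mapcalc('y = y()', overwrite=True)            # raster of cell-centre northings
gscript.del_temp_region()                       # previous region is restored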
reaches = reaches_df['Reach_no'] n = reaches_df['N'] s = reaches_df['S'] e = reaches_df['E'] w = reaches_df['W'] for i, reach_no in enumerate(reaches): # make the directory for this reach this_dir = DataDirectory + 'Upper_Miss_reach' + str(reach_no) + '/' try: os.mkdir(this_dir) except: pass # set the region g.region(n=str(n[i]), s=str(s[i]), e=str(e[i]), w=str(w[i]), flags='p') # now output the ENVI bil file r.out_gdal(input="Upper_Miss_filled", format="ENVI", type="Float32", nodata=-9999, output=this_dir + 'Upper_Miss_reach' + str(reach_no) + '.bil') # clip the baseline to this region v.in_region(output='region_tmp', overwrite=True) v.overlay(ainput='Mississippi_River', atype='line', binput='region_tmp', output='Mississippi_River_clip_tmp', operator='and',
def main(): elevation = options['elevation'] slope = options['slope'] flat_thres = float(options['flat_thres']) curv_thres = float(options['curv_thres']) filter_size = int(options['filter_size']) counting_size = int(options['counting_size']) nclasses = int(options['classes']) texture = options['texture'] convexity = options['convexity'] concavity = options['concavity'] features = options['features'] # remove mapset from output name in case of overwriting existing map texture = texture.split('@')[0] convexity = convexity.split('@')[0] concavity = concavity.split('@')[0] features = features.split('@')[0] # store current region settings global current_reg current_reg = parse_key_val(g.region(flags='pg', stdout_=PIPE).outputs.stdout) del current_reg['projection'] del current_reg['zone'] del current_reg['cells'] # check for existing mask and backup if found global mask_test mask_test = gs.list_grouped( type='rast', pattern='MASK')[gs.gisenv()['MAPSET']] if mask_test: global original_mask original_mask = temp_map('tmp_original_mask') g.copy(raster=['MASK', original_mask]) # error checking if flat_thres < 0: gs.fatal('Parameter thres cannot be negative') if filter_size % 2 == 0 or counting_size % 2 == 0: gs.fatal( 'Filter or counting windows require an odd-numbered window size') if filter_size >= counting_size: gs.fatal( 'Filter size needs to be smaller than the counting window size') if features != '' and slope == '': gs.fatal('Need to supply a slope raster in order to produce the terrain classification') # Terrain Surface Texture ------------------------------------------------- # smooth the dem gs.message("Calculating terrain surface texture...") gs.message( "1. Smoothing input DEM with a {n}x{n} median filter...".format( n=filter_size)) filtered_dem = temp_map('tmp_filtered_dem') gs.run_command("r.neighbors", input = elevation, method = "median", size = filter_size, output = filtered_dem, flags='c', quiet=True) # extract the pits and peaks based on the threshold pitpeaks = temp_map('tmp_pitpeaks') gs.message("2. Extracting pits and peaks with difference > thres...") r.mapcalc(expression='{x} = if ( abs({dem}-{median})>{thres}, 1, 0)'.format( x=pitpeaks, dem=elevation, thres=flat_thres, median=filtered_dem), quiet=True) # calculate density of pits and peaks gs.message("3. Using resampling filter to create terrain texture...") window_radius = (counting_size-1)/2 y_radius = float(current_reg['ewres'])*window_radius x_radius = float(current_reg['nsres'])*window_radius resample = temp_map('tmp_density') r.resamp_filter(input=pitpeaks, output=resample, filter=['bartlett','gauss'], radius=[x_radius,y_radius], quiet=True) # convert to percentage gs.message("4. Converting to percentage...") r.mask(raster=elevation, overwrite=True, quiet=True) r.mapcalc(expression='{x} = float({y} * 100)'.format(x=texture, y=resample), quiet=True) r.mask(flags='r', quiet=True) r.colors(map=texture, color='haxby', quiet=True) # Terrain convexity/concavity --------------------------------------------- # surface curvature using lacplacian filter gs.message("Calculating terrain convexity and concavity...") gs.message("1. 
Calculating terrain curvature using laplacian filter...") # grow the map to remove border effects and run laplacian filter dem_grown = temp_map('tmp_elevation_grown') laplacian = temp_map('tmp_laplacian') g.region(n=float(current_reg['n']) + (float(current_reg['nsres']) * filter_size), s=float(current_reg['s']) - (float(current_reg['nsres']) * filter_size), w=float(current_reg['w']) - (float(current_reg['ewres']) * filter_size), e=float(current_reg['e']) + (float(current_reg['ewres']) * filter_size)) r.grow(input=elevation, output=dem_grown, radius=filter_size, quiet=True) r.mfilter( input=dem_grown, output=laplacian, filter=string_to_rules(laplacian_matrix(filter_size)), quiet=True) # extract convex and concave pixels gs.message("2. Extracting convexities and concavities...") convexities = temp_map('tmp_convexities') concavities = temp_map('tmp_concavities') r.mapcalc( expression='{x} = if({laplacian}>{thres}, 1, 0)'\ .format(x=convexities, laplacian=laplacian, thres=curv_thres), quiet=True) r.mapcalc( expression='{x} = if({laplacian}<-{thres}, 1, 0)'\ .format(x=concavities, laplacian=laplacian, thres=curv_thres), quiet=True) # calculate density of convexities and concavities gs.message("3. Using resampling filter to create surface convexity/concavity...") resample_convex = temp_map('tmp_convex') resample_concav = temp_map('tmp_concav') r.resamp_filter(input=convexities, output=resample_convex, filter=['bartlett','gauss'], radius=[x_radius,y_radius], quiet=True) r.resamp_filter(input=concavities, output=resample_concav, filter=['bartlett','gauss'], radius=[x_radius,y_radius], quiet=True) # convert to percentages gs.message("4. Converting to percentages...") g.region(**current_reg) r.mask(raster=elevation, overwrite=True, quiet=True) r.mapcalc(expression='{x} = float({y} * 100)'.format(x=convexity, y=resample_convex), quiet=True) r.mapcalc(expression='{x} = float({y} * 100)'.format(x=concavity, y=resample_concav), quiet=True) r.mask(flags='r', quiet=True) # set colors r.colors_stddev(map=convexity, quiet=True) r.colors_stddev(map=concavity, quiet=True) # Terrain classification Flowchart----------------------------------------- if features != '': gs.message("Performing terrain surface classification...") # level 1 produces classes 1 thru 8 # level 2 produces classes 5 thru 12 # level 3 produces classes 9 thru 16 if nclasses == 8: levels = 1 if nclasses == 12: levels = 2 if nclasses == 16: levels = 3 classif = [] for level in range(levels): # mask previous classes x:x+4 if level != 0: min_cla = (4*(level+1))-4 clf_msk = temp_map('tmp_clf_mask') rules = '1:{0}:1'.format(min_cla) r.recode( input=classif[level-1], output=clf_msk, rules=string_to_rules(rules), overwrite=True) r.mask(raster=clf_msk, flags='i', quiet=True, overwrite=True) # image statistics smean = r.univar( map=slope, flags='g', stdout_=PIPE).outputs.stdout.split(os.linesep) smean = [i for i in smean if i.startswith('mean=') is True][0].split('=')[1] cmean = r.univar( map=convexity, flags='g', stdout_=PIPE).outputs.stdout.split(os.linesep) cmean = [i for i in cmean if i.startswith('mean=') is True][0].split('=')[1] tmean = r.univar( map=texture, flags='g', stdout_=PIPE).outputs.stdout.split(os.linesep) tmean = [i for i in tmean if i.startswith('mean=') is True][0].split('=')[1] classif.append(temp_map('tmp_classes')) if level != 0: r.mask(flags='r', quiet=True) classification(level+1, slope, smean, texture, tmean, convexity, cmean, classif[level]) # combine decision trees merged = [] for level in range(0, levels): if level > 0: 
min_cla = (4*(level+1))-4 merged.append(temp_map('tmp_merged')) r.mapcalc( expression='{x} = if({a}>{min}, {b}, {a})'.format( x=merged[level], min=min_cla, a=merged[level-1], b=classif[level])) else: merged.append(classif[level]) g.rename(raster=[merged[-1], features], quiet=True) del TMP_RAST[-1] # Write metadata ---------------------------------------------------------- history = 'r.terrain.texture ' for key, val in options.items(): history += key + '=' + str(val) + ' ' r.support(map=texture, title=texture, description='generated by r.terrain.texture', history=history) r.support(map=convexity, title=convexity, description='generated by r.terrain.texture', history=history) r.support(map=concavity, title=concavity, description='generated by r.terrain.texture', history=history) if features != '': r.support(map=features, title=features, description='generated by r.terrain.texture', history=history) # write color and category rules to tempfiles r.category( map=features, rules=string_to_rules(categories(nclasses)), separator='pipe') r.colors( map=features, rules=string_to_rules(colors(nclasses)), quiet=True) return 0
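string_to_rules(), defined elsewhere in this script, writes a rules string to a temporary file. An alternative sketch that pipes the rules straight to r.recode through stdin, assuming placeholder map names; the rule below recodes classes 1 through 4 to class 1:

from grass.pygrass.modules.shortcuts import raster as r

rules = '1:4:1'   # old_low:old_high:new  (example rule)
r.recode(input='tmp_classes', output='tmp_clf_mask', rules='-',
         stdin_=rules, overwrite=True)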
def main(): """ Builds river reaches for input to the USGS hydrologic model, GSFLOW. These reaches link the PRMS stream segments to the MODFLOW grid cells. """ ################## # OPTION PARSING # ################## options, flags = gscript.parser() segments = options["segment_input"] grid = options["grid_input"] reaches = options["output"] elevation = options["elevation"] Smin = options["s_min"] h_stream = options["h_stream"] x1 = options["upstream_easting_column_seg"] y1 = options["upstream_northing_column_seg"] x2 = options["downstream_easting_column_seg"] y2 = options["downstream_northing_column_seg"] tostream = options["tostream_cat_column_seg"] # Hydraulic paramters STRTHICK = options["strthick"] STRHC1 = options["strhc1"] THTS = options["thts"] THTI = options["thti"] EPS = options["eps"] UHC = options["uhc"] # Build reach maps by overlaying segments on grid if len(gscript.find_file(segments, element="vector")["name"]) > 0: v.extract( input=segments, output="GSFLOW_TEMP__", type="line", quiet=True, overwrite=True, ) v.overlay( ainput="GSFLOW_TEMP__", atype="line", binput=grid, output=reaches, operator="and", overwrite=gscript.overwrite(), quiet=True, ) g.remove(type="vector", name="GSFLOW_TEMP__", quiet=True, flags="f") else: gscript.fatal('No vector file "' + segments + '" found.') # Start editing database table reachesTopo = VectorTopo(reaches) reachesTopo.open("rw") # Rename a,b columns reachesTopo.table.columns.rename("a_" + x1, "x1") reachesTopo.table.columns.rename("a_" + x2, "x2") reachesTopo.table.columns.rename("a_" + y1, "y1") reachesTopo.table.columns.rename("a_" + y2, "y2") reachesTopo.table.columns.rename("a_NSEG", "NSEG") reachesTopo.table.columns.rename("a_ISEG", "ISEG") reachesTopo.table.columns.rename("a_stream_type", "stream_type") reachesTopo.table.columns.rename("a_type_code", "type_code") reachesTopo.table.columns.rename("a_cat", "rnum_cat") reachesTopo.table.columns.rename("a_" + tostream, "tostream") reachesTopo.table.columns.rename("a_id", "segment_id") reachesTopo.table.columns.rename("a_OUTSEG", "OUTSEG") reachesTopo.table.columns.rename("b_row", "row") reachesTopo.table.columns.rename("b_col", "col") reachesTopo.table.columns.rename("b_id", "cell_id") # Drop unnecessary columns cols = reachesTopo.table.columns.names() for col in cols: if (col[:2] == "a_") or (col[:2] == "b_"): reachesTopo.table.columns.drop(col) # Add new columns to 'reaches' reachesTopo.table.columns.add("KRCH", "integer") reachesTopo.table.columns.add("IRCH", "integer") reachesTopo.table.columns.add("JRCH", "integer") reachesTopo.table.columns.add("IREACH", "integer") reachesTopo.table.columns.add("RCHLEN", "double precision") reachesTopo.table.columns.add("STRTOP", "double precision") reachesTopo.table.columns.add("SLOPE", "double precision") reachesTopo.table.columns.add("STRTHICK", "double precision") reachesTopo.table.columns.add("STRHC1", "double precision") reachesTopo.table.columns.add("THTS", "double precision") reachesTopo.table.columns.add("THTI", "double precision") reachesTopo.table.columns.add("EPS", "double precision") reachesTopo.table.columns.add("UHC", "double precision") reachesTopo.table.columns.add("xr1", "double precision") reachesTopo.table.columns.add("xr2", "double precision") reachesTopo.table.columns.add("yr1", "double precision") reachesTopo.table.columns.add("yr2", "double precision") # Commit columns before editing (necessary?) 
reachesTopo.table.conn.commit() reachesTopo.close() # Update some columns that can be done now reachesTopo.open("rw") colNames = np.array(gscript.vector_db_select(reaches, layer=1)["columns"]) colValues = np.array(gscript.vector_db_select(reaches, layer=1)["values"].values()) cats = colValues[:, colNames == "cat"].astype(int).squeeze() nseg = np.arange(1, len(cats) + 1) nseg_cats = [] for i in range(len(cats)): nseg_cats.append((nseg[i], cats[i])) cur = reachesTopo.table.conn.cursor() # Hydrogeologic properties cur.execute("update " + reaches + " set STRTHICK=" + str(STRTHICK)) cur.execute("update " + reaches + " set STRHC1=" + str(STRHC1)) cur.execute("update " + reaches + " set THTS=" + str(THTS)) cur.execute("update " + reaches + " set THTI=" + str(THTI)) cur.execute("update " + reaches + " set EPS=" + str(EPS)) cur.execute("update " + reaches + " set UHC=" + str(UHC)) # Grid properties cur.execute("update " + reaches + " set KRCH=1") # Top layer: unchangable cur.executemany("update " + reaches + " set IRCH=? where row=?", nseg_cats) cur.executemany("update " + reaches + " set JRCH=? where col=?", nseg_cats) reachesTopo.table.conn.commit() reachesTopo.close() v.to_db(map=reaches, columns="RCHLEN", option="length", quiet=True) # Still to go after these: # STRTOP (added with slope) # IREACH (whole next section dedicated to this) # SLOPE (need z_start and z_end) # Now, the light stuff is over: time to build the reach order v.to_db(map=reaches, option="start", columns="xr1,yr1") v.to_db(map=reaches, option="end", columns="xr2,yr2") # Now just sort by category, find which stream has the same xr1 and yr1 as # x1 and y1 (or a_x1, a_y1) and then find where its endpoint matches another # starting point and move down the line. # v.db.select reaches col=cat,a_id,xr1,xr2 where="a_x1 = xr1" # First, get the starting coordinates of each stream segment # and a set of river ID's (ordered from 1...N) colNames = np.array(gscript.vector_db_select(segments, layer=1)["columns"]) colValues = np.array(gscript.vector_db_select(segments, layer=1)["values"].values()) number_of_segments = colValues.shape[0] segment_x1s = colValues[:, colNames == "x1"].astype(float).squeeze() segment_y1s = colValues[:, colNames == "y1"].astype(float).squeeze() segment_ids = colValues[:, colNames == "id"].astype(float).squeeze() # Then move back to the reaches map to produce the ordering colNames = np.array(gscript.vector_db_select(reaches, layer=1)["columns"]) colValues = np.array(gscript.vector_db_select(reaches, layer=1)["values"].values()) reach_cats = colValues[:, colNames == "cat"].astype(int).squeeze() reach_x1s = colValues[:, colNames == "xr1"].astype(float).squeeze() reach_y1s = colValues[:, colNames == "yr1"].astype(float).squeeze() reach_x2s = colValues[:, colNames == "xr2"].astype(float).squeeze() reach_y2s = colValues[:, colNames == "yr2"].astype(float).squeeze() segment_ids__reach = colValues[:, colNames == "segment_id"].astype(float).squeeze() for segment_id in segment_ids: reach_order_cats = [] downstream_directed = [] ssel = segment_ids == segment_id rsel = segment_ids__reach == segment_id # selector # Find first segment: x1y1 first here, but not necessarily later downstream_directed.append(1) _x_match = reach_x1s[rsel] == segment_x1s[ssel] _y_match = reach_y1s[rsel] == segment_y1s[ssel] _i_match = _x_match * _y_match x1y1 = True # false if x2y2 # Find cat _cat = int(reach_cats[rsel][_x_match * _y_match]) reach_order_cats.append(_cat) # Get end of reach = start of next one reach_x_end = float(reach_x2s[reach_cats 
== _cat]) reach_y_end = float(reach_y2s[reach_cats == _cat]) while _i_match.any(): _x_match = reach_x1s[rsel] == reach_x_end _y_match = reach_y1s[rsel] == reach_y_end _i_match = _x_match * _y_match if _i_match.any(): _cat = int(reach_cats[rsel][_x_match * _y_match]) reach_x_end = float(reach_x2s[reach_cats == _cat]) reach_y_end = float(reach_y2s[reach_cats == _cat]) reach_order_cats.append(_cat) _message = str(len(reach_order_cats)) + " " + str(len(reach_cats[rsel])) gscript.message(_message) # Reach order to database table reach_number__reach_order_cats = [] for i in range(len(reach_order_cats)): reach_number__reach_order_cats.append((i + 1, reach_order_cats[i])) reachesTopo = VectorTopo(reaches) reachesTopo.open("rw") cur = reachesTopo.table.conn.cursor() cur.executemany( "update " + reaches + " set IREACH=? where cat=?", reach_number__reach_order_cats, ) reachesTopo.table.conn.commit() reachesTopo.close() # TOP AND BOTTOM ARE OUT OF ORDER: SOME SEGS ARE BACKWARDS. UGH!!!! # NEED TO GET THEM IN ORDER TO GET THE Z VALUES AT START AND END # 2018.10.01: Updating this to use the computational region for the DEM g.region(raster=elevation) # Compute slope and starting elevations from the elevations at the start and # end of the reaches and the length of each reach] gscript.message("Obtaining elevation values from raster: may take time.") v.db_addcolumn(map=reaches, columns="zr1 double precision, zr2 double precision") zr1 = [] zr2 = [] for i in range(len(reach_cats)): _x = reach_x1s[i] _y = reach_y1s[i] # print _x, _y _z = float( gscript.parse_command( "r.what", map=elevation, coordinates=str(_x) + "," + str(_y) ) .keys()[0] .split("|")[-1] ) zr1.append(_z) _x = reach_x2s[i] _y = reach_y2s[i] _z = float( gscript.parse_command( "r.what", map=elevation, coordinates=str(_x) + "," + str(_y) ) .keys()[0] .split("|")[-1] ) zr2.append(_z) zr1_cats = [] zr2_cats = [] for i in range(len(reach_cats)): zr1_cats.append((zr1[i], reach_cats[i])) zr2_cats.append((zr2[i], reach_cats[i])) reachesTopo = VectorTopo(reaches) reachesTopo.open("rw") cur = reachesTopo.table.conn.cursor() cur.executemany("update " + reaches + " set zr1=? where cat=?", zr1_cats) cur.executemany("update " + reaches + " set zr2=? where cat=?", zr2_cats) reachesTopo.table.conn.commit() reachesTopo.close() # Use these to create slope -- backwards possible on DEM! v.db_update(map=reaches, column="SLOPE", value="(zr1 - zr2)/RCHLEN") v.db_update(map=reaches, column="SLOPE", value=Smin, where="SLOPE <= " + str(Smin)) # srtm_local_filled_grid = srtm_local_filled @ 200m (i.e. current grid) # resolution # r.to.vect in=srtm_local_filled_grid out=srtm_local_filled_grid col=z type=area --o# # NOT SURE IF IT IS BEST TO USE MEAN ELEVATION OR TOP ELEVATION!!!!!!!!!!!!!!!!!!!!!!! v.db_addcolumn(map=reaches, columns="z_topo_mean double precision") v.what_rast( map=reaches, raster=elevation, column="z_topo_mean" ) # , query_column='z') v.db_update( map=reaches, column="STRTOP", value="z_topo_mean -" + str(h_stream), quiet=True )
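The elevation-sampling loop above subscripts the keys of a parsed dictionary, which only works on Python 2. A Python-3-safe sketch that reads the pipe-separated output of r.what directly (raster name and coordinates are examples):

import grass.script as gscript

def sample_raster(raster, x, y):
    """Return the raster value at (x, y); r.what prints east|north|label|value."""
    line = gscript.read_command('r.what', map=raster,
                                coordinates='%f,%f' % (x, y)).strip()
    value = line.split('|')[-1]
    return float(value) if value != '*' else None   # '*' marks NULL cells

# e.g. z_start = sample_raster('DEM', 500000.0, 4800000.0)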
# get only those for zone 23 maps = [i for i in all_maps if 'zone_23' in i] regions = [ i for i in all_regions if i.endswith('zone_23') and i.startswith('region') ] # loop to reproject regions for i in regions: v.proj(location='newLocation_wgs84', mapset='variables_cut', input=i, output=i, overwrite=True) g.region(vector=(regions)) categorical = [ 'Drainage', 'Ecoregions', 'Landcover', 'Protected_areas', 'roads', 'Treecover', 'Tree_plantations', 'Water_presence' ] #len(grass.list_grouped(type = "raster", pattern = "*"+"101"+"*")["PERMANENT"]) # loop to reproject for reg in regions: # define region g.region(vector=reg, res=30, flags='ap') # get prefix for maps reg_pref = reg.split("buffer")[1]
def main(): r_elevation = options["elevation"] mrvbf = options["mrvbf"].split("@")[0] mrrtf = options["mrrtf"].split("@")[0] t_slope = float(options["t_slope"]) t_pctl_v = float(options["t_pctl_v"]) t_pctl_r = float(options["t_pctl_r"]) t_vf = float(options["t_rf"]) t_rf = float(options["t_rf"]) p_slope = float(options["p_slope"]) p_pctl = float(options["p_pctl"]) moving_window_square = flags["s"] min_cells = int(options["min_cells"]) global current_region, TMP_RAST, L TMP_RAST = {} current_region = Region() # some checks if (t_slope <= 0 or t_pctl_v <= 0 or t_pctl_r <= 0 or t_vf <= 0 or t_rf <= 0 or p_slope <= 0 or p_pctl <= 0): gs.fatal("Parameter values cannot be <= 0") if min_cells < 2: gs.fatal( "Minimum number of cells in generalized DEM cannot be less than 2") if min_cells > current_region.cells: gs.fatal( "Minimum number of cells in the generalized DEM cannot exceed the ungeneralized number of cells" ) # calculate the number of levels levels = 2 remaining_cells = current_region.cells while remaining_cells >= min_cells: levels += 1 g.region(nsres=Region().nsres * 3, ewres=Region().ewres * 3) remaining_cells = Region().cells current_region.write() if levels < 3: gs.fatal( "MRVBF algorithm requires a greater level of generalization. Reduce number of min_cells or use a larger computational region." ) gs.message("Parameter Settings") gs.message("------------------") gs.message("min_cells = %d will result in %d generalization steps" % (min_cells, levels)) # intermediate outputs Xres_step = list() Yres_step = list() DEM = list() SLOPE = list() F = list() PCTL = list() PVF = list() PVF_RF = list() VF = list() VF_RF = list() MRVBF = list() MRRTF = list() # step 1 at base resolution ------------------------------------------------------- L = 0 TMP_RAST[L] = list() Xres_step.append(current_region.ewres) Yres_step.append(current_region.nsres) DEM.append(r_elevation) radius = 3 step_message(L, Xres_step[L], Yres_step[L], current_region.cells, t_slope) # calculation of slope (S1) and calculation of flatness (F1) (equation 2) SLOPE.append(calc_slope(DEM[L])) F.append(flatness(SLOPE[L], t_slope, p_slope)) # calculation of elevation percentile PCTL for step 1 PCTL.append(elevation_percentile(DEM[L], radius, moving_window_square)) # transform elevation percentile to local lowness for step 1 (equation 3) PVF.append(prelim_flatness_valleys(F[L], PCTL[L], t_pctl_v, p_pctl)) if mrrtf != "": PVF_RF.append(prelim_flatness_ridges(F[L], PCTL[L], t_pctl_r, p_pctl)) # calculation of the valley flatness step 1 VF1 (equation 4) VF.append(valley_flatness(PVF[L], t_vf, p_slope)) MRVBF.append(None) if mrrtf != "": VF_RF.append(valley_flatness(PVF_RF[L], t_rf, p_slope)) MRRTF.append(None) # step 2 at base scale resolution ------------------------------------------------- L = 1 TMP_RAST[L] = list() Xres_step.append(current_region.ewres) Yres_step.append(current_region.nsres) DEM.append(r_elevation) t_slope /= 2.0 radius = 6 step_message(L, Xres_step[L], Yres_step[L], current_region.cells, t_slope) # calculation of flatness for step 2 (equation 5) SLOPE.append(SLOPE[L - 1]) F.append(flatness(SLOPE[L], t_slope, p_slope)) # calculation of elevation percentile PCTL for step 2 (radius of 6 cells) PCTL.append(elevation_percentile(r_elevation, radius, moving_window_square)) # PVF for step 2 (equation 6) PVF.append(prelim_flatness_valleys(F[L], PCTL[L], t_pctl_v, p_pctl)) if mrrtf != "": PVF_RF.append(prelim_flatness_ridges(F[L], PCTL[L], t_pctl_r, p_pctl)) # calculation of the valley flatness VF for step 2 (equation 7) 
VF.append(valley_flatness(PVF[L], t_vf, p_slope)) if mrrtf != "": VF_RF.append(valley_flatness(PVF_RF[L], t_rf, p_slope)) # calculation of MRVBF for step 2 MRVBF.append(calc_mrvbf(VF1=VF[L - 1], VF2=VF[L], t=t_pctl_v)) if mrrtf != "": MRRTF.append(calc_mrvbf(VF1=VF_RF[L - 1], VF2=VF_RF[L], t=t_pctl_r)) # update flatness for step 2 with combined flatness from F1 and F2 (equation 10) F[L] = combined_flatness(F[L - 1], F[L]) # remaining steps ----------------------------------------------------------------- # for steps >= 2, each step uses the smoothing radius of the current step # but at the dem resolution of the previous step remaining_cells = current_region.cells while remaining_cells >= min_cells: L += 1 TMP_RAST[L] = list() t_slope /= 2.0 Xres_step.append(Xres_step[L - 1] * 3) Yres_step.append(Yres_step[L - 1] * 3) radius = 6 # delete temporary maps from L-2 for tmap in TMP_RAST[L - 2]: if len(gs.find_file(tmap)["fullname"]) > 0: g.remove(type="raster", name=tmap, flags="f", quiet=True) # coarsen resolution to resolution of previous step (step L-1) and smooth DEM if L > 2: g.region(ewres=Xres_step[L - 1], nsres=Yres_step[L - 1]) step_message(L, Xres_step[L], Yres_step[L], remaining_cells, t_slope) DEM.append(smooth_dem(DEM[L - 1])) # calculate slope at coarser resolution SLOPE.append(calc_slope(DEM[L])) # refine slope back to base resolution if L > 2: SLOPE[L] = refine(SLOPE[L], current_region, method="bilinear") # coarsen resolution to current step L and calculate PCTL g.region(ewres=Xres_step[L], nsres=Yres_step[L]) remaining_cells = Region().cells DEM[L] = refine(DEM[L], Region(), method="average") PCTL.append(elevation_percentile(DEM[L], radius, moving_window_square)) # refine PCTL to base resolution PCTL[L] = refine(PCTL[L], current_region, method="bilinear") # calculate flatness F at the base resolution F.append(flatness(SLOPE[L], t_slope, p_slope)) # update flatness with combined flatness CF from the previous step F[L] = combined_flatness(F1=F[L - 1], F2=F[L]) # calculate preliminary valley flatness index PVF at the base resolution PVF.append(prelim_flatness_valleys(F[L], PCTL[L], t_pctl_v, p_pctl)) if mrrtf != "": PVF_RF.append( prelim_flatness_ridges(F[L], PCTL[L], t_pctl_r, p_pctl)) # calculate valley flatness index VF VF.append(valley_flatness(PVF[L], t_vf, p_slope)) if mrrtf != "": VF_RF.append(valley_flatness(PVF_RF[L], t_rf, p_slope)) # calculation of MRVBF MRVBF.append(calc_mrvbf(VF1=MRVBF[L - 1], VF2=VF[L], t=t_pctl_v)) if mrrtf != "": MRRTF.append(calc_mrvbf(VF1=MRRTF[L - 1], VF2=VF_RF[L], t=t_pctl_r)) # output final MRVBF -------------------------------------------------------------- current_region.write() gs.mapcalc("$x = $y", x=mrvbf, y=MRVBF[L]) if mrrtf != "": gs.mapcalc("$x = $y", x=mrrtf, y=MRRTF[L])
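smooth_dem(), refine(), flatness() and the other helpers used above are defined elsewhere in the addon and are not shown here. Purely as an illustration of the resampling step, a hypothetical refine() might look like the sketch below; it is not the addon's actual implementation:

from grass.pygrass.modules.shortcuts import raster as r

def refine(raster, region, method='bilinear'):
    """Hypothetical sketch: resample `raster` onto `region` and return the
    name of the resampled map."""
    region.write()                     # make the target region current
    out = raster + '_refined'
    if method == 'bilinear':
        r.resamp_interp(input=raster, output=out, method='bilinear',
                        overwrite=True)
    else:
        r.resamp_stats(input=raster, output=out, method=method,
                       overwrite=True)
    return out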
owsConnections = {}
owsConnections['default'] = {
    'dsn': 'PG:host=523.riedackerhof.ch dbname=gis user=gis password=gisHoch3',
    'url': 'https://523.riedackerhof.ch:4433/ows?',
    'username': 'gis',
    'password': 'gisHoch3',
    'layers': {
        'cultures': 'ch_gelan_kulturen_2014',
        'fieldblocks': 'ch_blw_erk2_feldblockkarte',
        'borders': 'ch_swisstopo_kantone'
    },
}

params = owsConnections['default']
dsn = params['dsn']

g.mapset(flags='c', mapset='solothurn')

v.in_ogr(dsn=dsn, layer=params['layers']['borders'], output='region',
         where="kantonsnum = 11")
g.region(vect='region')
g.region(flags='p')
v.in_ogr(dsn=dsn, layer=params['layers']['cultures'], output='cultures', flags='r')
v.in_ogr(dsn=dsn, layer=params['layers']['fieldblocks'], output='fieldblocks', flags='r')
v.overlay(ainput='fieldblocks', binput='cultures', operator='not', output='fff')
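A short follow-on sketch that stores polygon areas of the overlay result in its attribute table; the column name is an assumption:

from grass.pygrass.modules.shortcuts import vector as v

v.db_addcolumn(map='fff', columns='area_ha double precision', quiet=True)
v.to_db(map='fff', option='area', columns='area_ha', units='hectares', quiet=True)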