def main(): """ Build gravity reservoirs in GSFLOW: combines MODFLOW grid and HRU sub-basins These define the PRMS soil zone that connects to MODFLOW cells """ ################## # OPTION PARSING # ################## # I/O options, flags = gscript.parser() # I/O HRUs = options['hru_input'] grid = options['grid_input'] segments = options['output'] #col = options['col'] gravity_reservoirs = options['output'] ############ # ANALYSIS # ############ """ # Basin areas v.db_addcolumn(map=basins, columns=col) v.to_db(map=basins, option='area', units='meters', columns=col) """ # Create gravity reservoirs -- overlay cells=grid and HRUs v.overlay(ainput=HRUs, binput=grid, atype='area', btype='area', operator='and', output=gravity_reservoirs, overwrite=gscript.overwrite()) v.db_dropcolumn(map=gravity_reservoirs, columns='a_cat,a_label,b_cat', quiet=True) # Cell and HRU ID's v.db_renamecolumn(map=gravity_reservoirs, column=('a_id', 'gvr_hru_id'), quiet=True) v.db_renamecolumn(map=gravity_reservoirs, column=('b_id', 'gvr_cell_id'), quiet=True) # Percent areas v.db_renamecolumn(map=gravity_reservoirs, column=('a_hru_area_m2', 'hru_area_m2'), quiet=True) v.db_renamecolumn(map=gravity_reservoirs, column=('b_area_m2', 'cell_area_m2'), quiet=True) v.db_addcolumn(map=gravity_reservoirs, columns='area_m2 double precision', quiet=True) v.to_db(map=gravity_reservoirs, option='area', units='meters', columns='area_m2', quiet=True) v.db_addcolumn(map=gravity_reservoirs, columns='gvr_cell_pct double precision, gvr_hru_pct double precision', quiet=True) v.db_update(map=gravity_reservoirs, column='gvr_cell_pct', query_column='100*area_m2/cell_area_m2', quiet=True) v.db_update(map=gravity_reservoirs, column='gvr_hru_pct', query_column='100*area_m2/hru_area_m2', quiet=True) v.extract(input=gravity_reservoirs, output='tmp_', where="gvr_cell_pct > 0.001", overwrite=True, quiet=True) g.rename(vector=('tmp_',gravity_reservoirs), overwrite=True, quiet=True)
def main(): """ Links each river segment to the next downstream segment in a tributary network by referencing its category (cat) number in a new column. "0" means that the river exits the map. """ options, flags = gscript.parser() print options print flags # Attributes of streams colNames = np.array(vector_db_select(options['streams'])['columns']) colValues = np.array( vector_db_select(options['streams'])['values'].values()) tostream = colValues[:, colNames == 'tostream'].astype(int).squeeze() cats = colValues[:, colNames == 'cat'].astype(int).squeeze() # = "fromstream" # We can loop over this list to get the shape of the full river network. selected_cats = [] segment = int(options['cat']) selected_cats.append(segment) if options['direction'] == 'downstream': while selected_cats[-1] != 0: selected_cats.append(int(tostream[cats == selected_cats[-1]])) selected_cats = selected_cats[:-1] # remove 0 at end elif options['direction'] == 'upstream': print "Not yet active!" """ # Add new lists for each successive upstream river river_is_upstream = while full_river_cats """ selected_cats_str = list(np.array(selected_cats).astype(str)) selected_cats_csv = ','.join(selected_cats_str) v.extract(input=options['streams'], output=options['outstream'], cats=selected_cats_csv, overwrite=True)
def main(): """ Builds river reaches for input to the USGS hydrologic model, GSFLOW. These reaches link the PRMS stream segments to the MODFLOW grid cells. """ ################## # OPTION PARSING # ################## options, flags = gscript.parser() segments = options["segment_input"] grid = options["grid_input"] reaches = options["output"] elevation = options["elevation"] Smin = options["s_min"] h_stream = options["h_stream"] x1 = options["upstream_easting_column_seg"] y1 = options["upstream_northing_column_seg"] x2 = options["downstream_easting_column_seg"] y2 = options["downstream_northing_column_seg"] tostream = options["tostream_cat_column_seg"] # Hydraulic paramters STRTHICK = options["strthick"] STRHC1 = options["strhc1"] THTS = options["thts"] THTI = options["thti"] EPS = options["eps"] UHC = options["uhc"] # Build reach maps by overlaying segments on grid if len(gscript.find_file(segments, element="vector")["name"]) > 0: v.extract( input=segments, output="GSFLOW_TEMP__", type="line", quiet=True, overwrite=True, ) v.overlay( ainput="GSFLOW_TEMP__", atype="line", binput=grid, output=reaches, operator="and", overwrite=gscript.overwrite(), quiet=True, ) g.remove(type="vector", name="GSFLOW_TEMP__", quiet=True, flags="f") else: gscript.fatal('No vector file "' + segments + '" found.') # Start editing database table reachesTopo = VectorTopo(reaches) reachesTopo.open("rw") # Rename a,b columns reachesTopo.table.columns.rename("a_" + x1, "x1") reachesTopo.table.columns.rename("a_" + x2, "x2") reachesTopo.table.columns.rename("a_" + y1, "y1") reachesTopo.table.columns.rename("a_" + y2, "y2") reachesTopo.table.columns.rename("a_NSEG", "NSEG") reachesTopo.table.columns.rename("a_ISEG", "ISEG") reachesTopo.table.columns.rename("a_stream_type", "stream_type") reachesTopo.table.columns.rename("a_type_code", "type_code") reachesTopo.table.columns.rename("a_cat", "rnum_cat") reachesTopo.table.columns.rename("a_" + tostream, "tostream") 
reachesTopo.table.columns.rename("a_id", "segment_id") reachesTopo.table.columns.rename("a_OUTSEG", "OUTSEG") reachesTopo.table.columns.rename("b_row", "row") reachesTopo.table.columns.rename("b_col", "col") reachesTopo.table.columns.rename("b_id", "cell_id") # Drop unnecessary columns cols = reachesTopo.table.columns.names() for col in cols: if (col[:2] == "a_") or (col[:2] == "b_"): reachesTopo.table.columns.drop(col) # Add new columns to 'reaches' reachesTopo.table.columns.add("KRCH", "integer") reachesTopo.table.columns.add("IRCH", "integer") reachesTopo.table.columns.add("JRCH", "integer") reachesTopo.table.columns.add("IREACH", "integer") reachesTopo.table.columns.add("RCHLEN", "double precision") reachesTopo.table.columns.add("STRTOP", "double precision") reachesTopo.table.columns.add("SLOPE", "double precision") reachesTopo.table.columns.add("STRTHICK", "double precision") reachesTopo.table.columns.add("STRHC1", "double precision") reachesTopo.table.columns.add("THTS", "double precision") reachesTopo.table.columns.add("THTI", "double precision") reachesTopo.table.columns.add("EPS", "double precision") reachesTopo.table.columns.add("UHC", "double precision") reachesTopo.table.columns.add("xr1", "double precision") reachesTopo.table.columns.add("xr2", "double precision") reachesTopo.table.columns.add("yr1", "double precision") reachesTopo.table.columns.add("yr2", "double precision") # Commit columns before editing (necessary?) 
reachesTopo.table.conn.commit() reachesTopo.close() # Update some columns that can be done now reachesTopo.open("rw") colNames = np.array(gscript.vector_db_select(reaches, layer=1)["columns"]) colValues = np.array(gscript.vector_db_select(reaches, layer=1)["values"].values()) cats = colValues[:, colNames == "cat"].astype(int).squeeze() nseg = np.arange(1, len(cats) + 1) nseg_cats = [] for i in range(len(cats)): nseg_cats.append((nseg[i], cats[i])) cur = reachesTopo.table.conn.cursor() # Hydrogeologic properties cur.execute("update " + reaches + " set STRTHICK=" + str(STRTHICK)) cur.execute("update " + reaches + " set STRHC1=" + str(STRHC1)) cur.execute("update " + reaches + " set THTS=" + str(THTS)) cur.execute("update " + reaches + " set THTI=" + str(THTI)) cur.execute("update " + reaches + " set EPS=" + str(EPS)) cur.execute("update " + reaches + " set UHC=" + str(UHC)) # Grid properties cur.execute("update " + reaches + " set KRCH=1") # Top layer: unchangable cur.executemany("update " + reaches + " set IRCH=? where row=?", nseg_cats) cur.executemany("update " + reaches + " set JRCH=? where col=?", nseg_cats) reachesTopo.table.conn.commit() reachesTopo.close() v.to_db(map=reaches, columns="RCHLEN", option="length", quiet=True) # Still to go after these: # STRTOP (added with slope) # IREACH (whole next section dedicated to this) # SLOPE (need z_start and z_end) # Now, the light stuff is over: time to build the reach order v.to_db(map=reaches, option="start", columns="xr1,yr1") v.to_db(map=reaches, option="end", columns="xr2,yr2") # Now just sort by category, find which stream has the same xr1 and yr1 as # x1 and y1 (or a_x1, a_y1) and then find where its endpoint matches another # starting point and move down the line. 
# v.db.select reaches col=cat,a_id,xr1,xr2 where="a_x1 = xr1" # First, get the starting coordinates of each stream segment # and a set of river ID's (ordered from 1...N) colNames = np.array(gscript.vector_db_select(segments, layer=1)["columns"]) colValues = np.array(gscript.vector_db_select(segments, layer=1)["values"].values()) number_of_segments = colValues.shape[0] segment_x1s = colValues[:, colNames == "x1"].astype(float).squeeze() segment_y1s = colValues[:, colNames == "y1"].astype(float).squeeze() segment_ids = colValues[:, colNames == "id"].astype(float).squeeze() # Then move back to the reaches map to produce the ordering colNames = np.array(gscript.vector_db_select(reaches, layer=1)["columns"]) colValues = np.array(gscript.vector_db_select(reaches, layer=1)["values"].values()) reach_cats = colValues[:, colNames == "cat"].astype(int).squeeze() reach_x1s = colValues[:, colNames == "xr1"].astype(float).squeeze() reach_y1s = colValues[:, colNames == "yr1"].astype(float).squeeze() reach_x2s = colValues[:, colNames == "xr2"].astype(float).squeeze() reach_y2s = colValues[:, colNames == "yr2"].astype(float).squeeze() segment_ids__reach = colValues[:, colNames == "segment_id"].astype(float).squeeze() for segment_id in segment_ids: reach_order_cats = [] downstream_directed = [] ssel = segment_ids == segment_id rsel = segment_ids__reach == segment_id # selector # Find first segment: x1y1 first here, but not necessarily later downstream_directed.append(1) _x_match = reach_x1s[rsel] == segment_x1s[ssel] _y_match = reach_y1s[rsel] == segment_y1s[ssel] _i_match = _x_match * _y_match x1y1 = True # false if x2y2 # Find cat _cat = int(reach_cats[rsel][_x_match * _y_match]) reach_order_cats.append(_cat) # Get end of reach = start of next one reach_x_end = float(reach_x2s[reach_cats == _cat]) reach_y_end = float(reach_y2s[reach_cats == _cat]) while _i_match.any(): _x_match = reach_x1s[rsel] == reach_x_end _y_match = reach_y1s[rsel] == reach_y_end _i_match = _x_match * 
_y_match if _i_match.any(): _cat = int(reach_cats[rsel][_x_match * _y_match]) reach_x_end = float(reach_x2s[reach_cats == _cat]) reach_y_end = float(reach_y2s[reach_cats == _cat]) reach_order_cats.append(_cat) _message = str(len(reach_order_cats)) + " " + str(len(reach_cats[rsel])) gscript.message(_message) # Reach order to database table reach_number__reach_order_cats = [] for i in range(len(reach_order_cats)): reach_number__reach_order_cats.append((i + 1, reach_order_cats[i])) reachesTopo = VectorTopo(reaches) reachesTopo.open("rw") cur = reachesTopo.table.conn.cursor() cur.executemany( "update " + reaches + " set IREACH=? where cat=?", reach_number__reach_order_cats, ) reachesTopo.table.conn.commit() reachesTopo.close() # TOP AND BOTTOM ARE OUT OF ORDER: SOME SEGS ARE BACKWARDS. UGH!!!! # NEED TO GET THEM IN ORDER TO GET THE Z VALUES AT START AND END # 2018.10.01: Updating this to use the computational region for the DEM g.region(raster=elevation) # Compute slope and starting elevations from the elevations at the start and # end of the reaches and the length of each reach] gscript.message("Obtaining elevation values from raster: may take time.") v.db_addcolumn(map=reaches, columns="zr1 double precision, zr2 double precision") zr1 = [] zr2 = [] for i in range(len(reach_cats)): _x = reach_x1s[i] _y = reach_y1s[i] # print _x, _y _z = float( gscript.parse_command( "r.what", map=elevation, coordinates=str(_x) + "," + str(_y) ) .keys()[0] .split("|")[-1] ) zr1.append(_z) _x = reach_x2s[i] _y = reach_y2s[i] _z = float( gscript.parse_command( "r.what", map=elevation, coordinates=str(_x) + "," + str(_y) ) .keys()[0] .split("|")[-1] ) zr2.append(_z) zr1_cats = [] zr2_cats = [] for i in range(len(reach_cats)): zr1_cats.append((zr1[i], reach_cats[i])) zr2_cats.append((zr2[i], reach_cats[i])) reachesTopo = VectorTopo(reaches) reachesTopo.open("rw") cur = reachesTopo.table.conn.cursor() cur.executemany("update " + reaches + " set zr1=? 
where cat=?", zr1_cats) cur.executemany("update " + reaches + " set zr2=? where cat=?", zr2_cats) reachesTopo.table.conn.commit() reachesTopo.close() # Use these to create slope -- backwards possible on DEM! v.db_update(map=reaches, column="SLOPE", value="(zr1 - zr2)/RCHLEN") v.db_update(map=reaches, column="SLOPE", value=Smin, where="SLOPE <= " + str(Smin)) # srtm_local_filled_grid = srtm_local_filled @ 200m (i.e. current grid) # resolution # r.to.vect in=srtm_local_filled_grid out=srtm_local_filled_grid col=z type=area --o# # NOT SURE IF IT IS BEST TO USE MEAN ELEVATION OR TOP ELEVATION!!!!!!!!!!!!!!!!!!!!!!! v.db_addcolumn(map=reaches, columns="z_topo_mean double precision") v.what_rast( map=reaches, raster=elevation, column="z_topo_mean" ) # , query_column='z') v.db_update( map=reaches, column="STRTOP", value="z_topo_mean -" + str(h_stream), quiet=True )
def main(): """ Links each river segment to the next downstream segment in a tributary network by referencing its category (cat) number in a new column. "0" means that the river exits the map. """ options, flags = gscript.parser() streams = options["input_streams"] basins = options["input_basins"] downstream_cat = options["cat"] x_outlet = float(options["x_outlet"]) y_outlet = float(options["y_outlet"]) output_basins = options["output_basin"] output_streams = options["output_streams"] output_pour_point = options["output_pour_point"] draindir = options["draindir"] snapflag = flags["s"] # print options # print flags # Check that either x,y or cat are set if (downstream_cat != "") or ((x_outlet != "") and (y_outlet != "")): pass else: gscript.fatal( 'You must set either "cat" or "x_outlet" and "y_outlet".') # NEED TO ADD IF-STATEMENT HERE TO AVOID AUTOMATIC OVERWRITING!!!!!!!!!!! if snapflag or (downstream_cat != ""): if downstream_cat == "": # Need to find outlet pour point -- start by creating a point at this # location to use with v.distance try: v.db_droptable(table="tmp", flags="f") except: pass tmp = vector.Vector("tmp") _cols = [ ("cat", "INTEGER PRIMARY KEY"), ("x", "DOUBLE PRECISION"), ("y", "DOUBLE PRECISION"), ("strcat", "DOUBLE PRECISION"), ] tmp.open("w", tab_name="tmp", tab_cols=_cols) point0 = Point(x_outlet, y_outlet) tmp.write( point0, cat=1, attrs=(str(x_outlet), str(y_outlet), 0), ) tmp.table.conn.commit() tmp.build() tmp.close() # Now v.distance gscript.run_command("v.distance", from_="tmp", to=streams, upload="cat", column="strcat") # v.distance(_from_='tmp', to=streams, upload='cat', column='strcat') downstream_cat = gscript.vector_db_select(map="tmp", columns="strcat") downstream_cat = int(downstream_cat["values"].values()[0][0]) # Attributes of streams colNames = np.array(vector_db_select(streams)["columns"]) colValues = np.array(vector_db_select(streams)["values"].values()) tostream = colValues[:, colNames == "tostream"].astype(int).squeeze() 
cats = colValues[:, colNames == "cat"].astype( int).squeeze() # = "fromstream" # Find network basincats = [downstream_cat] # start here most_upstream_cats = [ downstream_cat ] # all of those for which new cats must be sought while True: if len(most_upstream_cats) == 0: break tmp = list(most_upstream_cats) # copy to a temp file: old values most_upstream_cats = [] # Ready to accept new values for ucat in tmp: most_upstream_cats += list(cats[tostream == int(ucat)]) basincats += most_upstream_cats basincats = list(set(list(basincats))) basincats_str = ",".join(map(str, basincats)) # Many basins out -- need to use overwrite flag in future! # SQL_OR = 'rnum = ' + ' OR rnum = '.join(map(str, basincats)) # SQL_OR = 'cat = ' + ' OR cat = '.join(map(str, basincats)) SQL_LIST = "cat IN (" + ", ".join(map(str, basincats)) + ")" if len(basins) > 0: v.extract( input=basins, output=output_basins, where=SQL_LIST, overwrite=gscript.overwrite(), quiet=True, ) if len(streams) > 0: v.extract( input=streams, output=output_streams, cats=basincats_str, overwrite=gscript.overwrite(), quiet=True, ) else: # Have coordinates and will limit the area that way. r.water_outlet( input=draindir, output="tmp", coordinates=(x_outlet, y_outlet), overwrite=True, ) r.to_vect(input="tmp", output="tmp", type="area", overwrite=True) v.clip(input=basins, clip="tmp", output=output_basins, overwrite=True) basincats = gscript.vector_db_select( "basins_inbasin").values()[0].keys() basincats_str = ",".join(map(str, basincats)) if len(streams) > 0: v.extract( input=streams, output=output_streams, cats=basincats_str, overwrite=gscript.overwrite(), quiet=True, ) # If we want to output the pour point location if len(output_pour_point) > 0: # NEED TO ADD IF-STATEMENT HERE TO AVOID AUTOMATIC OVERWRITING!!!!!!!!!!! 
try: v.db_droptable(table=output_pour_point, flags="f") except: pass if snapflag or (downstream_cat != ""): _pp = gscript.vector_db_select(map=streams, columns="x2,y2", where="cat=" + str(downstream_cat)) _xy = np.squeeze(_pp["values"].values()) _x = float(_xy[0]) _y = float(_xy[1]) else: _x = x_outlet _y = y_outlet pptmp = vector.Vector(output_pour_point) _cols = [ ("cat", "INTEGER PRIMARY KEY"), ("x", "DOUBLE PRECISION"), ("y", "DOUBLE PRECISION"), ] pptmp.open("w", tab_name=output_pour_point, tab_cols=_cols) point0 = Point(_x, _y) pptmp.write( point0, cat=1, attrs=(str(_x), str(_y)), ) pptmp.table.conn.commit() pptmp.build() pptmp.close()
def main(): """ Links each river segment to the next downstream segment in a tributary network by referencing its category (cat) number in a new column. "0" means that the river exits the map. """ options, flags = gscript.parser() streams = options['input_streams'] basins = options['input_basins'] downstream_cat = options['cat'] x_outlet = float(options['x_outlet']) y_outlet = float(options['y_outlet']) output_basins = options['output_basin'] output_streams = options['output_streams'] output_pour_point = options['output_pour_point'] #print options #print flags # Check that either x,y or cat are set if (downstream_cat != '') or ((x_outlet != '') and (y_outlet != '')): pass else: gscript.fatal( 'You must set either "cat" or "x_outlet" and "y_outlet".') # NEED TO ADD IF-STATEMENT HERE TO AVOID AUTOMATIC OVERWRITING!!!!!!!!!!! if downstream_cat == '': # Need to find outlet pour point -- start by creating a point at this # location to use with v.distance try: v.db_droptable(table='tmp', flags='f') except: pass tmp = vector.Vector('tmp') _cols = [(u'cat', 'INTEGER PRIMARY KEY'), (u'x', 'DOUBLE PRECISION'), (u'y', 'DOUBLE PRECISION'), (u'strcat', 'DOUBLE PRECISION')] tmp.open('w', tab_name='tmp', tab_cols=_cols) point0 = Point(x_outlet, y_outlet) tmp.write( point0, cat=1, attrs=(str(x_outlet), str(y_outlet), 0), ) tmp.table.conn.commit() tmp.build() tmp.close() # Now v.distance gscript.run_command('v.distance', from_='tmp', to=streams, upload='cat', column='strcat') #v.distance(_from_='tmp', to=streams, upload='cat', column='strcat') downstream_cat = gscript.vector_db_select(map='tmp', columns='strcat') downstream_cat = int(downstream_cat['values'].values()[0][0]) # Attributes of streams colNames = np.array(vector_db_select(streams)['columns']) colValues = np.array(vector_db_select(streams)['values'].values()) tostream = colValues[:, colNames == 'tostream'].astype(int).squeeze() cats = colValues[:, colNames == 'cat'].astype(int).squeeze() # = "fromstream" # Find network 
basincats = [downstream_cat] # start here most_upstream_cats = [downstream_cat ] # all of those for which new cats must be sought while True: if len(most_upstream_cats) == 0: break tmp = list(most_upstream_cats) # copy to a temp file: old values most_upstream_cats = [] # Ready to accept new values for ucat in tmp: most_upstream_cats += list(cats[tostream == int(ucat)]) basincats += most_upstream_cats basincats = list(set(list(basincats))) basincats_str = ','.join(map(str, basincats)) # Many basins out -- need to use overwrite flag in future! #SQL_OR = 'rnum = ' + ' OR rnum = '.join(map(str, basincats)) SQL_OR = 'cat = ' + ' OR cat = '.join(map(str, basincats)) if len(basins) > 0: v.extract(input=basins, output=output_basins, where=SQL_OR, overwrite=gscript.overwrite(), quiet=True) if len(streams) > 0: v.extract(input=streams, output=output_streams, cats=basincats_str, overwrite=gscript.overwrite(), quiet=True) # If we want to output the pour point location if len(output_pour_point) > 0: _pp = gscript.vector_db_select(map=streams, columns='x2,y2', where='cat=' + str(downstream_cat)) _xy = np.squeeze(_pp['values'].values()) _x = float(_xy[0]) _y = float(_xy[1]) # NEED TO ADD IF-STATEMENT HERE TO AVOID AUTOMATIC OVERWRITING!!!!!!!!!!! try: v.db_droptable(table=output_pour_point, flags='f') except: pass pptmp = vector.Vector(output_pour_point) _cols = [(u'cat', 'INTEGER PRIMARY KEY'), (u'x', 'DOUBLE PRECISION'), (u'y', 'DOUBLE PRECISION')] pptmp.open('w', tab_name=output_pour_point, tab_cols=_cols) point0 = Point(_x, _y) pptmp.write( point0, cat=1, attrs=(str(_x), str(_y)), ) pptmp.table.conn.commit() pptmp.build() pptmp.close()
def main(): """ Builds river reaches for input to the USGS hydrologic model, GSFLOW. These reaches link the PRMS stream segments to the MODFLOW grid cells. """ ################## # OPTION PARSING # ################## options, flags = gscript.parser() segments = options['segment_input'] grid = options['grid_input'] reaches = options['output'] elevation = options['elevation'] Smin = options['s_min'] h_stream = options['h_stream'] x1 = options['upstream_easting_column_seg'] y1 = options['upstream_northing_column_seg'] x2 = options['downstream_easting_column_seg'] y2 = options['downstream_northing_column_seg'] tostream = options['tostream_cat_column_seg'] # Hydraulic paramters STRTHICK = options['strthick'] STRHC1 = options['strhc1'] THTS = options['thts'] THTI = options['thti'] EPS = options['eps'] UHC = options['uhc'] # Build reach maps by overlaying segments on grid if len(gscript.find_file(segments, element='vector')['name']) > 0: v.extract(input=segments, output='GSFLOW_TEMP__', type='line', quiet=True, overwrite=True) v.overlay(ainput='GSFLOW_TEMP__', atype='line', binput=grid, output=reaches, operator='and', overwrite=gscript.overwrite(), quiet=True) g.remove(type='vector', name='GSFLOW_TEMP__', quiet=True, flags='f') else: gscript.fatal('No vector file "' + segments + '" found.') # Start editing database table reachesTopo = VectorTopo(reaches) reachesTopo.open('rw') # Rename a,b columns reachesTopo.table.columns.rename('a_' + x1, 'x1') reachesTopo.table.columns.rename('a_' + x2, 'x2') reachesTopo.table.columns.rename('a_' + y1, 'y1') reachesTopo.table.columns.rename('a_' + y2, 'y2') reachesTopo.table.columns.rename('a_NSEG', 'NSEG') reachesTopo.table.columns.rename('a_ISEG', 'ISEG') reachesTopo.table.columns.rename('a_stream_type', 'stream_type') reachesTopo.table.columns.rename('a_type_code', 'type_code') reachesTopo.table.columns.rename('a_cat', 'rnum_cat') reachesTopo.table.columns.rename('a_' + tostream, 'tostream') reachesTopo.table.columns.rename('a_id', 
'segment_id') reachesTopo.table.columns.rename('a_OUTSEG', 'OUTSEG') reachesTopo.table.columns.rename('b_row', 'row') reachesTopo.table.columns.rename('b_col', 'col') reachesTopo.table.columns.rename('b_id', 'cell_id') # Drop unnecessary columns cols = reachesTopo.table.columns.names() for col in cols: if (col[:2] == 'a_') or (col[:2] == 'b_'): reachesTopo.table.columns.drop(col) # Add new columns to 'reaches' reachesTopo.table.columns.add('KRCH', 'integer') reachesTopo.table.columns.add('IRCH', 'integer') reachesTopo.table.columns.add('JRCH', 'integer') reachesTopo.table.columns.add('IREACH', 'integer') reachesTopo.table.columns.add('RCHLEN', 'double precision') reachesTopo.table.columns.add('STRTOP', 'double precision') reachesTopo.table.columns.add('SLOPE', 'double precision') reachesTopo.table.columns.add('STRTHICK', 'double precision') reachesTopo.table.columns.add('STRHC1', 'double precision') reachesTopo.table.columns.add('THTS', 'double precision') reachesTopo.table.columns.add('THTI', 'double precision') reachesTopo.table.columns.add('EPS', 'double precision') reachesTopo.table.columns.add('UHC', 'double precision') reachesTopo.table.columns.add('xr1', 'double precision') reachesTopo.table.columns.add('xr2', 'double precision') reachesTopo.table.columns.add('yr1', 'double precision') reachesTopo.table.columns.add('yr2', 'double precision') # Commit columns before editing (necessary?) 
reachesTopo.table.conn.commit() reachesTopo.close() # Update some columns that can be done now reachesTopo.open('rw') colNames = np.array(gscript.vector_db_select(reaches, layer=1)['columns']) colValues = np.array( gscript.vector_db_select(reaches, layer=1)['values'].values()) cats = colValues[:, colNames == 'cat'].astype(int).squeeze() nseg = np.arange(1, len(cats) + 1) nseg_cats = [] for i in range(len(cats)): nseg_cats.append((nseg[i], cats[i])) cur = reachesTopo.table.conn.cursor() # Hydrogeologic properties cur.execute("update " + reaches + " set STRTHICK=" + str(STRTHICK)) cur.execute("update " + reaches + " set STRHC1=" + str(STRHC1)) cur.execute("update " + reaches + " set THTS=" + str(THTS)) cur.execute("update " + reaches + " set THTI=" + str(THTI)) cur.execute("update " + reaches + " set EPS=" + str(EPS)) cur.execute("update " + reaches + " set UHC=" + str(UHC)) # Grid properties cur.execute("update " + reaches + " set KRCH=1") # Top layer: unchangable cur.executemany("update " + reaches + " set IRCH=? where row=?", nseg_cats) cur.executemany("update " + reaches + " set JRCH=? where col=?", nseg_cats) reachesTopo.table.conn.commit() reachesTopo.close() v.to_db(map=reaches, columns='RCHLEN', option='length', quiet=True) # Still to go after these: # STRTOP (added with slope) # IREACH (whole next section dedicated to this) # SLOPE (need z_start and z_end) # Now, the light stuff is over: time to build the reach order v.to_db(map=reaches, option='start', columns='xr1,yr1') v.to_db(map=reaches, option='end', columns='xr2,yr2') # Now just sort by category, find which stream has the same xr1 and yr1 as # x1 and y1 (or a_x1, a_y1) and then find where its endpoint matches another # starting point and move down the line. 
# v.db.select reaches col=cat,a_id,xr1,xr2 where="a_x1 = xr1" # First, get the starting coordinates of each stream segment # and a set of river ID's (ordered from 1...N) colNames = np.array(gscript.vector_db_select(segments, layer=1)['columns']) colValues = np.array( gscript.vector_db_select(segments, layer=1)['values'].values()) number_of_segments = colValues.shape[0] segment_x1s = colValues[:, colNames == 'x1'].astype(float).squeeze() segment_y1s = colValues[:, colNames == 'y1'].astype(float).squeeze() segment_ids = colValues[:, colNames == 'id'].astype(float).squeeze() # Then move back to the reaches map to produce the ordering colNames = np.array(gscript.vector_db_select(reaches, layer=1)['columns']) colValues = np.array( gscript.vector_db_select(reaches, layer=1)['values'].values()) reach_cats = colValues[:, colNames == 'cat'].astype(int).squeeze() reach_x1s = colValues[:, colNames == 'xr1'].astype(float).squeeze() reach_y1s = colValues[:, colNames == 'yr1'].astype(float).squeeze() reach_x2s = colValues[:, colNames == 'xr2'].astype(float).squeeze() reach_y2s = colValues[:, colNames == 'yr2'].astype(float).squeeze() segment_ids__reach = colValues[:, colNames == 'segment_id'].astype( float).squeeze() for segment_id in segment_ids: reach_order_cats = [] downstream_directed = [] ssel = segment_ids == segment_id rsel = segment_ids__reach == segment_id # selector # Find first segment: x1y1 first here, but not necessarily later downstream_directed.append(1) _x_match = reach_x1s[rsel] == segment_x1s[ssel] _y_match = reach_y1s[rsel] == segment_y1s[ssel] _i_match = _x_match * _y_match x1y1 = True # false if x2y2 # Find cat _cat = int(reach_cats[rsel][_x_match * _y_match]) reach_order_cats.append(_cat) # Get end of reach = start of next one reach_x_end = float(reach_x2s[reach_cats == _cat]) reach_y_end = float(reach_y2s[reach_cats == _cat]) while _i_match.any(): _x_match = reach_x1s[rsel] == reach_x_end _y_match = reach_y1s[rsel] == reach_y_end _i_match = _x_match * 
_y_match if _i_match.any(): _cat = int(reach_cats[rsel][_x_match * _y_match]) reach_x_end = float(reach_x2s[reach_cats == _cat]) reach_y_end = float(reach_y2s[reach_cats == _cat]) reach_order_cats.append(_cat) print len(reach_order_cats), len(reach_cats[rsel]) # Reach order to database table reach_number__reach_order_cats = [] for i in range(len(reach_order_cats)): reach_number__reach_order_cats.append((i + 1, reach_order_cats[i])) reachesTopo = VectorTopo(reaches) reachesTopo.open('rw') cur = reachesTopo.table.conn.cursor() cur.executemany("update " + reaches + " set IREACH=? where cat=?", reach_number__reach_order_cats) reachesTopo.table.conn.commit() reachesTopo.close() # TOP AND BOTTOM ARE OUT OF ORDER: SOME SEGS ARE BACKWARDS. UGH!!!! # NEED TO GET THEM IN ORDER TO GET THE Z VALUES AT START AND END # 2018.10.01: Updating this to use the computational region for the DEM g.region(raster=elevation) # Compute slope and starting elevations from the elevations at the start and # end of the reaches and the length of each reach] gscript.message('Obtaining elevation values from raster: may take time.') v.db_addcolumn(map=reaches, columns='zr1 double precision, zr2 double precision') zr1 = [] zr2 = [] for i in range(len(reach_cats)): _x = reach_x1s[i] _y = reach_y1s[i] #print _x, _y _z = float( gscript.parse_command('r.what', map=elevation, coordinates=str(_x) + ',' + str(_y)).keys()[0].split('|')[-1]) zr1.append(_z) _x = reach_x2s[i] _y = reach_y2s[i] _z = float( gscript.parse_command('r.what', map=elevation, coordinates=str(_x) + ',' + str(_y)).keys()[0].split('|')[-1]) zr2.append(_z) zr1_cats = [] zr2_cats = [] for i in range(len(reach_cats)): zr1_cats.append((zr1[i], reach_cats[i])) zr2_cats.append((zr2[i], reach_cats[i])) reachesTopo = VectorTopo(reaches) reachesTopo.open('rw') cur = reachesTopo.table.conn.cursor() cur.executemany("update " + reaches + " set zr1=? where cat=?", zr1_cats) cur.executemany("update " + reaches + " set zr2=? 
where cat=?", zr2_cats) reachesTopo.table.conn.commit() reachesTopo.close() # Use these to create slope -- backwards possible on DEM! v.db_update(map=reaches, column='SLOPE', value='(zr1 - zr2)/RCHLEN') v.db_update(map=reaches, column='SLOPE', value=Smin, where='SLOPE <= ' + str(Smin)) # srtm_local_filled_grid = srtm_local_filled @ 200m (i.e. current grid) # resolution # r.to.vect in=srtm_local_filled_grid out=srtm_local_filled_grid col=z type=area --o# # NOT SURE IF IT IS BEST TO USE MEAN ELEVATION OR TOP ELEVATION!!!!!!!!!!!!!!!!!!!!!!! v.db_addcolumn(map=reaches, columns='z_topo_mean double precision') v.what_rast(map=reaches, raster=elevation, column='z_topo_mean') #, query_column='z') v.db_update(map=reaches, column='STRTOP', value='z_topo_mean -' + str(h_stream), quiet=True)
# list of buffers buffer_index = range(len(years)) # region aligned to this map map_for_define_region = 'Neotropic_Hansen_percenttreecoverd_2000_wgs84@PERMANENT' # input vector with buffers vector = 'buffers_5km_comm_data_neotro_checked_2020_d11_06' # For each buffer for i in buffer_index: print i, comm_code[i], years[i] # select feature v.extract(input = vector, output = 'vector_cat', where = 'cat = ' + str(i+1), flags = 't', overwrite = True, quiet = True) # define region g.region(vector = 'vector_cat', align = map_for_define_region, flags = 'p') # use vector as a mask r.mask(vector = 'vector_cat', overwrite = True, quiet = True) # Cut maps # tree cover with zero where there was deforestation expr = comm_code[i] + '_treecover_GFW_2000_deforestation = if(Neotropical_Hansen_treecoverlossperyear_wgs84_2017@PERMANENT > 0 && '+ \ 'Neotropical_Hansen_treecoverlossperyear_wgs84_2017@PERMANENT < ' + str(years[i]) + ', 0, Neotropic_Hansen_percenttreecoverd_2000_wgs84@PERMANENT)' r.mapcalc(expr, overwrite = True) # thresholds for binary values of natural vegetation thresholds = [70, 80, 90]
def main():
    """
    Extract the sub-network and sub-basins upstream of a given stream segment.

    Starting from the stream segment whose category number is given by the
    'cat' option, walks the 'tostream' attribute backwards (breadth-first)
    to collect every upstream segment, then writes the matching basins and
    streams to the requested output maps with v.extract.
    """
    options, flags = gscript.parser()

    streams = options['input_streams']
    basins = options['input_basins']
    # Cast once: the value is compared against integer cat arrays below.
    # (Previously kept as a string, so `tostream == ucat` never matched.)
    cat = int(options['cat'])
    output_basins = options['output_basin']
    output_streams = options['output_streams']

    # Attribute table of the streams map: "tostream" points each segment at
    # its downstream neighbor; "cat" is the segment's own ID.
    colNames = np.array(vector_db_select(streams)['columns'])
    colValues = np.array(vector_db_select(streams)['values'].values())
    tostream = colValues[:, colNames == 'tostream'].astype(int).squeeze()
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()  # = "fromstream"

    # Find network: breadth-first walk upstream. Every segment draining into
    # a known cat joins the basin; stop when the frontier is empty.
    basincats = [cat]  # start here
    most_upstream_cats = [cat]  # frontier: cats whose tributaries are unknown
    while len(most_upstream_cats) > 0:
        frontier = []
        for ucat in most_upstream_cats:
            frontier += list(cats[tostream == ucat])
        most_upstream_cats = frontier
        basincats += frontier

    basincats = sorted(set(basincats))  # de-duplicate
    basincats_str = ','.join(map(str, basincats))

    # Select by cat number: basins via an SQL WHERE clause, streams via a
    # plain cat list. (A dead 'rnum = ...' clause and a vestigial trailing
    # re-extraction that read undefined option keys were removed.)
    SQL_OR = 'cat = ' + ' OR cat = '.join(map(str, basincats))
    v.extract(input=basins, output=output_basins, where=SQL_OR,
              overwrite=True)
    v.extract(input=streams, output=output_streams, cats=basincats_str,
              overwrite=True)
# NOTE(review): fragment of a larger script -- `full_river`, `full_river_cats`,
# `cats`, `tostream`, and `stream_number_ascending` are created before this
# point, outside the visible source.

# Hard-coded starting stream segment number
segment = 805
full_river.append(segment)
full_river_cats.append(int(cats[stream_number_ascending == segment]))
# Follow "tostream" downstream until 0 ("0" = river exits the map)
while full_river[-1] != 0:
    full_river.append(int(tostream[stream_number_ascending == full_river[-1]]))
    if full_river[-1] != 0:
        full_river_cats.append(
            int(cats[(stream_number_ascending == full_river[-1]).nonzero()]))
full_river = full_river[:-1]
#full_river = np.array(full_river)

# Category list -> comma-separated string accepted by v.extract's cats=
full_river_cats_str = list(np.array(full_river_cats).astype(str))
full_river_cats_csv = ','.join(full_river_cats_str)

v.extract(input='streams', output='specific_stream',
          cats=full_river_cats_csv, overwrite=True)
#v.dissolve(input='specific_stream_segmented', output='specific_stream', column='river_number', overwrite=True)
#v.db_addtable('specific_stream')
#v.category('specific_stream')
# Turn the extracted line into points (vertices, with extra interpolated
# points -- presumably spaced <= dmax=30 map units; flags='i' -- TODO confirm)
v.to_points(input='specific_stream', output='specific_stream_points',
            use='vertex', dmax=30, flags='i', overwrite=True)
v.db_addcolumn(map='specific_stream_points', layer=2,
               columns=('slope double precision, area_km2 double precision'))
# NOTE(review): this call is truncated at the end of the visible chunk; its
# remaining arguments continue outside the visible source.
v.what_rast(map='specific_stream_points', type='point',
def main():
    """
    Extract and analyze the long profile downstream of a stream segment.

    Follows the 'tostream' attribute from the segment given by the 'cat'
    option to the network outlet, concatenates the line geometries into a
    downstream-distance coordinate, optionally samples elevation / slope /
    flow-accumulation rasters along the path, draws the requested plots,
    and saves original and/or window-smoothed profile tables.

    Fix: empty-string option tests now use ``!= ''`` instead of the
    identity comparison ``is not ''`` (implementation-dependent; a
    SyntaxWarning on modern Python).
    """
    import matplotlib  # required by windows
    matplotlib.use('wxAGG')  # required by windows
    from matplotlib import pyplot as plt

    options, flags = gscript.parser()

    # Parsing
    window = float(options['window'])
    accum_mult = float(options['accum_mult'])
    if options['units'] == 'm2':
        accum_label = 'Drainage area [m$^2$]'
    elif options['units'] == 'km2':
        accum_label = 'Drainage area [km$^2$]'
    elif options['units'] == 'cumecs':
        accum_label = 'Water discharge [m$^3$ s$^{-1}$]'
    elif options['units'] == 'cfs':
        accum_label = 'Water discharge [cfs]'
    else:
        accum_label = 'Flow accumulation [$-$]'
    plots = options['plots'].split(',')

    # Attributes of streams: "tostream" links each segment to its
    # downstream neighbor; "cat" is the segment's own ID.
    colNames = np.array(vector_db_select(options['streams'])['columns'])
    colValues = np.array(
        vector_db_select(options['streams'])['values'].values())
    tostream = colValues[:, colNames == 'tostream'].astype(int).squeeze()
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()  # = "fromstream"

    # We can loop over this list to get the shape of the full river network.
    selected_cats = []
    segment = int(options['cat'])
    selected_cats.append(segment)
    x = []
    z = []
    if options['direction'] == 'downstream':
        # Get network: follow "tostream" until 0 ("0" = exits the map)
        gscript.message("Network")
        while selected_cats[-1] != 0:
            selected_cats.append(int(tostream[cats == selected_cats[-1]]))
            x.append(selected_cats[-1])
        selected_cats = selected_cats[:-1]  # remove 0 at end

        # Extract x points in network
        data = vector.VectorTopo(
            options['streams'])  # Create a VectorTopo object
        data.open('r')  # Open this object for reading
        coords = []
        _i = 0
        for i in range(len(data)):
            if isinstance(data.read(i + 1), vector.geometry.Line):
                if data.read(i + 1).cat in selected_cats:
                    coords.append(data.read(i + 1).to_array())
                    gscript.core.percent(_i, len(selected_cats),
                                         100. / len(selected_cats))
                    _i += 1
        gscript.core.percent(1, 1, 1)
        coords = np.vstack(np.array(coords))
        # Cumulative along-path distance over the concatenated vertices
        _dx = np.diff(coords[:, 0])
        _dy = np.diff(coords[:, 1])
        x_downstream_0 = np.hstack((0, np.cumsum((_dx**2 + _dy**2)**.5)))
        x_downstream = x_downstream_0.copy()
    elif options['direction'] == 'upstream':
        #terminalCATS = list(options['cat'])
        #while terminalCATS:
        #
        print("Upstream direction not yet active!")
        return
        """
        # Add new lists for each successive upstream river
        river_is_upstream =
        while
        full_river_cats
        """

    # Network extraction
    if options['outstream'] != '':
        selected_cats_str = list(np.array(selected_cats).astype(str))
        selected_cats_csv = ','.join(selected_cats_str)
        v.extract(input=options['streams'], output=options['outstream'],
                  cats=selected_cats_csv, overwrite=gscript.overwrite())

    # Analysis
    gscript.message("Elevation")
    if options['elevation']:
        _include_z = True
        DEM = RasterRow(options['elevation'])
        DEM.open('r')
        z = []
        _i = 0
        _lasti = 0
        for row in coords:
            z.append(DEM.get_value(Point(row[0], row[1])))
            if float(_i) / len(coords) > float(_lasti) / len(coords):
                gscript.core.percent(_i, len(coords), np.floor(_i - _lasti))
                _lasti = _i
            _i += 1
        DEM.close()
        z = np.array(z)
        if options['window'] != '':
            x_downstream, z = moving_average(x_downstream_0, z, window)
        gscript.core.percent(1, 1, 1)
    else:
        _include_z = False

    gscript.message("Slope")
    if options['slope']:
        _include_S = True
        slope = RasterRow(options['slope'])
        slope.open('r')
        S = []
        _i = 0
        _lasti = 0
        for row in coords:
            S.append(slope.get_value(Point(row[0], row[1])))
            if float(_i) / len(coords) > float(_lasti) / len(coords):
                gscript.core.percent(_i, len(coords), np.floor(_i - _lasti))
                _lasti = _i
            _i += 1
        slope.close()
        S = np.array(S)
        S_0 = S.copy()  # keep the unsmoothed values for outfile_original
        if options['window'] != '':
            x_downstream, S = moving_average(x_downstream_0, S, window)
        gscript.core.percent(1, 1, 1)
    else:
        _include_S = False

    gscript.message("Accumulation")
    if options['accumulation']:
        _include_A = True
        accumulation = RasterRow(options['accumulation'])
        accumulation.open('r')
        A = []
        _i = 0
        _lasti = 0
        for row in coords:
            A.append(
                accumulation.get_value(Point(row[0], row[1])) * accum_mult)
            if float(_i) / len(coords) > float(_lasti) / len(coords):
                gscript.core.percent(_i, len(coords), np.floor(_i - _lasti))
                _lasti = _i
            _i += 1
        accumulation.close()
        A = np.array(A)
        A_0 = A.copy()  # keep the unsmoothed values for outfile_original
        if options['window'] != '':
            x_downstream, A = moving_average(x_downstream_0, A, window)
        gscript.core.percent(1, 1, 1)
    else:
        _include_A = False

    # Plotting
    if 'LongProfile' in plots:
        plt.figure()
        plt.plot(x_downstream / 1000., z, 'k-', linewidth=2)
        plt.xlabel('Distance downstream [km]', fontsize=16)
        plt.ylabel('Elevation [m]', fontsize=20)
        plt.tight_layout()
    if 'SlopeAccum' in plots:
        plt.figure()
        plt.loglog(A, S, 'ko', linewidth=2)
        plt.xlabel(accum_label, fontsize=20)
        plt.ylabel('Slope [$-$]', fontsize=20)
        plt.tight_layout()
    if 'SlopeDistance' in plots:
        plt.figure()
        plt.plot(x_downstream / 1000., S, 'k-', linewidth=2)
        plt.xlabel('Distance downstream [km]', fontsize=16)
        plt.ylabel('Slope [$-$]', fontsize=20)
        plt.tight_layout()
    if 'AccumDistance' in plots:
        plt.figure()
        plt.plot(x_downstream / 1000., A, 'k-', linewidth=2)
        plt.xlabel('Distance downstream [km]', fontsize=16)
        plt.ylabel(accum_label, fontsize=20)
        plt.tight_layout()
    plt.show()

    # Saving data
    if options['outfile_original'] != '':
        header = ['x_downstream', 'E', 'N']
        outfile = np.hstack((np.expand_dims(x_downstream_0, axis=1), coords))
        if _include_S:
            header.append('slope')
            outfile = np.hstack((outfile, np.expand_dims(S_0, axis=1)))
        if _include_A:
            if (options['units'] == 'm2') or (options['units'] == 'km2'):
                header.append('drainage_area_' + options['units'])
            elif (options['units'] == 'cumecs') or (options['units'] == 'cfs'):
                header.append('water_discharge_' + options['units'])
            else:
                header.append('flow_accumulation_arbitrary_units')
            outfile = np.hstack((outfile, np.expand_dims(A_0, axis=1)))
        header = np.array(header)
        outfile = np.vstack((header, outfile))
        np.savetxt(options['outfile_original'], outfile, '%s')
    if options['outfile_smoothed'] != '':
        header = ['x_downstream', 'E', 'N']
        # E, N on smoothed grid
        x_downstream, E = moving_average(x_downstream_0, coords[:, 0], window)
        x_downstream, N = moving_average(x_downstream_0, coords[:, 1], window)
        # Back to output
        outfile = np.hstack((np.expand_dims(x_downstream, axis=1),
                             np.expand_dims(E, axis=1),
                             np.expand_dims(N, axis=1)))
        if _include_S:
            header.append('slope')
            outfile = np.hstack((outfile, np.expand_dims(S, axis=1)))
        if _include_A:
            if (options['units'] == 'm2') or (options['units'] == 'km2'):
                header.append('drainage_area_' + options['units'])
            elif (options['units'] == 'cumecs') or (options['units'] == 'cfs'):
                header.append('water_discharge_' + options['units'])
            else:
                header.append('flow_accumulation_arbitrary_units')
            outfile = np.hstack((outfile, np.expand_dims(A, axis=1)))
        header = np.array(header)
        outfile = np.vstack((header, outfile))
        np.savetxt(options['outfile_smoothed'], outfile, '%s')
def main(): """ Links each river segment to the next downstream segment in a tributary network by referencing its category (cat) number in a new column. "0" means that the river exits the map. """ # Parsing inside function _cat = int(options['cat']) overwrite_flag = gscript.overwrite() elevation = options['elevation'] if elevation == '': elevation = None slope = options['slope'] if slope == '': slope = None accumulation = options['accumulation'] if accumulation == '': accumulation = None direction = options['direction'] if direction == '': direction = None streams = options['streams'] if streams == '': streams = None outstream = options['outstream'] if outstream == '': outstream = None outfile = options['outfile'] if outfile == '': outfile = None # !!!!!!!!!!!!!!!!! # ADD SWITCHES TO INDIVIDUALLY SMOOTH SLOPE, ACCUM, ETC. # !!!!!!!!!!!!!!!!! try: window = float(options['window']) except: window = None try: dx_target = float(options['dx_target']) except: dx_target = None accum_mult = float(options['accum_mult']) if options['units'] == 'm2': accum_label = 'Drainage area [m$^2$]' elif options['units'] == 'km2': accum_label = 'Drainage area [km$^2$]' elif options['units'] == 'cumecs': accum_label = 'Water discharge [m$^3$ s$^{-1}$]' elif options['units'] == 'cfs': accum_label = 'Water discharge [cfs]' else: accum_label = 'Flow accumulation [$-$]' plots = options['plots'].split(',') # Attributes of streams colNames = np.array(vector_db_select(streams)['columns']) colValues = np.array(vector_db_select(streams)['values'].values()) warnings.warn('tostream is not generalized') tostream = colValues[:,colNames == 'tostream'].astype(int).squeeze() cats = colValues[:,colNames == 'cat'].astype(int).squeeze() # = "fromstream" # We can loop over this list to get the shape of the full river network. 
selected_cats = [] segment = _cat selected_cats.append(segment) # Get all cats in network data = vector.VectorTopo(streams) # Create a VectorTopo object data.open('r') # Open this object for reading if direction == 'downstream': gscript.message("Extracting drainage pathway...",) # Get network while selected_cats[-1] != 0: selected_cats.append(int(tostream[cats == selected_cats[-1]])) #x.append(selected_cats[-1]) selected_cats = selected_cats[:-1] # remove 0 at end gscript.message("Done.") elif direction == 'upstream': gscript.message("Extracting drainage network...",) # GENERALIZE COLUMN NAME!!!!!!!! tostream_col = np.where(np.array(data.table.columns.names()) == 'tostream')[0][0] terminalCats = [_cat] terminal_x_values = [0] netcats = [] net_tocats = [] while len(terminalCats) > 0: for cat in terminalCats: netcats.append(cat) # ALSO UNADVISABLE NAME -- NEED TO GET TOSTREAM, GENERALIZED #print data.table_to_dict() colnum = np.where( np.array(data.table.columns.names()) == 'tostream')[0][0] net_tocats.append(data.table_to_dict()[cat][colnum]) oldcats = terminalCats terminalCats = [] for cat in oldcats: terminalCats += list(cats[tostream == cat]) #data.close() netcats = np.array(netcats) net_tocats = np.array(net_tocats) selected_cats = netcats gscript.message("Done.") segments = [] for cat in selected_cats: points_with_cat = data.cat(cat_id=cat, vtype='lines')[0] subcoords = [] for point in points_with_cat: subcoords.append([point.x, point.y]) segments.append( rn.Segment(_id=cat, to_ids=tostream[cats == cat]) ) segments[-1].set_EastingNorthing(ENarray=subcoords) segments[-1].calc_x_from_EastingNorthing() # x grid spacing #print segments[-1].Easting[-1], segments[-1].Northing[-1] #print segments[-1].EastingNorthing[-1] #print "" if dx_target is not None: dx_target = float(dx_target) segments[-1].set_target_dx_downstream(dx_target) segments[-1].densify_x_E_N() data.close() net = rn.Network(segments) bbox = BoundingBox(points_xy=net.segments_xy_flattened()) 
reg_to_revert = region.Region() reg = region.Region() # to limit region for computational efficiency reg.set_bbox(bbox.bbox) reg.write() # Network extraction if outstream: selected_cats_str = list(np.array(selected_cats).astype(str)) selected_cats_csv = ','.join(selected_cats_str) v.extract( input=streams, output=outstream, \ cats=selected_cats_csv, overwrite=overwrite_flag ) # All coordinates coords = net.segments_xy_flattened() #x_downstream = # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # UPDATE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! """ ##### FIND RIGHT SPOT TO ADD CLASS STUFF HERE/BELOW #### # Extract x points in network data = vector.VectorTopo(streams) # Create a VectorTopo object data.open('r') # Open this object for reading coords = [] _i = 0 for i in range(len(data)): if type(data.read(i+1)) is vector.geometry.Line: if data.read(i+1).cat in selected_cats: coords.append(data.read(i+1).to_array()) gscript.core.percent(_i, len(selected_cats), 100./len(selected_cats)) _i += 1 gscript.core.percent(1, 1, 1) coords = np.vstack(np.array(coords)) _dx = np.diff(coords[:,0]) _dy = np.diff(coords[:,1]) x_downstream_0 = np.hstack((0, np.cumsum((_dx**2 + _dy**2)**.5))) x_downstream = x_downstream_0.copy() data.close() """ # TEMPORARY!!!! #x_downstream = get_xEN() #x_downstream_0 = x_downstream[0] # Analysis # Downstream distances -- 0 at mouth net.compute_x_in_network() # Elevation if elevation: gscript.message("Elevation") _include_z = True # Load DEM griddata = garray.array() griddata.read(elevation) griddata = np.flipud(griddata) # Interpolate: nearest or linear? 
x = np.arange(reg.west + reg.ewres/2., reg.east, reg.ewres) y = np.arange(reg.south + reg.nsres/2., reg.north, reg.nsres) itp = RegularGridInterpolator( (x, y), griddata.transpose(), method='nearest') _i = 0 _lasti = 0 _nexti = 0 for segment in net.segment_list: try: segment.set_z( itp(segment.EastingNorthing) ) except: print segment.EastingNorthing print np.vstack((segment.Easting_original, segment.Northing_original)).transpose() sys.exit() if _i > _nexti: gscript.core.percent( _i, len(net.segment_list), np.floor(_i - _lasti)) _nexti = float(_nexti) + len(net.segment_list)/10. if _nexti > len(net.segment_list): _nexti = len(net.segment_list) - 1 _lasti = _i _i += 1 gscript.core.percent(1, 1, 1) del griddata #warnings.warn('Need to handle window in network') #gscript.core.percent(1, 1, 1) else: _include_z = False # Slope if slope: gscript.message("Slope") _include_S = True _slope = RasterRow(slope) _slope.open('r') _i = 0 _lasti = 0 _nexti = 0 for segment in net.segment_list: sen = segment.EastingNorthing # all E,N S = [] for row in sen: #try: S.append(_slope.get_value(Point(row[0], row[1]))) #except: # print "ERROR" if _i > _nexti: gscript.core.percent(_i, len(coords), np.floor(_i - _lasti)) _nexti = float(_nexti) + len(coords)/10. if _nexti > len(coords): _nexti = len(coords) - 1 _lasti = _i _i += 1 # MAKE SETTER FOR THIS!!!! 
segment.channel_slope = np.array(S) if window is not None: pass #net.smooth_window() #_x_downstream, _S = moving_average(x_downstream_0, S, window) _slope.close() S = np.array(S) S_0 = S.copy() gscript.core.percent(1, 1, 1) else: _include_S = False # Accumulation / drainage area if accumulation: gscript.message("Accumulation") _include_A = True accumulation = RasterRow(accumulation) accumulation.open('r') _i = 0 _lasti = 0 _nexti = 0 for segment in net.segment_list: A = [] sen = segment.EastingNorthing # all E,N for row in sen: A.append(accumulation.get_value(Point(row[0], row[1])) * accum_mult) if _i > _nexti: gscript.core.percent(_i, len(coords), np.floor(_i - _lasti)) _nexti = float(_nexti) + len(coords)/10. if _nexti > len(coords): _nexti = len(coords) - 1 _lasti = _i _i += 1 # MAKE SETTER FOR THIS!!!! segment.channel_flow_accumulation = np.array(A) accumulation.close() A = np.array(A) A_0 = A.copy() """ if window is not None: _x_downstream, A = moving_average(x_downstream_0, A, window) """ gscript.core.percent(1, 1, 1) else: _include_A = False # Revert to original region reg_to_revert # Smoothing if window is not None: net.smooth_window(window) # Plotting if 'LongProfile' in plots: plt.figure() if window: for segment in net.segment_list: plt.plot(segment.x/1000., segment.z_smoothed, 'k-', linewidth=2) else: for segment in net.segment_list: plt.plot(segment.x/1000., segment.z, 'k-', linewidth=2) #plt.plot(x_downstream/1000., z, 'k-', linewidth=2) plt.xlabel('Distance from mouth [km]', fontsize=16) plt.ylabel('Elevation [m]', fontsize=16) plt.tight_layout() if 'SlopeAccum' in plots: plt.figure() if window: for segment in net.segment_list: _y_points = segment.channel_slope_smoothed[ segment.channel_flow_accumulation_smoothed > 0 ] _x_points = segment.channel_flow_accumulation_smoothed[ segment.channel_flow_accumulation_smoothed > 0 ] plt.loglog(_x_points, _y_points, 'k.', alpha=.5) else: for segment in net.segment_list: _y_points = segment.channel_slope[ 
segment.channel_flow_accumulation > 0 ] _x_points = segment.channel_flow_accumulation[ segment.channel_flow_accumulation > 0 ] plt.loglog(_x_points, _y_points, 'k.', alpha=.5) plt.xlabel(accum_label, fontsize=16) plt.ylabel('Slope [$-$]', fontsize=16) plt.tight_layout() if 'SlopeDistance' in plots: plt.figure() if window: for segment in net.segment_list: plt.plot(segment.x/1000., segment.channel_slope_smoothed, 'k-', linewidth=2) else: for segment in net.segment_list: plt.plot(segment.x/1000., segment.channel_slope, 'k-', linewidth=2) plt.xlabel('Distance downstream [km]', fontsize=16) plt.ylabel('Slope [$-$]', fontsize=20) plt.tight_layout() if 'AccumDistance' in plots: plt.figure() for segment in net.segment_list: _x_points = segment.x[segment.channel_flow_accumulation > 0] _y_points = segment.channel_flow_accumulation[ segment.channel_flow_accumulation > 0 ] plt.plot(_x_points/1000., _y_points, 'k.', alpha=.5) plt.xlabel('Distance downstream [km]', fontsize=16) plt.ylabel(accum_label, fontsize=16) plt.tight_layout() plt.show() # Saving data -- will need to update for more complex data structures! if outfile: net.compute_profile_from_starting_segment() _outfile = np.vstack((net.long_profile_header, net.long_profile_output)) np.savetxt(outfile, _outfile, '%s') else: pass #print net.accum_from_headwaters[1] - net.slope_from_headwaters[1] """ for segment in net.segment_list: print segment.channel_flow_accumulation_smoothed print segment.channel_slope_smoothed print segment.channel_flow_accumulation_smoothed - \ segment.channel_slope_smoothed """ """
# Self ID reach_columns.append('KRCH integer') reach_columns.append('IRCH integer') reach_columns.append('JRCH integer') reach_columns.append('NSEG integer') # = segment_id = ISEG reach_columns.append('ISEG integer') # = segment_id reach_columns.append('IREACH integer') reach_columns.append('RCHLEN integer') reach_columns.append('STRTOP double precision') reach_columns.append('SLOPE double precision') reach_columns.append('STRTHICK double precision') reach_columns = ",".join(reach_columns) # Create a map to work with v.extract(input='streams', output='tmp2', type='line', overwrite=True) v.overlay(ainput='tmp2', atype='line', binput='grid', output='reaches', operator='and', overwrite=True) v.db_addcolumn(map='reaches', columns=reach_columns) # Rename a,b columns v.db_renamecolumn(map='reaches', column=('a_x1', 'x1')) v.db_renamecolumn(map='reaches', column=('a_x2', 'x2')) v.db_renamecolumn(map='reaches', column=('a_y1', 'y1')) v.db_renamecolumn(map='reaches', column=('a_y2', 'y2')) v.db_renamecolumn(map='reaches', column=('a_stream_type', 'stream_type')) v.db_renamecolumn(map='reaches', column=('a_type_code', 'type_code')) v.db_renamecolumn(map='reaches', column=('a_cat', 'rnum_cat')) v.db_renamecolumn(map='reaches', column=('a_tostream', 'tostream')) v.db_renamecolumn(map='reaches', column=('a_id', 'segment_id')) v.db_renamecolumn(map='reaches', column=('a_OUTSEG', 'OUTSEG'))
def smeasure():
    """
    Compute the soil-loss-on-grown-soil map for one farm's measure polygons
    and reclassify/report the result.

    Relies on module-level state prepared by the caller: `maps`,
    `measuremap`, `parcelmap`, `soillossbarecorrmap`,
    `soillossbarebarriermap`, `soillossmeasuremap`, `soillossgrowmap`,
    the parsed flags (`flag_b`, `flag_s`, `flag_c`), `betriebid`, `quiet`,
    and `output`.
    """
    gscript.message('Import <%s>' % measuremap.name)
    measuremap.autoimport('measures', overwrite=True, quiet=quiet,
                          where="betrieb_id = %s" % betriebid)

    soillossbaremap = maps['soillossbare']
    kfactormap = maps['kfactor']

    # Prefer the corrected bare-soil-loss map when one has been produced
    if soillossbarecorrmap.exist():
        gscript.message('Using updated soillossbare map.')
        soillossbaremap = soillossbarecorrmap
        kfactormap = Rast(parcelmap.name + '.kfactor')

    if flag_b:
        # Barrier measures: cut them out of the field blocks and re-run the
        # bare-soil-loss model on the reduced area
        measurebarriermap = Vect(measuremap.name + '_barrier')
        v.extract(input=measuremap.name, where="barrier = 1",
                  output=measurebarriermap.name)

        measurefieldblockmap = Vect(measuremap.name + '_fieldblocks')
        v.overlay(ainput=maps['fieldblocks'].name,
                  binput=measurebarriermap.name,
                  operator='not', output=measurefieldblockmap.name)

        rsoillossbare.inputs.elevation = maps['elevation'].name
        rsoillossbare.inputs.rfactor = maps['rfactor'].name
        rsoillossbare.inputs.kfactor = kfactormap.name
        rsoillossbare.inputs.map = measurefieldblockmap.name
        rsoillossbare.inputs.constant_m = '0.6'
        rsoillossbare.inputs.constant_n = '1.4'
        rsoillossbare.flags.r = True
        rsoillossbare(soillossbare=soillossbarebarriermap.name)
        soillossbaremap = soillossbarebarriermap

    # Rasterize P- and C-factor attributes for parcels and for measures
    parcelpfactor = parcelmap.name + '.pfactor'
    parcelcfactor = parcelmap.name + '.cfactor'
    v.to_rast(input=parcelmap.name, use='attr', attrcolumn='pfactor',
              output=parcelpfactor)
    v.to_rast(input=parcelmap.name, use='attr', attrcolumn='cfactor',
              output=parcelcfactor)

    measurepfactor = measuremap.name + '.pfactor'
    measurecfactor = measuremap.name + '.cfactor'
    v.to_rast(input=measuremap.name, use='attr', attrcolumn='pfactor',
              output=measurepfactor)
    v.to_rast(input=measuremap.name, use='attr', attrcolumn='cfactor',
              output=measurecfactor)

    # Patch: measure values take precedence over parcel values
    pfactor = parcelmap.name + '.pfactor.measure'
    cfactor = parcelmap.name + '.cfactor.measure'
    r.patch(input=(measurepfactor, parcelpfactor), output=pfactor)
    r.patch(input=(measurecfactor, parcelcfactor), output=cfactor)

    rsoillossgrow.inputs.soillossbare = soillossbaremap.name
    # FIX: the patched C-factor map feeds the cfactor input and the patched
    # P-factor map feeds the pfactor input (the two were swapped before).
    rsoillossgrow.inputs.cfactor = cfactor
    rsoillossgrow.inputs.pfactor = pfactor
    rsoillossgrow(soillossgrow=soillossmeasuremap.name)

    rsoillossreclass(soillossmeasuremap.name, 'soillossgrow', flags='')
    gscript.message('Reclassified and colored maps found in <%s.3> and <%s.9> .'
                    % (soillossmeasuremap.name, soillossmeasuremap.name))

    if flag_s:
        gscript.message('\n \n Statistics for soilloss on grown soil <%s> : '
                        % (soillossgrowmap))
        rsoillossstats(soilloss=soillossmeasuremap.name, map=parcelmap.name,
                       parcelnumcol='id')

    if not flag_c:
        g.copy(rast=(soillossmeasuremap.name, output))
        gscript.message('Copy made to <%s> for automatic output' % (output))
#g.remove(type='vector', pattern='*', flags=['f']) # Importing the ShapeFiles to GRASS for lr in Layers: v.in_ogr(input=Join('SHP', lr), output=lr, flags=O, key='id') Blocks = Calculation.Blocks # Calculation.py does the calculation for ix, block in enumerate(Blocks, start=1): Points = 'Trip_%s' % ix JR = 'JoinedRoad_%s' % ix Path = 'Path_%s' % ix B = ['Start'] + sorted(block) + ['End'] L = len(B) whr = 'Block IN %s' % str(tuple(B)) v.extract(input='nPoints', output=Points, where=whr, flags=O) v.net( input='nRoads', points=Points, # Trip_1 output=JR, operation='connect', threshold='100', flags=O) #v.db_addtable(map=JR, layer=2) #v.db_update(map=JR, column='cat', value=1, where="Block like 'Start'") #v.db_update(map=JR, column='cat', value=L, where="Block like 'End'") #for ix,i in enumerate(sorted(block), start=2): #v.db_update(map=Points, column='cat', value=ix, where="Block like '%s'"%i)