def vmapcalc2(vmap, vlayer, cname, ctype, expr, overwrite=False):
    """Add a column to a vector map and populate it from an SQL expression."""
    v.db_addcolumn(map=vmap, layer=vlayer, columns=cname + ' ' + ctype,
                   overwrite=overwrite)
    v.db_update(map=vmap, layer=vlayer, column=cname, query_column=expr,
                overwrite=overwrite)
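# A minimal usage sketch of vmapcalc2() (map and column names here are
# hypothetical), wrapped in a function so it does not run on import:

def _example_vmapcalc2():
    """Compute a percent-area column from two existing area columns."""
    vmapcalc2(vmap='gravity_reservoirs', vlayer=1,
              cname='gvr_cell_pct', ctype='double precision',
              expr='100*area_m2/cell_area_m2')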
def main():
    """
    Build gravity reservoirs in GSFLOW: combines MODFLOW grid and HRU
    sub-basins. These define the PRMS soil zone that connects to MODFLOW
    cells.
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()

    # I/O
    HRUs = options['hru_input']
    grid = options['grid_input']
    #col = options['col']
    gravity_reservoirs = options['output']

    ############
    # ANALYSIS #
    ############

    """
    # Basin areas
    v.db_addcolumn(map=basins, columns=col)
    v.to_db(map=basins, option='area', units='meters', columns=col)
    """

    # Create gravity reservoirs -- overlay cells=grid and HRUs
    v.overlay(ainput=HRUs, binput=grid, atype='area', btype='area',
              operator='and', output=gravity_reservoirs,
              overwrite=gscript.overwrite())
    v.db_dropcolumn(map=gravity_reservoirs, columns='a_cat,a_label,b_cat',
                    quiet=True)
    # Cell and HRU ID's
    v.db_renamecolumn(map=gravity_reservoirs, column=('a_id', 'gvr_hru_id'),
                      quiet=True)
    v.db_renamecolumn(map=gravity_reservoirs, column=('b_id', 'gvr_cell_id'),
                      quiet=True)
    # Percent areas
    v.db_renamecolumn(map=gravity_reservoirs,
                      column=('a_hru_area_m2', 'hru_area_m2'), quiet=True)
    v.db_renamecolumn(map=gravity_reservoirs,
                      column=('b_area_m2', 'cell_area_m2'), quiet=True)
    v.db_addcolumn(map=gravity_reservoirs,
                   columns='area_m2 double precision', quiet=True)
    v.to_db(map=gravity_reservoirs, option='area', units='meters',
            columns='area_m2', quiet=True)
    v.db_addcolumn(map=gravity_reservoirs,
                   columns='gvr_cell_pct double precision, '
                           'gvr_hru_pct double precision', quiet=True)
    v.db_update(map=gravity_reservoirs, column='gvr_cell_pct',
                query_column='100*area_m2/cell_area_m2', quiet=True)
    v.db_update(map=gravity_reservoirs, column='gvr_hru_pct',
                query_column='100*area_m2/hru_area_m2', quiet=True)
    # Keep only reservoirs that meaningfully overlap a cell
    v.extract(input=gravity_reservoirs, output='tmp_',
              where="gvr_cell_pct > 0.001", overwrite=True, quiet=True)
    g.rename(vector=('tmp_', gravity_reservoirs), overwrite=True, quiet=True)
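# A sanity check one might run after main() above (a sketch; assumes the
# default SQLite attribute database and the output map name
# 'gravity_reservoirs'): within each MODFLOW cell that is fully covered
# by HRUs, the gvr_cell_pct values should sum to ~100.

def _check_gvr_percent_cover():
    rows = gscript.db_select(
        sql="SELECT gvr_cell_id, SUM(gvr_cell_pct) "
            "FROM gravity_reservoirs GROUP BY gvr_cell_id")
    # Cells on the basin edge are only partly covered, so sums < 100
    # there are expected.
    return [row for row in rows if float(row[1]) < 99.9]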
def raster_to_vector(raster, vector, type):
    """Convert a raster map to a vector map

    Parameters
    ----------
    raster :
        Name of the input raster map
    vector :
        Name for the output vector map
    type :
        Type for the output vector map ('point', 'line', or 'area')

    Returns
    -------
        Nothing; creates the vector map and its attribute table

    Examples
    --------
    ..
    """
    r.to_vect(input=raster, output=vector, type=type, quiet=True)

    # Value is the ecosystem type
    v.db_renamecolumn(map=vector, column=("value", "ecosystem"))

    # New column for flow values; 'flow_column_name' is set at module level
    addcolumn_string = flow_column_name + " double"
    v.db_addcolumn(map=vector, columns=addcolumn_string)

    # The raster category 'label' is the 'flow'
    v.db_update(map=vector, column="flow", query_column="label")
    v.db_dropcolumn(map=vector, columns="label")

    # Update the aggregation raster categories; 'category' is set at
    # module level
    v.db_addcolumn(map=vector, columns="aggregation_id int")
    v.db_update(map=vector, column="aggregation_id", value=category)
    v.colors(map=vector, raster=raster, quiet=True)
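# A usage sketch for raster_to_vector() (map names are hypothetical;
# flow_column_name and category must already be defined at module level):
# vectorize one categorical flow raster into areas so its attribute
# table can be edited as above.
#
# raster_to_vector(raster='flow_in_forest', vector='flow_in_forest',
#                  type='area')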
""" # Prepare the stream lines and the points version of the same r.thin(input='streams_tmp', output='streams', overwrite=True) r.to_vect(input='streams', output='streams', type='line', overwrite=True) v.db_dropcolumn(map='streams', columns='label') v.db_renamecolumn(map='streams', column=('value', 'river_number')) r.to_vect(input='streams', output='streams_points', type='point', overwrite=True) v.db_dropcolumn(map='streams_points', columns='label') v.db_renamecolumn(map='streams_points', column=('value', 'river_number')) # Get slope and area v.db_addcolumn(map='streams_points', columns=('slope double precision, area_km2 double precision')) v.what_rast(map='streams_points', type='point', raster='slope', column='slope') v.what_rast(map='streams_points', type='point', raster='drainageArea_km2', column='area_km2') # The following is not following upstream/downstream conventions. # Time to do this the hard way. # 1. Get vectorTopo # 2. Get coordinates # 3. Get areas at coordinates # 4. Sort points to go from small A to large A -- but not yet uploading # 5. Upload small area as x1, y1; large area as x2, y2 from grass.pygrass.vector import Vector, VectorTopo
def main():
    """
    Builds river segments for input to the USGS hydrologic models
    PRMS and GSFLOW.
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()

    # I/O
    streams = options['input']
    segments = options['output']

    # Hydraulic geometry
    ICALC = int(options['icalc'])
    # ICALC=0: Constant depth
    WIDTH1 = options['width1']
    WIDTH2 = options['width2']
    # ICALC=1: Manning
    ROUGHCH = options['roughch']
    # ICALC=2: Manning
    ROUGHBK = options['roughbk']
    # ICALC=3: Power-law relationships (following Leopold and others)
    # The at-a-station default exponents are from Rhodes (1977)
    CDPTH = str(float(options['cdpth']) / 35.3146667)  # cfs to m^3/s
    FDPTH = options['fdpth']
    AWDTH = str(float(options['awdth']) / 35.3146667)  # cfs to m^3/s
    BWDTH = options['bwdth']

    ##################################################
    # CHECKING DEPENDENCIES WITH OPTIONAL PARAMETERS #
    ##################################################

    if ICALC == 3:
        if not (CDPTH and FDPTH and AWDTH and BWDTH):
            gscript.fatal('Missing CDPTH, FDPTH, AWDTH, and/or BWDTH. '
                          'These are required when ICALC = 3.')

    ###########
    # RUNNING #
    ###########

    # New columns for segments
    segment_columns = []
    # Self ID
    segment_columns.append('id integer')  # segment number
    segment_columns.append('ISEG integer')  # segment number
    segment_columns.append('NSEG integer')  # segment number
    # for GSFLOW
    segment_columns.append('ICALC integer')  # hydraulic geometry option; 3 for power function
    segment_columns.append('OUTSEG integer')  # downstream segment -- tostream, renumbered
    segment_columns.append('ROUGHCH double precision')  # in-channel Manning's n
    segment_columns.append('ROUGHBK double precision')  # overbank Manning's n
    segment_columns.append('WIDTH1 double precision')  # channel width, upstream end
    segment_columns.append('WIDTH2 double precision')  # channel width, downstream end
    segment_columns.append('CDPTH double precision')  # depth coeff
    segment_columns.append('FDPTH double precision')  # depth exp
    segment_columns.append('AWDTH double precision')  # width coeff
    segment_columns.append('BWDTH double precision')  # width exp
    # The below will be all 0
    segment_columns.append('IUPSEG varchar')  # upstream segment ID number, for diversions
    segment_columns.append('FLOW varchar')
    segment_columns.append('RUNOFF varchar')
    segment_columns.append('ETSW varchar')
    segment_columns.append('PPTSW varchar')

    segment_columns = ",".join(segment_columns)

    # CONSIDER THE EFFECT OF OVERWRITING COLUMNS -- WARN FOR THIS
    # IF MAP EXISTS ALREADY?

    # Create a map to work with
    g.copy(vector=(streams, segments), overwrite=gscript.overwrite())
    # and add its columns
    v.db_addcolumn(map=segments, columns=segment_columns)

    # Produce the data table entries
    ##################################
    colNames = np.array(gscript.vector_db_select(segments, layer=1)['columns'])
    colValues = np.array(
        list(gscript.vector_db_select(segments, layer=1)['values'].values()))
    number_of_segments = colValues.shape[0]
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()
    nseg = np.arange(1, len(cats) + 1)
    nseg_cats = []
    for i in range(len(cats)):
        nseg_cats.append((nseg[i], cats[i]))

    segmentsTopo = VectorTopo(segments)
    segmentsTopo.open('rw')
    cur = segmentsTopo.table.conn.cursor()

    # id = cat (as do ISEG and NSEG)
    cur.executemany("update " + segments + " set id=? where cat=?", nseg_cats)
    cur.executemany("update " + segments + " set ISEG=? where cat=?",
                    nseg_cats)
    cur.executemany("update " + segments + " set NSEG=? where cat=?",
                    nseg_cats)

    # outseg = tostream: default is 0 if "tostream" is off-map
    cur.execute("update " + segments + " set OUTSEG=0")
    cur.executemany("update " + segments + " set OUTSEG=? where tostream=?",
                    nseg_cats)

    # Discharge and hydraulic geometry
    cur.execute("update " + segments + " set WIDTH1=" + str(WIDTH1))
    cur.execute("update " + segments + " set WIDTH2=" + str(WIDTH2))
    cur.execute("update " + segments + " set ROUGHCH=" + str(ROUGHCH))
    cur.execute("update " + segments + " set ROUGHBK=" + str(ROUGHBK))
    cur.execute("update " + segments + " set ICALC=" + str(ICALC))
    cur.execute("update " + segments + " set CDPTH=" + str(CDPTH))
    cur.execute("update " + segments + " set FDPTH=" + str(FDPTH))
    cur.execute("update " + segments + " set AWDTH=" + str(AWDTH))
    cur.execute("update " + segments + " set BWDTH=" + str(BWDTH))

    gscript.message('')
    gscript.message('NOTICE: not currently used:')
    gscript.message('IUPSEG, FLOW, RUNOFF, ETSW, and PPTSW.')
    gscript.message('All set to 0.')
    gscript.message('')

    # values that are 0
    cur.execute("update " + segments + " set IUPSEG=" + str(0))
    cur.execute("update " + segments + " set FLOW=" + str(0))
    cur.execute("update " + segments + " set RUNOFF=" + str(0))
    cur.execute("update " + segments + " set ETSW=" + str(0))
    cur.execute("update " + segments + " set PPTSW=" + str(0))

    segmentsTopo.table.conn.commit()
    segmentsTopo.close()
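# For reference: with ICALC=3, the SFR package computes depth and width
# from discharge via the power laws DEPTH = CDPTH * Q**FDPTH and
# WIDTH = AWDTH * Q**BWDTH. A minimal sketch of those relationships (the
# coefficient values below are hypothetical; note that main() above
# converts the cfs-based coefficients to m^3/s):

def _powerlaw_hydraulic_geometry(Q_m3s, cdpth=0.25, fdpth=0.4,
                                 awdth=8.0, bwdth=0.5):
    """Return (depth, width) for discharge Q via at-a-station power laws."""
    depth = cdpth * Q_m3s ** fdpth
    width = awdth * Q_m3s ** bwdth
    return depth, width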
def main():
    """
    Import any raster or vector data set and add its attribute to a
    GSFLOW data object
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()

    # Parsing
    if options['attrtype'] == 'int':
        attrtype = 'integer'
    elif options['attrtype'] == 'float':
        attrtype = 'double precision'
    elif options['attrtype'] == 'string':
        attrtype = 'varchar'
    else:
        attrtype = ''

    ########################################
    # PROCESS AND UPLOAD TO DATABASE TABLE #
    ########################################

    if options['vector_area'] != '':
        gscript.use_temp_region()
        g.region(vector=options['map'], res=options['dxy'])
        v.to_rast(input=options['vector_area'], output='tmp___tmp',
                  use='attr', attribute_column=options['from_column'],
                  quiet=True, overwrite=True)
        try:
            gscript.message("Checking for existing column to overwrite")
            v.db_dropcolumn(map=options['map'], columns=options['column'],
                            quiet=True)
        except Exception:
            pass
        if attrtype == 'double precision':
            try:
                gscript.message("Checking for existing column to overwrite")
                v.db_dropcolumn(map=options['map'], columns='tmp_average',
                                quiet=True)
            except Exception:
                pass
            v.rast_stats(map=options['map'], raster='tmp___tmp',
                         column_prefix='tmp', method='average', flags='c',
                         quiet=True)
            g.remove(type='raster', name='tmp___tmp', flags='f', quiet=True)
            v.db_renamecolumn(map=options['map'],
                              column=['tmp_average', options['column']],
                              quiet=True)
        else:
            try:
                v.db_addcolumn(map=options['map'],
                               columns=options['column'] + ' ' + attrtype,
                               quiet=True)
            except Exception:
                pass
            gscript.run_command('v.distance', from_=options['map'],
                                to=options['vector_area'], upload='to_attr',
                                to_column=options['from_column'],
                                column=options['column'], quiet=True)
    elif options['vector_points'] != '':
        try:
            gscript.message("Checking for existing column to overwrite")
            v.db_dropcolumn(map=options['map'], columns=options['column'],
                            quiet=True)
            v.db_addcolumn(map=options['map'],
                           columns=options['column'] + ' ' + attrtype,
                           quiet=True)
        except Exception:
            pass
        gscript.run_command('v.distance', from_=options['map'],
                            to=options['vector_points'], upload='to_attr',
                            to_column=options['from_column'],
                            column=options['column'], quiet=True)
    elif options['raster'] != '':
        try:
            gscript.message("Checking for existing column to overwrite")
            v.db_dropcolumn(map=options['map'], columns=options['column'],
                            quiet=True)
        except Exception:
            pass
        v.rast_stats(map=options['map'], raster=options['raster'],
                     column_prefix='tmp', method='average', flags='c',
                     quiet=True)
        v.db_renamecolumn(map=options['map'],
                          column=['tmp_average', options['column']],
                          quiet=True)

    gscript.message("Done.")
#r.mapcalc('drainageArea_km2 = drainageArea_km2 * (drainageArea_km2 > 0)',
#          overwrite=True)
#r.null(map='drainageArea_km2', setnull=0)
#r.mapcalc(elevation+" = "+elevation+"*drainageArea_km2*0", overwrite=True)

# Get watershed
print("Building drainage network")
r.stream_extract(elevation=elevation, accumulation='drainageArea_m2',
                 threshold=thresh, d8cut=0, mexp=0,
                 stream_raster='streams', stream_vector='streams',
                 direction='draindir', overwrite=True)

"""
# Get slope and area
v.db_addcolumn(map='streams_points',
               columns='slope double precision, area_km2 double precision')
v.what_rast(map='streams_points', type='point', raster='slope',
            column='slope')
v.what_rast(map='streams_points', type='point', raster='drainageArea_km2',
            column='area_km2')
"""

"""
~~~~~~~~~~~~~~~~~~~~~
Now that I have a good ordering scheme, find adjacency between units based
on starting and ending points, and then convert each of these to points
and get the slopes and areas between them. Concatenate these all into a
single line and see what kind of averaging is needed -- of position,
slope, and area. Position should include a base downstream distance as
well, so we can keep averaged lines on the same course.
06 NOV
~~~~~~~~~~~~~~~~~~~~~
"""

####################
# COMPUTE NETWORKS #
####################
def main():
    """
    Builds river reaches for input to the USGS hydrologic model, GSFLOW.
    These reaches link the PRMS stream segments to the MODFLOW grid cells.
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()
    segments = options["segment_input"]
    grid = options["grid_input"]
    reaches = options["output"]
    elevation = options["elevation"]
    Smin = options["s_min"]
    h_stream = options["h_stream"]
    x1 = options["upstream_easting_column_seg"]
    y1 = options["upstream_northing_column_seg"]
    x2 = options["downstream_easting_column_seg"]
    y2 = options["downstream_northing_column_seg"]
    tostream = options["tostream_cat_column_seg"]
    # Hydraulic parameters
    STRTHICK = options["strthick"]
    STRHC1 = options["strhc1"]
    THTS = options["thts"]
    THTI = options["thti"]
    EPS = options["eps"]
    UHC = options["uhc"]

    # Build reach maps by overlaying segments on grid
    if len(gscript.find_file(segments, element="vector")["name"]) > 0:
        v.extract(
            input=segments,
            output="GSFLOW_TEMP__",
            type="line",
            quiet=True,
            overwrite=True,
        )
        v.overlay(
            ainput="GSFLOW_TEMP__",
            atype="line",
            binput=grid,
            output=reaches,
            operator="and",
            overwrite=gscript.overwrite(),
            quiet=True,
        )
        g.remove(type="vector", name="GSFLOW_TEMP__", quiet=True, flags="f")
    else:
        gscript.fatal('No vector file "' + segments + '" found.')

    # Start editing database table
    reachesTopo = VectorTopo(reaches)
    reachesTopo.open("rw")

    # Rename a,b columns
    reachesTopo.table.columns.rename("a_" + x1, "x1")
    reachesTopo.table.columns.rename("a_" + x2, "x2")
    reachesTopo.table.columns.rename("a_" + y1, "y1")
    reachesTopo.table.columns.rename("a_" + y2, "y2")
    reachesTopo.table.columns.rename("a_NSEG", "NSEG")
    reachesTopo.table.columns.rename("a_ISEG", "ISEG")
    reachesTopo.table.columns.rename("a_stream_type", "stream_type")
    reachesTopo.table.columns.rename("a_type_code", "type_code")
    reachesTopo.table.columns.rename("a_cat", "rnum_cat")
    reachesTopo.table.columns.rename("a_" + tostream, "tostream")
    reachesTopo.table.columns.rename("a_id", "segment_id")
    reachesTopo.table.columns.rename("a_OUTSEG", "OUTSEG")
    reachesTopo.table.columns.rename("b_row", "row")
    reachesTopo.table.columns.rename("b_col", "col")
    reachesTopo.table.columns.rename("b_id", "cell_id")

    # Drop unnecessary columns
    cols = reachesTopo.table.columns.names()
    for col in cols:
        if (col[:2] == "a_") or (col[:2] == "b_"):
            reachesTopo.table.columns.drop(col)

    # Add new columns to 'reaches'
    reachesTopo.table.columns.add("KRCH", "integer")
    reachesTopo.table.columns.add("IRCH", "integer")
    reachesTopo.table.columns.add("JRCH", "integer")
    reachesTopo.table.columns.add("IREACH", "integer")
    reachesTopo.table.columns.add("RCHLEN", "double precision")
    reachesTopo.table.columns.add("STRTOP", "double precision")
    reachesTopo.table.columns.add("SLOPE", "double precision")
    reachesTopo.table.columns.add("STRTHICK", "double precision")
    reachesTopo.table.columns.add("STRHC1", "double precision")
    reachesTopo.table.columns.add("THTS", "double precision")
    reachesTopo.table.columns.add("THTI", "double precision")
    reachesTopo.table.columns.add("EPS", "double precision")
    reachesTopo.table.columns.add("UHC", "double precision")
    reachesTopo.table.columns.add("xr1", "double precision")
    reachesTopo.table.columns.add("xr2", "double precision")
    reachesTopo.table.columns.add("yr1", "double precision")
    reachesTopo.table.columns.add("yr2", "double precision")

    # Commit columns before editing (necessary?)
    reachesTopo.table.conn.commit()
    reachesTopo.close()

    # Update some columns that can be done now
    reachesTopo.open("rw")
    colNames = np.array(gscript.vector_db_select(reaches, layer=1)["columns"])
    colValues = np.array(
        list(gscript.vector_db_select(reaches, layer=1)["values"].values()))
    cats = colValues[:, colNames == "cat"].astype(int).squeeze()
    nseg = np.arange(1, len(cats) + 1)
    nseg_cats = []
    for i in range(len(cats)):
        nseg_cats.append((nseg[i], cats[i]))
    cur = reachesTopo.table.conn.cursor()

    # Hydrogeologic properties
    cur.execute("update " + reaches + " set STRTHICK=" + str(STRTHICK))
    cur.execute("update " + reaches + " set STRHC1=" + str(STRHC1))
    cur.execute("update " + reaches + " set THTS=" + str(THTS))
    cur.execute("update " + reaches + " set THTI=" + str(THTI))
    cur.execute("update " + reaches + " set EPS=" + str(EPS))
    cur.execute("update " + reaches + " set UHC=" + str(UHC))
    # Grid properties
    cur.execute("update " + reaches + " set KRCH=1")  # Top layer: unchangeable
    cur.executemany("update " + reaches + " set IRCH=? where row=?", nseg_cats)
    cur.executemany("update " + reaches + " set JRCH=? where col=?", nseg_cats)
    reachesTopo.table.conn.commit()
    reachesTopo.close()
    v.to_db(map=reaches, columns="RCHLEN", option="length", quiet=True)

    # Still to go after these:
    # STRTOP (added with slope)
    # IREACH (whole next section dedicated to this)
    # SLOPE (need z_start and z_end)

    # Now, the light stuff is over: time to build the reach order
    v.to_db(map=reaches, option="start", columns="xr1,yr1")
    v.to_db(map=reaches, option="end", columns="xr2,yr2")

    # Now just sort by category, find which stream has the same xr1 and yr1
    # as x1 and y1 (or a_x1, a_y1) and then find where its endpoint matches
    # another starting point and move down the line.
    # v.db.select reaches col=cat,a_id,xr1,xr2 where="a_x1 = xr1"

    # First, get the starting coordinates of each stream segment
    # and a set of river ID's (ordered from 1...N)
    colNames = np.array(gscript.vector_db_select(segments, layer=1)["columns"])
    colValues = np.array(
        list(gscript.vector_db_select(segments, layer=1)["values"].values()))
    number_of_segments = colValues.shape[0]
    segment_x1s = colValues[:, colNames == "x1"].astype(float).squeeze()
    segment_y1s = colValues[:, colNames == "y1"].astype(float).squeeze()
    segment_ids = colValues[:, colNames == "id"].astype(float).squeeze()

    # Then move back to the reaches map to produce the ordering
    colNames = np.array(gscript.vector_db_select(reaches, layer=1)["columns"])
    colValues = np.array(
        list(gscript.vector_db_select(reaches, layer=1)["values"].values()))
    reach_cats = colValues[:, colNames == "cat"].astype(int).squeeze()
    reach_x1s = colValues[:, colNames == "xr1"].astype(float).squeeze()
    reach_y1s = colValues[:, colNames == "yr1"].astype(float).squeeze()
    reach_x2s = colValues[:, colNames == "xr2"].astype(float).squeeze()
    reach_y2s = colValues[:, colNames == "yr2"].astype(float).squeeze()
    segment_ids__reach = colValues[:, colNames == "segment_id"].astype(
        float).squeeze()

    for segment_id in segment_ids:
        reach_order_cats = []
        downstream_directed = []
        ssel = segment_ids == segment_id
        rsel = segment_ids__reach == segment_id  # selector
        # Find first segment: x1y1 first here, but not necessarily later
        downstream_directed.append(1)
        _x_match = reach_x1s[rsel] == segment_x1s[ssel]
        _y_match = reach_y1s[rsel] == segment_y1s[ssel]
        _i_match = _x_match * _y_match
        x1y1 = True  # false if x2y2
        # Find cat
        _cat = int(reach_cats[rsel][_x_match * _y_match])
        reach_order_cats.append(_cat)
        # Get end of reach = start of next one
        reach_x_end = float(reach_x2s[reach_cats == _cat])
        reach_y_end = float(reach_y2s[reach_cats == _cat])
        while _i_match.any():
            _x_match = reach_x1s[rsel] == reach_x_end
            _y_match = reach_y1s[rsel] == reach_y_end
            _i_match = _x_match * _y_match
            if _i_match.any():
                _cat = int(reach_cats[rsel][_x_match * _y_match])
                reach_x_end = float(reach_x2s[reach_cats == _cat])
                reach_y_end = float(reach_y2s[reach_cats == _cat])
                reach_order_cats.append(_cat)
        _message = str(len(reach_order_cats)) + " " \
            + str(len(reach_cats[rsel]))
        gscript.message(_message)

        # Reach order to database table
        reach_number__reach_order_cats = []
        for i in range(len(reach_order_cats)):
            reach_number__reach_order_cats.append((i + 1,
                                                   reach_order_cats[i]))
        reachesTopo = VectorTopo(reaches)
        reachesTopo.open("rw")
        cur = reachesTopo.table.conn.cursor()
        cur.executemany(
            "update " + reaches + " set IREACH=? where cat=?",
            reach_number__reach_order_cats,
        )
        reachesTopo.table.conn.commit()
        reachesTopo.close()

    # TOP AND BOTTOM ARE OUT OF ORDER: SOME SEGS ARE BACKWARDS. UGH!!!!
    # NEED TO GET THEM IN ORDER TO GET THE Z VALUES AT START AND END

    # 2018.10.01: Updating this to use the computational region for the DEM
    g.region(raster=elevation)

    # Compute slope and starting elevations from the elevations at the start
    # and end of the reaches and the length of each reach
    gscript.message("Obtaining elevation values from raster: may take time.")
    v.db_addcolumn(map=reaches,
                   columns="zr1 double precision, zr2 double precision")
    zr1 = []
    zr2 = []
    for i in range(len(reach_cats)):
        _x = reach_x1s[i]
        _y = reach_y1s[i]
        _z = float(
            list(gscript.parse_command(
                "r.what", map=elevation,
                coordinates=str(_x) + "," + str(_y)
            ).keys())[0].split("|")[-1]
        )
        zr1.append(_z)
        _x = reach_x2s[i]
        _y = reach_y2s[i]
        _z = float(
            list(gscript.parse_command(
                "r.what", map=elevation,
                coordinates=str(_x) + "," + str(_y)
            ).keys())[0].split("|")[-1]
        )
        zr2.append(_z)

    zr1_cats = []
    zr2_cats = []
    for i in range(len(reach_cats)):
        zr1_cats.append((zr1[i], reach_cats[i]))
        zr2_cats.append((zr2[i], reach_cats[i]))

    reachesTopo = VectorTopo(reaches)
    reachesTopo.open("rw")
    cur = reachesTopo.table.conn.cursor()
    cur.executemany("update " + reaches + " set zr1=? where cat=?", zr1_cats)
    cur.executemany("update " + reaches + " set zr2=? where cat=?", zr2_cats)
    reachesTopo.table.conn.commit()
    reachesTopo.close()

    # Use these to create slope -- backwards possible on DEM!
    v.db_update(map=reaches, column="SLOPE", value="(zr1 - zr2)/RCHLEN")
    v.db_update(map=reaches, column="SLOPE", value=Smin,
                where="SLOPE <= " + str(Smin))

    # srtm_local_filled_grid = srtm_local_filled @ 200m (i.e. current grid)
    # resolution
    # r.to.vect in=srtm_local_filled_grid out=srtm_local_filled_grid col=z type=area --o
    # NOT SURE IF IT IS BEST TO USE MEAN ELEVATION OR TOP ELEVATION!!!
    v.db_addcolumn(map=reaches, columns="z_topo_mean double precision")
    v.what_rast(map=reaches, raster=elevation,
                column="z_topo_mean")  # , query_column='z')
    v.db_update(map=reaches, column="STRTOP",
                value="z_topo_mean - " + str(h_stream), quiet=True)
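# The SLOPE updates above implement SLOPE = (zr1 - zr2) / RCHLEN with a
# floor at s_min, since a coarse DEM can yield zero or negative
# ("backwards") gradients. A standalone numpy sketch of the same rule
# (the values are hypothetical):

def _example_slope_floor():
    import numpy as np
    zr1 = np.array([102.0, 100.5, 100.4])     # upstream-end elevations [m]
    zr2 = np.array([100.5, 100.4, 100.6])     # downstream-end elevations [m]
    rchlen = np.array([500.0, 500.0, 500.0])  # reach lengths [m]
    s_min = 1e-4
    slope = (zr1 - zr2) / rchlen
    slope[slope <= s_min] = s_min             # enforce the minimum slope
    return slope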
def main():
    """
    Adds GSFLOW parameters to a set of HRU sub-basins
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()
    basins = options['input']
    HRU = options['output']
    slope = options['slope']
    aspect = options['aspect']
    elevation = options['elevation']
    land_cover = options['cov_type']
    soil = options['soil_type']

    ################################
    # CREATE HRUs FROM SUB-BASINS  #
    ################################

    g.copy(vector=(basins, HRU), overwrite=gscript.overwrite())

    ############################################
    # ATTRIBUTE COLUMNS (IN ORDER FROM MANUAL) #
    ############################################

    # HRU
    hru_columns = []
    # Self ID
    hru_columns.append('id integer')  # nhru
    # Basic Physical Attributes (Geometry)
    hru_columns.append('hru_area double precision')  # acres (!!!!)
    hru_columns.append('hru_area_m2 double precision')  # [not for GSFLOW: for me!]
    hru_columns.append('hru_aspect double precision')  # Mean aspect [degrees]
    hru_columns.append('hru_elev double precision')  # Mean elevation
    hru_columns.append('hru_lat double precision')  # Latitude of centroid
    hru_columns.append('hru_lon double precision')  # Longitude of centroid
    # unnecessary but why not?
    hru_columns.append('hru_slope double precision')  # Mean slope [percent]
    # Basic Physical Attributes (Other)
    #hru_columns.append('hru_type integer')  # 0=inactive; 1=land; 2=lake; 3=swale; almost all will be 1
    #hru_columns.append('elev_units integer')  # 0=feet; 1=meters. 0=default. I think I will set this to 1 by default.
    # Measured input
    hru_columns.append('outlet_sta integer')
    # Index of streamflow station at basin outlet:
    # station number if it has one, 0 if not
    # Note that the below specify projections and not lat/lon; they really
    # seem to work for any projected coordinates, with _x, _y in meters, and
    # _xlong, _ylat in feet (i.e. they are just northing and easting). The
    # meters and feet are not just simple conversions, but actually are
    # required for different modules in the code, and are hence redundant
    # but intentional.
    hru_columns.append('hru_x double precision')  # Easting [m]
    hru_columns.append('hru_xlong double precision')  # Easting [feet]
    hru_columns.append('hru_y double precision')  # Northing [m]
    hru_columns.append('hru_ylat double precision')  # Northing [feet]
    # Streamflow and lake routing
    hru_columns.append('K_coef double precision')
    # Travel time of flood wave to next downstream segment; this is the
    # Muskingum storage coefficient. 1.0 for reservoirs, diversions, and
    # segments flowing out of the basin
    hru_columns.append('x_coef double precision')
    # Amount of attenuation of flow wave; this is the Muskingum routing
    # weighting factor. Range: 0.0--0.5; default 0.2; 0 for all segments
    # flowing out of the basin
    hru_columns.append('hru_segment integer')
    # ID of stream segment to which flow will be routed; this is for
    # non-cascade routing (flow goes directly from HRU to stream segment)
    hru_columns.append('obsin_segment integer')
    # Index of measured streamflow station that replaces inflow to a segment
    hru_columns.append('cov_type integer')
    # 0=bare soil; 1=grasses; 2=shrubs; 3=trees; 4=coniferous
    hru_columns.append('soil_type integer')
    # 1=sand; 2=loam; 3=clay

    # Create strings
    hru_columns = ",".join(hru_columns)

    # Add columns to tables
    v.db_addcolumn(map=HRU, columns=hru_columns, quiet=True)

    ###########################
    # UPDATE DATABASE ENTRIES #
    ###########################

    colNames = np.array(gscript.vector_db_select(HRU, layer=1)['columns'])
    colValues = np.array(
        list(gscript.vector_db_select(HRU, layer=1)['values'].values()))
    number_of_hrus = colValues.shape[0]
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()
    rnums = colValues[:, colNames == 'rnum'].astype(int).squeeze()
    nhru = np.arange(1, number_of_hrus + 1)
    nhrut = []
    for i in range(len(nhru)):
        nhrut.append((nhru[i], cats[i]))

    # Access the HRUs
    hru = VectorTopo(HRU)
    # Open the map with topology:
    hru.open('rw')
    # Create a cursor
    cur = hru.table.conn.cursor()
    # Use it to loop across the table
    cur.executemany("update " + HRU + " set id=? where cat=?", nhrut)
    # Commit changes to the table
    hru.table.conn.commit()
    # Close the table
    hru.close()

    """
    # Do the same for basins <-------------- DO THIS OR SIMPLY HAVE HRUs
    # OVERLAIN WITH GRID CELLS? IN THIS CASE, RMV AREA ADDITION TO GRAVRES
    v.db_addcolumn(map=basins, columns='id int', quiet=True)
    basins = VectorTopo(basins)
    basins.open('rw')
    cur = basins.table.conn.cursor()
    cur.executemany("update basins set id=? where cat=?", nhrut)
    basins.table.conn.commit()
    basins.close()
    """

    # if you want to append to table
    # cur.executemany("update HRU(id) values(?)", nhrut)
    # "insert into" will add rows

    #hru_columns.append('hru_area double precision')
    # Acres b/c USGS
    v.to_db(map=HRU, option='area', columns='hru_area', units='acres',
            quiet=True)
    v.to_db(map=HRU, option='area', columns='hru_area_m2', units='meters',
            quiet=True)

    # GET MEAN VALUES FOR THESE NEXT ONES, ACROSS THE BASIN

    # SLOPE (and aspect)
    #####################
    v.rast_stats(map=HRU, raster=slope, method='average',
                 column_prefix='tmp', flags='c', quiet=True)
    v.db_update(map=HRU, column='hru_slope', query_column='tmp_average',
                quiet=True)

    # ASPECT
    #########
    v.db_dropcolumn(map=HRU, columns='tmp_average', quiet=True)
    # Dealing with conversion from degrees (no good average) to something I
    # can average -- x- and y-vectors.
    # Geographic coordinates, so sin=x, cos=y.... not that it matters so
    # long as I am consistent in how I return to degrees
    r.mapcalc('aspect_x = sin(' + aspect + ')',
              overwrite=gscript.overwrite(), quiet=True)
    r.mapcalc('aspect_y = cos(' + aspect + ')',
              overwrite=gscript.overwrite(), quiet=True)
    #grass.run_command('v.db.addcolumn', map=HRU, columns='aspect_x_sum double precision, aspect_y_sum double precision, ncells_in_hru integer')
    v.rast_stats(map=HRU, raster='aspect_x', method='sum',
                 column_prefix='aspect_x', flags='c', quiet=True)
    v.rast_stats(map=HRU, raster='aspect_y', method='sum',
                 column_prefix='aspect_y', flags='c', quiet=True)
    hru = VectorTopo(HRU)
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.execute("SELECT cat,aspect_x_sum,aspect_y_sum FROM %s" % hru.name)
    _arr = np.array(cur.fetchall()).astype(float)
    _cat = _arr[:, 0]
    _aspect_x_sum = _arr[:, 1]
    _aspect_y_sum = _arr[:, 2]
    aspect_angle = np.arctan2(_aspect_y_sum, _aspect_x_sum) * 180. / np.pi
    aspect_angle[aspect_angle < 0] += 360  # all positive
    aspect_angle_cat = np.vstack((aspect_angle, _cat)).transpose()
    cur.executemany("update " + HRU + " set hru_aspect=? where cat=?",
                    aspect_angle_cat)
    hru.table.conn.commit()
    hru.close()

    # ELEVATION
    ############
    v.rast_stats(map=HRU, raster=elevation, method='average',
                 column_prefix='tmp', flags='c', quiet=True)
    v.db_update(map=HRU, column='hru_elev', query_column='tmp_average',
                quiet=True)
    v.db_dropcolumn(map=HRU, columns='tmp_average', quiet=True)

    # CENTROIDS
    ############
    # get x,y of centroid -- but have areas not in database table, that do
    # have centroids, and having a hard time finding a good way to get rid
    # of them! They have duplicate category values!
    # Perhaps these are little dangles on the edges of the vectorization
    # where the raster value was the same but pinched out into 1-a few
    # cells?
    # From looking at map, lots of extra centroids on area boundaries, and
    # removing small areas (though threshold hard to guess) gets rid of
    # these

    hru = VectorTopo(HRU)
    hru.open('rw')
    hru_cats = []
    hru_coords = []
    for hru_i in hru:
        if type(hru_i) is vector.geometry.Centroid:
            hru_cats.append(hru_i.cat)
            hru_coords.append(hru_i.coords())
    hru_cats = np.array(hru_cats)
    hru_coords = np.array(hru_coords)
    hru.rewind()

    hru_area_ids = []
    for coor in hru_coords:
        _area = hru.find_by_point.area(Point(coor[0], coor[1]))
        hru_area_ids.append(_area)
    hru_area_ids = np.array(hru_area_ids)
    hru.rewind()

    hru_areas = []
    for _area_id in hru_area_ids:
        hru_areas.append(_area_id.area())
    hru_areas = np.array(hru_areas)
    hru.rewind()

    allcats = sorted(list(set(list(hru_cats))))

    # Now create weighted mean
    hru_centroid_locations = []
    for cat in allcats:
        hrus_with_cat = hru_cats[hru_cats == cat]
        if len(hrus_with_cat) == 1:
            hru_centroid_locations.append(
                (hru_coords[hru_cats == cat]).squeeze())
        else:
            _centroids = hru_coords[hru_cats == cat]
            _areas = hru_areas[hru_cats == cat]
            _x = np.average(_centroids[:, 0], weights=_areas)
            _y = np.average(_centroids[:, 1], weights=_areas)
            hru_centroid_locations.append(np.array([_x, _y]))

    # Now upload weighted mean to database table
    # allcats and hru_centroid_locations are co-indexed
    index__cats = create_iterator(HRU)  # used only by the disabled block below
    cur = hru.table.conn.cursor()
    for i in range(len(allcats)):
        # meters
        cur.execute('update ' + HRU
                    + ' set hru_x=' + str(hru_centroid_locations[i][0])
                    + ' where cat=' + str(allcats[i]))
        cur.execute('update ' + HRU
                    + ' set hru_y=' + str(hru_centroid_locations[i][1])
                    + ' where cat=' + str(allcats[i]))
        # feet
        cur.execute('update ' + HRU
                    + ' set hru_xlong='
                    + str(hru_centroid_locations[i][0] * 3.28084)
                    + ' where cat=' + str(allcats[i]))
        cur.execute('update ' + HRU
                    + ' set hru_ylat='
                    + str(hru_centroid_locations[i][1] * 3.28084)
                    + ' where cat=' + str(allcats[i]))
        # (un)Project to lat/lon
        _centroid_ll = list(gscript.parse_command(
            'm.proj', coordinates=list(hru_centroid_locations[i]),
            flags='od').keys())[0]
        _lon, _lat, _z = _centroid_ll.split('|')
        cur.execute('update ' + HRU + ' set hru_lon=' + _lon
                    + ' where cat=' + str(allcats[i]))
        cur.execute('update ' + HRU + ' set hru_lat=' + _lat
                    + ' where cat=' + str(allcats[i]))

    # feet -- not working.
    # Probably an issue with index__cats -- maybe fix later, if needed
    # But currently not a major speed issue
    """
    cur.executemany("update " + HRU + " set hru_xlong=?*3.28084 where hru_x=?",
                    index__cats)
    cur.executemany("update " + HRU + " set hru_ylat=?*3.28084 where hru_y=?",
                    index__cats)
    """

    cur.close()
    hru.table.conn.commit()
    hru.close()

    # ID NUMBER
    ############
    #cur.executemany("update " + HRU + " set hru_segment=? where id=?",
    #                index__cats)
    # Segment number = HRU ID number
    v.db_update(map=HRU, column='hru_segment', query_column='id', quiet=True)

    # LAND USE/COVER
    #################
    try:
        land_cover = int(land_cover)
    except ValueError:
        pass
    if type(land_cover) is int:
        if land_cover <= 3:
            v.db_update(map=HRU, column='cov_type', value=land_cover,
                        quiet=True)
        else:
            sys.exit("WARNING: INVALID LAND COVER TYPE. CHECK INTEGER VALUES.\n"
                     "EXITING TO ALLOW USER TO CHANGE BEFORE RUNNING GSFLOW")
    else:
        # NEED TO UPDATE THIS TO MODAL VALUE!!!!
        gscript.message("Warning: values taken from HRU centroids. "
                        "Code should be updated to")
        gscript.message("acquire modal values")
        v.what_rast(map=HRU, type='centroid', raster=land_cover,
                    column='cov_type', quiet=True)
        #v.rast_stats(map=HRU, raster=land_cover, method='average',
        #             column_prefix='tmp', flags='c', quiet=True)
        #v.db_update(map=HRU, column='cov_type', query_column='tmp_average',
        #            quiet=True)
        #v.db_dropcolumn(map=HRU, columns='tmp_average', quiet=True)

    # SOIL
    #######
    try:
        soil = int(soil)
    except ValueError:
        pass
    if type(soil) is int:
        if (soil > 0) and (soil <= 3):
            v.db_update(map=HRU, column='soil_type', value=soil, quiet=True)
        else:
            sys.exit("WARNING: INVALID SOIL TYPE. CHECK INTEGER VALUES.\n"
                     "EXITING TO ALLOW USER TO CHANGE BEFORE RUNNING GSFLOW")
    else:
        # NEED TO UPDATE THIS TO MODAL VALUE!!!!
        gscript.message("Warning: values taken from HRU centroids. "
                        "Code should be updated to")
        gscript.message("acquire modal values")
        v.what_rast(map=HRU, type='centroid', raster=soil,
                    column='soil_type', quiet=True)
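# The aspect averaging above uses the standard approach for circular
# quantities: sum the unit-vector components over each HRU, then recover
# an angle with arctan2, because an arithmetic mean of raw angles fails
# across the 0/360 wrap. A standalone sketch with hypothetical bearings
# (using one self-consistent sin/cos convention, as the comment in the
# code above notes):

def _example_circular_mean_aspect():
    import numpy as np
    aspects_deg = np.array([350.0, 10.0, 20.0])    # clustered around north
    x_sum = np.sin(np.radians(aspects_deg)).sum()  # easting components
    y_sum = np.cos(np.radians(aspects_deg)).sum()  # northing components
    # ~6.7 degrees; a naive arithmetic mean would give 126.7
    return np.degrees(np.arctan2(x_sum, y_sum)) % 360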
r.mapcalc('tmpStreamZ = (tmpStream * 0 + 1) * tmp', quiet=True,
          overwrite=True)
r.to_vect(input='tmpStreamZ', output='Line__' + DEM, type='line',
          quiet=True, overwrite=True)
r.to_vect(input='tmpStreamZ', output='Points__' + DEM, type='point',
          column='z', quiet=True, overwrite=True)
v.db_addcolumn(map='Points__' + DEM,
               columns='x double precision, y double precision', quiet=True)
v.to_db(map='Points__' + DEM, option='coor', columns='x,y', quiet=True)

channels = sorted(
    gscript.parse_command('g.list', type='vector',
                          pattern='channel_centerline_0*').keys())
for channel in channels:
    channel_points = channel[:-7] + 'points_' + channel[-7:]
    v.to_points(input=channel, output=channel_points, type='line',
                dmax=0.002, overwrite=True)
    # The original call was truncated here; adding x,y coordinate columns
    # (as done for Points__ above) is an assumption.
    v.db_addcolumn(map=channel_points,
                   columns='x double precision, y double precision',
                   quiet=True)
"""
# Tributary channel
start_x = _x[drainarray[-2,:] == np.min(drainarray[-2,:])]  # CHECK INDEXING (TOP/BOTTOM)
if len(start_x) > 0:
    start_x = start_x[0]  # ARBITRARY, SHOULD FIX SOMETIME, PROBABLY NOT IMPORTANT THOUGH.
startpoint = str(start_x) + ',' + str(margin_bottom/1000.)
r.drain(input='tmp', drain=tribThalweg, start_coordinates=startpoint)
"""

# Try r.sim.water

"""
# Old method with r.drain -- doesn't work well. Using r.watershed instead.
DEMs = gscript.parse_command('g.list', type='raster',
                             pattern='*__DEM__*').keys()
reach_columns.append('JRCH integer')
reach_columns.append('NSEG integer')  # = segment_id = ISEG
reach_columns.append('ISEG integer')  # = segment_id
reach_columns.append('IREACH integer')
reach_columns.append('RCHLEN integer')
reach_columns.append('STRTOP double precision')
reach_columns.append('SLOPE double precision')
reach_columns.append('STRTHICK double precision')

reach_columns = ",".join(reach_columns)

# Create a map to work with
v.extract(input='streams', output='tmp2', type='line', overwrite=True)
v.overlay(ainput='tmp2', atype='line', binput='grid', output='reaches',
          operator='and', overwrite=True)
v.db_addcolumn(map='reaches', columns=reach_columns)

# Rename a,b columns
v.db_renamecolumn(map='reaches', column=('a_x1', 'x1'))
v.db_renamecolumn(map='reaches', column=('a_x2', 'x2'))
v.db_renamecolumn(map='reaches', column=('a_y1', 'y1'))
v.db_renamecolumn(map='reaches', column=('a_y2', 'y2'))
v.db_renamecolumn(map='reaches', column=('a_stream_type', 'stream_type'))
v.db_renamecolumn(map='reaches', column=('a_type_code', 'type_code'))
v.db_renamecolumn(map='reaches', column=('a_cat', 'rnum_cat'))
v.db_renamecolumn(map='reaches', column=('a_tostream', 'tostream'))
v.db_renamecolumn(map='reaches', column=('a_id', 'segment_id'))
v.db_renamecolumn(map='reaches', column=('a_OUTSEG', 'OUTSEG'))
v.db_renamecolumn(map='reaches', column=('b_row', 'row'))
v.db_renamecolumn(map='reaches', column=('b_col', 'col'))
v.db_renamecolumn(map='reaches', column=('b_id', 'cell_id'))
    column='distance')
    """

    """
    Method-2: This method joins all the Tank Points using the triangulation
    method implemented in v.to.lines, and the optimal route is generated
    using v.net.spanningtree. Creating a buffer for this route gives the
    pipe layout for the given pipe diameter.
    """
    # 'O' and 'Join' come from earlier in the script (presumably the
    # overwrite flag and os.path.join, respectively)
    v.to_lines(input='TankPoints', output='JoinedTankPoints', flags=O)
    v.net_spanningtree(input='JoinedTankPoints', output='Layout2', flags=O)
    v.buffer(input='Layout2', output='Layout2_Buffer', distance=dia, flags=O)

    # Prepare Map for the Layout
    v.db_addcolumn(map='Buildings', columns='color varchar(10)')
    v.db_addcolumn(map='TankPoints', columns='size double precision')
    for id, color in enumerate(Colors, start=1):
        whr = 'id like %s' % id
        v.db_update(map='Buildings', column='color', where=whr, value=color)
    v.db_update(map='TankPoints', column='size', value=10)
    v.db_update(map='TankPoints', column='size', value=20, where='id like 1')

    ps = Module('ps.map')
    ps(input=Join('Test1', '#ps_scripts', 'GenLayout.psmap'),
       output='PipeLayout.ps', flags=O)

    # Removing the GRASS Vectors
    #g.remove(type='vector', pattern='*', flags=['f'])

#EOF
def main(): """ Builds a grid for the MODFLOW component of the USGS hydrologic model, GSFLOW. """ options, flags = gscript.parser() basin = options["basin"] pp = options["pour_point"] raster_input = options["raster_input"] dx = options["dx"] dy = options["dy"] grid = options["output"] mask = options["mask_output"] bc_cell = options["bc_cell"] # basin='basins_tmp_onebasin'; pp='pp_tmp'; raster_input='DEM'; raster_output='DEM_coarse'; dx=dy='500'; grid='grid_tmp'; mask='mask_tmp' """ # Fatal if raster input and output are not both set _lena0 = (len(raster_input) == 0) _lenb0 = (len(raster_output) == 0) if _lena0 + _lenb0 == 1: gscript.fatal("You must set both raster input and output, or neither.") """ # Fatal if bc_cell set but mask and grid are false if bc_cell != "": if (mask == "") or (pp == ""): gscript.fatal( "Mask and pour point must be set to define b.c. cell") # Create grid -- overlaps DEM, three cells of padding g.region(raster=raster_input, ewres=dx, nsres=dy) gscript.use_temp_region() reg = gscript.region() reg_grid_edges_sn = np.linspace(reg["s"], reg["n"], reg["rows"]) reg_grid_edges_we = np.linspace(reg["w"], reg["e"], reg["cols"]) g.region(vector=basin, ewres=dx, nsres=dy) regnew = gscript.region() # Use a grid ratio -- don't match exactly the desired MODFLOW resolution grid_ratio_ns = np.round(regnew["nsres"] / reg["nsres"]) grid_ratio_ew = np.round(regnew["ewres"] / reg["ewres"]) # Get S, W, and then move the unit number of grid cells over to get N and E # and include 3 cells of padding around the whole watershed _s_dist = np.abs(reg_grid_edges_sn - (regnew["s"] - 3.0 * regnew["nsres"])) _s_idx = np.where(_s_dist == np.min(_s_dist))[0][0] _s = float(reg_grid_edges_sn[_s_idx]) _n_grid = np.arange(_s, reg["n"] + 3 * grid_ratio_ns * reg["nsres"], grid_ratio_ns * reg["nsres"]) _n_dist = np.abs(_n_grid - (regnew["n"] + 3.0 * regnew["nsres"])) _n_idx = np.where(_n_dist == np.min(_n_dist))[0][0] _n = float(_n_grid[_n_idx]) _w_dist = np.abs(reg_grid_edges_we - (regnew["w"] - 3.0 * regnew["ewres"])) _w_idx = np.where(_w_dist == np.min(_w_dist))[0][0] _w = float(reg_grid_edges_we[_w_idx]) _e_grid = np.arange(_w, reg["e"] + 3 * grid_ratio_ew * reg["ewres"], grid_ratio_ew * reg["ewres"]) _e_dist = np.abs(_e_grid - (regnew["e"] + 3.0 * regnew["ewres"])) _e_idx = np.where(_e_dist == np.min(_e_dist))[0][0] _e = float(_e_grid[_e_idx]) # Finally make the region g.region( w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns * reg["nsres"]), ewres=str(grid_ratio_ew * reg["ewres"]), ) # And then make the grid v.mkgrid(map=grid, overwrite=gscript.overwrite()) # Cell numbers (row, column, continuous ID) v.db_addcolumn(map=grid, columns="id int", quiet=True) colNames = np.array(gscript.vector_db_select(grid, layer=1)["columns"]) colValues = np.array( gscript.vector_db_select(grid, layer=1)["values"].values()) cats = colValues[:, colNames == "cat"].astype(int).squeeze() rows = colValues[:, colNames == "row"].astype(int).squeeze() cols = colValues[:, colNames == "col"].astype(int).squeeze() nrows = np.max(rows) ncols = np.max(cols) cats = np.ravel([cats]) _id = np.ravel([ncols * (rows - 1) + cols]) _id_cat = [] for i in range(len(_id)): _id_cat.append((_id[i], cats[i])) gridTopo = VectorTopo(grid) gridTopo.open("rw") cur = gridTopo.table.conn.cursor() cur.executemany("update " + grid + " set id=? 
where cat=?", _id_cat) gridTopo.table.conn.commit() gridTopo.close() # Cell area v.db_addcolumn(map=grid, columns="area_m2 double precision", quiet=True) v.to_db(map=grid, option="area", units="meters", columns="area_m2", quiet=True) # Basin mask if len(mask) > 0: # Fine resolution region: g.region( n=reg["n"], s=reg["s"], w=reg["w"], e=reg["e"], nsres=reg["nsres"], ewres=reg["ewres"], ) # Rasterize basin v.to_rast( input=basin, output=mask, use="val", value=1, overwrite=gscript.overwrite(), quiet=True, ) # Coarse resolution region: g.region( w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns * reg["nsres"]), ewres=str(grid_ratio_ew * reg["ewres"]), ) r.resamp_stats(input=mask, output=mask, method="sum", overwrite=True, quiet=True) r.mapcalc("tmp" + " = " + mask + " > 0", overwrite=True, quiet=True) g.rename(raster=("tmp", mask), overwrite=True, quiet=True) r.null(map=mask, null=0, quiet=True) # Add mask location (1 vs 0) in the MODFLOW grid v.db_addcolumn(map=grid, columns="basinmask double precision", quiet=True) v.what_rast(map=grid, type="centroid", raster=mask, column="basinmask") """ # Resampled raster if len(raster_output) > 0: r.resamp_stats(input=raster_input, output=raster_output, method='average', overwrite=gscript.overwrite(), quiet=True) """ # Pour point if len(pp) > 0: v.db_addcolumn(map=pp, columns=("row integer", "col integer"), quiet=True) v.build(map=pp, quiet=True) v.what_vect(map=pp, query_map=grid, column="row", query_column="row", quiet=True) v.what_vect(map=pp, query_map=grid, column="col", query_column="col", quiet=True) # Next point downstream of the pour point # Requires pp (always) and mask (sometimes) # Dependency set above w/ gscript.fatal # g.region(raster='DEM') # dx = gscript.region()['ewres'] # dy = gscript.region()['nsres'] if len(bc_cell) > 0: ########## NEED TO USE TRUE TEMPORARY FILE ########## # May not work with dx != dy! 
v.to_rast(input=pp, output="tmp", use="val", value=1, overwrite=True) r.buffer(input="tmp", output="tmp", distances=float(dx) * 1.5, overwrite=True) r.mapcalc("tmp2 = if(tmp==2,1,null()) * " + raster_input, overwrite=True) # r.mapcalc('tmp = if(isnull('+raster_input+',0,(tmp == 2)))', overwrite=True) # g.region(rast='tmp') # r.null(map=raster_input, # g.region(raster=raster_input) # r.resample(input=raster_input, output='tmp3', overwrite=True) r.resamp_stats(input=raster_input, output="tmp3", method="minimum", overwrite=True) r.drain(input="tmp3", start_points=pp, output="tmp", overwrite=True) # g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns*reg['nsres']), ewres=str(grid_ratio_ew*reg['ewres'])) # r.resamp_stats(input='tmp2', output='tmp3', overwrite=True) # g.rename(raster=('tmp3','tmp2'), overwrite=True, quiet=True) r.mapcalc("tmp3 = tmp2 * tmp", overwrite=True, quiet=True) g.rename(raster=("tmp3", "tmp"), overwrite=True, quiet=True) # r.null(map='tmp', setnull=0) # Not necessary: center point removed above r.to_vect( input="tmp", output=bc_cell, type="point", column="z", overwrite=gscript.overwrite(), quiet=True, ) v.db_addcolumn( map=bc_cell, columns=( "row integer", "col integer", "x double precision", "y double precision", ), quiet=True, ) v.build(map=bc_cell, quiet=True) v.what_vect(map=bc_cell, query_map=grid, column="row", query_column="row", quiet=True) v.what_vect(map=bc_cell, query_map=grid, column="col", query_column="col", quiet=True) v.to_db(map=bc_cell, option="coor", columns=("x,y")) # Of the candidates, the pour point is the closest one # v.db_addcolumn(map=bc_cell, columns=('dist_to_pp double precision'), quiet=True) # v.distance(from_=bc_cell, to=pp, upload='dist', column='dist_to_pp') # Find out if this is diagonal: finite difference works only N-S, W-E colNames = np.array(gscript.vector_db_select(pp, layer=1)["columns"]) colValues = np.array( gscript.vector_db_select(pp, layer=1)["values"].values()) pp_row = colValues[:, colNames == "row"].astype(int).squeeze() pp_col = colValues[:, colNames == "col"].astype(int).squeeze() colNames = np.array( gscript.vector_db_select(bc_cell, layer=1)["columns"]) colValues = np.array( gscript.vector_db_select(bc_cell, layer=1)["values"].values()) bc_row = colValues[:, colNames == "row"].astype(int).squeeze() bc_col = colValues[:, colNames == "col"].astype(int).squeeze() # Also get x and y while we are at it: may be needed later bc_x = colValues[:, colNames == "x"].astype(float).squeeze() bc_y = colValues[:, colNames == "y"].astype(float).squeeze() if (bc_row != pp_row).all() and (bc_col != pp_col).all(): if bc_row.ndim > 0: if len(bc_row) > 1: for i in range(len(bc_row)): """ UNTESTED!!!! And probably unimportant -- having 2 cells with river going through them is most likely going to happen with two adjacent cells -- so a side and a corner """ _col1, _row1 = str(bc_col[i]), str(pp_row[i]) _col2, _row2 = str(pp_col[i]), str(bc_row[i]) # Check if either of these is covered by the basin mask _ismask_1 = gscript.vector_db_select( grid, layer=1, where="(row == " + _row1 + ") AND (col ==" + _col1 + ")", columns="basinmask", ) _ismask_1 = int(_ismask_1["values"].values()[0][0]) _ismask_2 = gscript.vector_db_select( grid, layer=1, where="(row == " + _row2 + ") AND (col ==" + _col2 + ")", columns="basinmask", ) _ismask_2 = int(_ismask_2["values"].values()[0][0]) # check if either of these is the other point """ NOT DOING THIS YET -- HAVEN'T THOUGHT THROUGH IF ACTUALLY NECESSARY. 
                        (And this is an edge case anyway.)
                        """
                        # If both are covered by the mask, error
                        if _ismask_1 and _ismask_2:
                            gscript.fatal(
                                "All possible b.c. cells covered by basin mask.\n"
                                "Contact the developer: awickert (at) umn(.)edu"
                            )
            # The connection must not be diagonal, so there are two possible
            # locations that are adjacent to the pour point
            _col1, _row1 = str(bc_col), str(pp_row)
            _col2, _row2 = str(pp_col), str(bc_row)
            # Check if either of these is covered by the basin mask
            _ismask_1 = gscript.vector_db_select(
                grid,
                layer=1,
                where="(row == " + _row1 + ") AND (col ==" + _col1 + ")",
                columns="basinmask",
            )
            _ismask_1 = int(list(_ismask_1["values"].values())[0][0])
            _ismask_2 = gscript.vector_db_select(
                grid,
                layer=1,
                where="(row == " + _row2 + ") AND (col ==" + _col2 + ")",
                columns="basinmask",
            )
            _ismask_2 = int(list(_ismask_2["values"].values())[0][0])
            # If both are covered by the mask, error
            if _ismask_1 and _ismask_2:
                gscript.fatal(
                    "All possible b.c. cells covered by basin mask.\n"
                    "Contact the developer: awickert (at) umn(.)edu")
            # Otherwise, keep those that are not covered by the basin mask
            # ... or do we want the point that touches as few interior
            # cells as possible?
            # Maybe just try setting both and see what happens for now!
            else:
                # Get dx and dy
                # dx = gscript.region()['ewres']
                # dy = gscript.region()['nsres']
                # Build tool to handle multiple b.c. cells?
                bcvect = vector.Vector(bc_cell)
                bcvect.open("rw")
                _cat_i = 2
                if _ismask_1 != 0:
                    # _x should always be bc_x, but writing generalized code
                    _x = bc_x + float(dx) * (int(_col1) - bc_col)  # col 1 at w edge
                    _y = bc_y - float(dy) * (int(_row1) - bc_row)  # row 1 at n edge
                    point0 = Point(_x, _y)
                    bcvect.write(
                        point0,
                        cat=_cat_i,
                        attrs=(None, _row1, _col1, _x, _y),
                    )
                    bcvect.table.conn.commit()
                    _cat_i += 1
                if _ismask_2 != 0:
                    # _y should always be bc_y, but writing generalized code
                    _x = bc_x + float(dx) * (int(_col2) - bc_col)  # col 2 at w edge
                    _y = bc_y - float(dy) * (int(_row2) - bc_row)  # row 2 at n edge
                    point0 = Point(_x, _y)
                    bcvect.write(
                        point0,
                        cat=_cat_i,
                        attrs=(None, _row2, _col2, _x, _y),
                    )
                    bcvect.table.conn.commit()
                # Build database table and vector geometry
                bcvect.build()
                bcvect.close()

    g.region(
        n=reg["n"],
        s=reg["s"],
        w=reg["w"],
        e=reg["e"],
        nsres=reg["nsres"],
        ewres=reg["ewres"],
    )
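
# The continuous MODFLOW cell ID used in the grid builder is
# id = ncols*(row - 1) + col, i.e., row-major numbering starting at 1 in the
# upper-left cell. A minimal, self-contained sketch of that mapping and its
# inverse (not part of the module; the function names are illustrative only):

def rowcol_to_id(row, col, ncols):
    """Row-major, 1-based MODFLOW-style cell ID from (row, col)."""
    return ncols * (row - 1) + col

def id_to_rowcol(cell_id, ncols):
    """Inverse mapping: (row, col) from a 1-based, row-major cell ID."""
    row = (cell_id - 1) // ncols + 1
    col = (cell_id - 1) % ncols + 1
    return row, col

# Round trip: cell 42 of a 7-column grid is (row 6, col 7)
assert rowcol_to_id(*id_to_rowcol(42, ncols=7), ncols=7) == 42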
r.patch(input='boundaries,' + DEM, output='tmp', overwrite=True)
drainarray.read('tmp')
scanName = DEM.split('__DEM__')[0]
mainThalweg = scanName + '__main_thalweg__'
tribThalweg = scanName + '__trib_thalweg__'

# Main channel
#start_x = margin_left/1000.
#start_y = _y[:,1][drainarray[:,1] == np.min(drainarray[:,1])]
flowIn = garray.array()
flowIn[:, 2][drainarray[:, 2] < (np.min(drainarray[:, 2]) + .01)] = 1
flowIn.write('tmpFlowIn', overwrite=True)
r.watershed(elevation='tmp', flow='tmpFlowIn', threshold=np.sum(flowIn),
            stream='tmpStream', accumulation='tmpAccum', flags='s',
            overwrite=True)
r.mapcalc('tmpStreamZ = (tmpStream * 0 + 1) * tmp', overwrite=True)
r.to_vect(input='tmpStreamZ', output='tmpStreamLine', type='line',
          overwrite=True)
r.to_vect(input='tmpStreamZ', output='tmpStreamPoints', type='point',
          column='z', overwrite=True)
v.db_addcolumn(map='tmpStreamPoints',
               columns='x double precision, y double precision')
v.to_db(map='tmpStreamPoints', option='coor', columns='x,y')

"""
# Tributary channel
start_x = _x[drainarray[-2,:] == np.min(drainarray[-2,:])]  # CHECK INDEXING (TOP/BOTTOM)
if len(start_x) > 0:
    start_x = start_x[0]  # ARBITRARY, SHOULD FIX SOMETIME; PROBABLY NOT IMPORTANT, THOUGH.
startpoint = str(start_x) + ',' + str(margin_bottom/1000.)
r.drain(input='tmp', drain=tribThalweg, start_coordinates=startpoint)
"""

# Try r.sim.water
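
# The block above uses grass.script.array (garray) to build the r.watershed
# 'flow' input by flagging cells along the upstream edge of the scan. A
# minimal sketch of that array-then-write pattern, assuming the computational
# region is already set; the raster name is illustrative only:

import grass.script.array as garray

inflow = garray.array()             # zero-filled array shaped like the region
inflow[:, 0] = 1                    # unit inflow along the west column
inflow.write('inflow_tmp', overwrite=True)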
def main():
    """
    Builds river segments for input to the USGS hydrologic models
    PRMS and GSFLOW.
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()

    # I/O
    streams = options["input"]
    segments = options["output"]

    # Hydraulic geometry
    ICALC = int(options["icalc"])

    # ICALC=0: Constant depth
    WIDTH1 = options["width1"]
    WIDTH2 = options["width2"]

    # ICALC=1,2: Manning (in channel and overbank): below

    # ICALC=3: Power-law relationships (following Leopold and others)
    # The at-a-station default exponents are from Rhodes (1977)
    CDPTH = str(float(options["cdpth"]) / 35.3146667)  # cfs to m^3/s
    FDPTH = options["fdpth"]
    AWDTH = str(float(options["awdth"]) / 35.3146667)  # cfs to m^3/s
    BWDTH = options["bwdth"]

    ##################################################
    # CHECKING DEPENDENCIES WITH OPTIONAL PARAMETERS #
    ##################################################

    if ICALC == 3:
        if not (CDPTH and FDPTH and AWDTH and BWDTH):
            gscript.fatal("Missing CDPTH, FDPTH, AWDTH, and/or BWDTH. "
                          "These are required when ICALC = 3.")

    ###########
    # RUNNING #
    ###########

    # New columns for segments
    segment_columns = []
    # Self ID
    segment_columns.append("id integer")  # segment number
    segment_columns.append("ISEG integer")  # segment number
    segment_columns.append("NSEG integer")  # segment number
    # for GSFLOW
    segment_columns.append("ICALC integer")  # 1 for channel, 2 for channel+fp, 3 for power function
    segment_columns.append("OUTSEG integer")  # downstream segment -- tostream, renumbered
    segment_columns.append("ROUGHCH double precision")  # in-channel Manning's n
    segment_columns.append("ROUGHBK double precision")  # overbank Manning's n
    segment_columns.append("WIDTH1 double precision")  # width at upstream end
    segment_columns.append("WIDTH2 double precision")  # width at downstream end
    segment_columns.append("CDPTH double precision")  # depth coeff
    segment_columns.append("FDPTH double precision")  # depth exp
    segment_columns.append("AWDTH double precision")  # width coeff
    segment_columns.append("BWDTH double precision")  # width exp
    segment_columns.append("floodplain_width double precision")  # floodplain width (8-pt approx channel + flat fp)
    # The below will be all 0
    segment_columns.append("IUPSEG varchar")  # upstream segment ID number, for diversions
    segment_columns.append("FLOW varchar")
    segment_columns.append("RUNOFF varchar")
    segment_columns.append("ETSW varchar")
    segment_columns.append("PPTSW varchar")

    segment_columns = ",".join(segment_columns)

    # CONSIDER THE EFFECT OF OVERWRITING COLUMNS -- WARN FOR THIS
    # IF MAP EXISTS ALREADY?

    # Create a map to work with
    g.copy(vector=(streams, segments), overwrite=gscript.overwrite())
    # and add its columns
    v.db_addcolumn(map=segments, columns=segment_columns)

    # Produce the data table entries
    ##################################
    colNames = np.array(gscript.vector_db_select(segments, layer=1)["columns"])
    colValues = np.array(
        list(gscript.vector_db_select(segments, layer=1)["values"].values())
    )  # list() needed for Python 3's dict view
    number_of_segments = colValues.shape[0]
    cats = colValues[:, colNames == "cat"].astype(int).squeeze()

    nseg = np.arange(1, len(cats) + 1)
    nseg_cats = []
    for i in range(len(cats)):
        nseg_cats.append((nseg[i], cats[i]))

    segmentsTopo = VectorTopo(segments)
    segmentsTopo.open("rw")
    cur = segmentsTopo.table.conn.cursor()

    # id = cat (as do ISEG and NSEG)
    cur.executemany("update " + segments + " set id=? where cat=?", nseg_cats)
    cur.executemany("update " + segments + " set ISEG=? where cat=?", nseg_cats)
    cur.executemany("update " + segments + " set NSEG=? where cat=?", nseg_cats)
where cat=?", nseg_cats) # outseg = tostream: default is 0 if "tostream" is off-map cur.execute("update " + segments + " set OUTSEG=0") cur.executemany("update " + segments + " set OUTSEG=? where tostream=?", nseg_cats) # Hydraulic geometry selection cur.execute("update " + segments + " set ICALC=" + str(ICALC)) segmentsTopo.table.conn.commit() segmentsTopo.close() if ICALC == 0: gscript.message("") gscript.message("ICALC=0 (constant) not supported") gscript.message("Continuing nonetheless.") gscript.message("") if ICALC == 1: if options["width_points"] is not "": # Can add machinery here for separate upstream and downstream widths # But really should not vary all that much # v.to_db(map=segments, option='start', columns='xr1,yr1') # v.to_db(map=segments, option='end', columns='xr2,yr2') gscript.run_command( "v.distance", from_=segments, to=options["width_points"], upload="to_attr", to_column=options["width_points_col"], column="WIDTH1", ) v.db_update(map=segments, column="WIDTH2", query_column="WIDTH1") else: segmentsTopo = VectorTopo(segments) segmentsTopo.open("rw") cur = segmentsTopo.table.conn.cursor() cur.execute("update " + segments + " set WIDTH1=" + str(WIDTH1)) cur.execute("update " + segments + " set WIDTH2=" + str(WIDTH2)) segmentsTopo.table.conn.commit() segmentsTopo.close() if ICALC == 2: # REMOVE THIS MESSAGE ONCE THIS IS INCLUDED IN INPUT-FILE BUILDER gscript.message("") gscript.message("ICALC=2 (8-point channel + floodplain) not supported") gscript.message("Continuing nonetheless.") gscript.message("") if options["fp_width_pts"] is not "": gscript.run_command( "v.distance", from_=segments, to=options["fp_width_pts"], upload="to_attr", to_column=options["fp_width_pts_col"], column="floodplain_width", ) else: segmentsTopo = VectorTopo(segments) segmentsTopo.open("rw") cur = segmentsTopo.table.conn.cursor() cur.execute("update " + segments + " set floodplain_width=" + str(options["fp_width_value"])) segmentsTopo.table.conn.commit() segmentsTopo.close() if ICALC == 3: segmentsTopo = VectorTopo(segments) segmentsTopo.open("rw") cur = segmentsTopo.table.conn.cursor() cur.execute("update " + segments + " set CDPTH=" + str(CDPTH)) cur.execute("update " + segments + " set FDPTH=" + str(FDPTH)) cur.execute("update " + segments + " set AWDTH=" + str(AWDTH)) cur.execute("update " + segments + " set BWDTH=" + str(BWDTH)) segmentsTopo.table.conn.commit() segmentsTopo.close() # values that are 0 gscript.message("") gscript.message("NOTICE: not currently used:") gscript.message("IUPSEG, FLOW, RUNOFF, ETSW, and PPTSW.") gscript.message("All set to 0.") gscript.message("") segmentsTopo = VectorTopo(segments) segmentsTopo.open("rw") cur = segmentsTopo.table.conn.cursor() cur.execute("update " + segments + " set IUPSEG=" + str(0)) cur.execute("update " + segments + " set FLOW=" + str(0)) cur.execute("update " + segments + " set RUNOFF=" + str(0)) cur.execute("update " + segments + " set ETSW=" + str(0)) cur.execute("update " + segments + " set PPTSW=" + str(0)) segmentsTopo.table.conn.commit() segmentsTopo.close() # Roughness # ICALC=1,2: Manning (in channel) if (options["roughch_raster"] is not "") and (options["roughch_points"] is not ""): gscript.fatal( "Choose either a raster or vector or a value as Manning's n input." 
    if options["roughch_raster"] != "":
        ROUGHCH = options["roughch_raster"]
        v.rast_stats(
            raster=ROUGHCH,
            method="average",
            column_prefix="tmp",
            map=segments,
            flags="c",
        )
        # v.db_renamecolumn(map=segments, column='tmp_average,ROUGHCH', quiet=True)
        v.db_update(map=segments, column="ROUGHCH", query_column="tmp_average",
                    quiet=True)
        v.db_dropcolumn(map=segments, columns="tmp_average", quiet=True)
    elif options["roughch_points"] != "":
        ROUGHCH = options["roughch_points"]
        gscript.run_command(
            "v.distance",
            from_=segments,
            to=ROUGHCH,
            upload="to_attr",
            to_column=options["roughch_pt_col"],
            column="ROUGHCH",
        )
    else:
        segmentsTopo = VectorTopo(segments)
        segmentsTopo.open("rw")
        cur = segmentsTopo.table.conn.cursor()
        ROUGHCH = options["roughch_value"]
        cur.execute("update " + segments + " set ROUGHCH=" + str(ROUGHCH))
        segmentsTopo.table.conn.commit()
        segmentsTopo.close()

    # ICALC=2: Manning (overbank)
    if (options["roughbk_raster"] != "") and (options["roughbk_points"] != ""):
        gscript.fatal(
            "Choose either a raster or vector or a value as Manning's n input."
        )
    if options["roughbk_raster"] != "":
        ROUGHBK = options["roughbk_raster"]
        v.rast_stats(
            raster=ROUGHBK,
            method="average",
            column_prefix="tmp",
            map=segments,
            flags="c",
        )
        v.db_renamecolumn(map=segments, column="tmp_average,ROUGHBK", quiet=True)
    elif options["roughbk_points"] != "":
        ROUGHBK = options["roughbk_points"]
        gscript.run_command(
            "v.distance",
            from_=segments,
            to=ROUGHBK,
            upload="to_attr",
            to_column=options["roughbk_pt_col"],
            column="ROUGHBK",
        )
    else:
        segmentsTopo = VectorTopo(segments)
        segmentsTopo.open("rw")
        cur = segmentsTopo.table.conn.cursor()
        ROUGHBK = options["roughbk_value"]
        cur.execute("update " + segments + " set ROUGHBK=" + str(ROUGHBK))
        segmentsTopo.table.conn.commit()
        segmentsTopo.close()
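
# The ICALC=3 branch above stores power-law hydraulic-geometry coefficients
# (Leopold-style, with the Rhodes (1977) at-a-station exponents as defaults),
# rescaling the cfs-based CDPTH and AWDTH coefficients by 35.3146667 ft^3/m^3
# as shown earlier. A minimal sketch of how SFR-style depth and width would
# then follow from discharge Q; the numbers below are purely illustrative:

def power_law_geometry(Q_m3s, cdpth, fdpth, awdth, bwdth):
    """Depth and width from discharge via DEPTH = CDPTH * Q**FDPTH and
    WIDTH = AWDTH * Q**BWDTH (SI units assumed)."""
    depth = cdpth * Q_m3s ** fdpth
    width = awdth * Q_m3s ** bwdth
    return depth, width

# Hypothetical coefficients, mirroring the module's cfs-to-m^3/s rescaling:
cdpth_si = 0.25 / 35.3146667
print(power_law_geometry(10.0, cdpth_si, 0.4, 1.0, 0.5))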
def main():
    """
    Import any raster or vector data set and add its attribute
    to a GSFLOW data object
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()

    # Parsing
    if options["attrtype"] == "int":
        attrtype = "integer"
    elif options["attrtype"] == "float":
        attrtype = "double precision"
    elif options["attrtype"] == "string":
        attrtype = "varchar"
    else:
        attrtype = ""

    ########################################
    # PROCESS AND UPLOAD TO DATABASE TABLE #
    ########################################

    if options["vector_area"] != "":
        gscript.use_temp_region()
        g.region(vector=options["map"], res=options["dxy"])
        v.to_rast(
            input=options["vector_area"],
            output="tmp___tmp",
            use="attr",
            attribute_column=options["from_column"],
            quiet=True,
            overwrite=True,
        )
        try:
            gscript.message("Checking for existing column to overwrite")
            v.db_dropcolumn(map=options["map"], columns=options["column"],
                            quiet=True)
        except Exception:
            pass
        if attrtype == "double precision":
            try:
                gscript.message("Checking for existing column to overwrite")
                v.db_dropcolumn(map=options["map"], columns="tmp_average",
                                quiet=True)
            except Exception:
                pass
            v.rast_stats(
                map=options["map"],
                raster="tmp___tmp",
                column_prefix="tmp",
                method="average",
                flags="c",
                quiet=True,
            )
            g.remove(type="raster", name="tmp___tmp", flags="f", quiet=True)
            v.db_renamecolumn(
                map=options["map"],
                column=["tmp_average", options["column"]],
                quiet=True,
            )
        else:
            try:
                v.db_addcolumn(
                    map=options["map"],
                    columns=options["column"] + " " + attrtype,
                    quiet=True,
                )
            except Exception:
                pass
            gscript.run_command(
                "v.distance",
                from_=options["map"],
                to=options["vector_area"],
                upload="to_attr",
                to_column=options["from_column"],
                column=options["column"],
                quiet=True,
            )
    elif options["vector_points"] != "":
        try:
            gscript.message("Checking for existing column to overwrite")
            v.db_dropcolumn(map=options["map"], columns=options["column"],
                            quiet=True)
            v.db_addcolumn(
                map=options["map"],
                columns=options["column"] + " " + attrtype,
                quiet=True,
            )
        except Exception:
            pass
        gscript.run_command(
            "v.distance",
            from_=options["map"],
            to=options["vector_points"],
            upload="to_attr",
            to_column=options["from_column"],
            column=options["column"],
            quiet=True,
        )
    elif options["raster"] != "":
        try:
            gscript.message("Checking for existing column to overwrite")
            v.db_dropcolumn(map=options["map"], columns=options["column"],
                            quiet=True)
        except Exception:
            pass
        v.rast_stats(
            map=options["map"],
            raster=options["raster"],
            column_prefix="tmp",
            method="average",
            flags="c",
            quiet=True,
        )
        v.db_renamecolumn(map=options["map"],
                          column=["tmp_average", options["column"]],
                          quiet=True)

    gscript.message("Done.")
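
# The try/except drop-then-add pattern above repeats in every branch. A small
# helper capturing it could reduce the duplication; this is a hedged sketch
# with a hypothetical name, assuming the same `v` shortcut import used above:

def replace_column(mapname, column, coltype):
    """Drop `column` from `mapname` if present, then re-add it as `coltype`."""
    try:
        v.db_dropcolumn(map=mapname, columns=column, quiet=True)
    except Exception:
        pass  # column did not exist; nothing to drop
    v.db_addcolumn(map=mapname, columns=column + " " + coltype, quiet=True)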
def main():
    """
    Builds a grid for the MODFLOW component of the USGS hydrologic model,
    GSFLOW.
    """

    options, flags = gscript.parser()
    basin = options['basin']
    pp = options['pour_point']
    raster_input = options['raster_input']
    dx = options['dx']
    dy = options['dy']
    grid = options['output']
    mask = options['mask_output']
    bc_cell = options['bc_cell']
    # basin='basins_tmp_onebasin'; pp='pp_tmp'; raster_input='DEM';
    # raster_output='DEM_coarse'; dx=dy='500'; grid='grid_tmp'; mask='mask_tmp'

    """
    # Fatal if raster input and output are not both set
    _lena0 = (len(raster_input) == 0)
    _lenb0 = (len(raster_output) == 0)
    if _lena0 + _lenb0 == 1:
        gscript.fatal("You must set both raster input and output, or neither.")
    """

    # Create grid -- overlaps the DEM, with three cells of padding
    gscript.use_temp_region()
    reg = gscript.region()
    reg_grid_edges_sn = np.linspace(reg['s'], reg['n'], reg['rows'])
    reg_grid_edges_we = np.linspace(reg['w'], reg['e'], reg['cols'])
    g.region(vector=basin, ewres=dx, nsres=dy)
    regnew = gscript.region()
    # Use a grid ratio -- don't match the desired MODFLOW resolution exactly
    grid_ratio_ns = np.round(regnew['nsres'] / reg['nsres'])
    grid_ratio_ew = np.round(regnew['ewres'] / reg['ewres'])
    # Get S and W, then move the unit number of grid cells over to get N and E,
    # and include 3 cells of padding around the whole watershed
    _s_dist = np.abs(reg_grid_edges_sn - (regnew['s'] - 3. * regnew['nsres']))
    _s_idx = np.where(_s_dist == np.min(_s_dist))[0][0]
    _s = float(reg_grid_edges_sn[_s_idx])
    _n_grid = np.arange(_s, reg['n'] + 3 * grid_ratio_ns * reg['nsres'],
                        grid_ratio_ns * reg['nsres'])
    _n_dist = np.abs(_n_grid - (regnew['n'] + 3. * regnew['nsres']))
    _n_idx = np.where(_n_dist == np.min(_n_dist))[0][0]
    _n = float(_n_grid[_n_idx])
    _w_dist = np.abs(reg_grid_edges_we - (regnew['w'] - 3. * regnew['ewres']))
    _w_idx = np.where(_w_dist == np.min(_w_dist))[0][0]
    _w = float(reg_grid_edges_we[_w_idx])
    _e_grid = np.arange(_w, reg['e'] + 3 * grid_ratio_ew * reg['ewres'],
                        grid_ratio_ew * reg['ewres'])
    _e_dist = np.abs(_e_grid - (regnew['e'] + 3. * regnew['ewres']))
    _e_idx = np.where(_e_dist == np.min(_e_dist))[0][0]
    _e = float(_e_grid[_e_idx])
    # Finally make the region
    g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n),
             nsres=str(grid_ratio_ns * reg['nsres']),
             ewres=str(grid_ratio_ew * reg['ewres']))
    # And then make the grid
    v.mkgrid(map=grid, overwrite=gscript.overwrite())

    # Cell numbers (row, column, continuous ID)
    v.db_addcolumn(map=grid, columns='id int', quiet=True)
    colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns'])
    colValues = np.array(
        list(gscript.vector_db_select(grid, layer=1)['values'].values())
    )  # list() needed for Python 3's dict view
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()
    rows = colValues[:, colNames == 'row'].astype(int).squeeze()
    cols = colValues[:, colNames == 'col'].astype(int).squeeze()
    nrows = np.max(rows)
    ncols = np.max(cols)
    cats = np.ravel([cats])
    _id = np.ravel([ncols * (rows - 1) + cols])
    _id_cat = []
    for i in range(len(_id)):
        _id_cat.append((_id[i], cats[i]))
    gridTopo = VectorTopo(grid)
    gridTopo.open('rw')
    cur = gridTopo.table.conn.cursor()
    cur.executemany("update " + grid + " set id=? where cat=?", _id_cat)
where cat=?", _id_cat) gridTopo.table.conn.commit() gridTopo.close() # Cell area v.db_addcolumn(map=grid, columns='area_m2', quiet=True) v.to_db(map=grid, option='area', units='meters', columns='area_m2', quiet=True) # Basin mask if len(mask) > 0: # Fine resolution region: g.region(n=reg['n'], s=reg['s'], w=reg['w'], e=reg['e'], nsres=reg['nsres'], ewres=reg['ewres']) # Rasterize basin v.to_rast(input=basin, output=mask, use='val', value=1, overwrite=gscript.overwrite(), quiet=True) # Coarse resolution region: g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns * reg['nsres']), ewres=str(grid_ratio_ew * reg['ewres'])) r.resamp_stats(input=mask, output=mask, method='sum', overwrite=True, quiet=True) r.mapcalc(mask + ' = ' + mask + ' > 0', overwrite=True, quiet=True) """ # Resampled raster if len(raster_output) > 0: r.resamp_stats(input=raster_input, output=raster_output, method='average', overwrite=gscript.overwrite(), quiet=True) """ # Pour point if len(pp) > 0: v.db_addcolumn(map=pp, columns=('row integer', 'col integer'), quiet=True) v.build(map=pp, quiet=True) v.what_vect(map=pp, query_map=grid, column='row', query_column='row', quiet=True) v.what_vect(map=pp, query_map=grid, column='col', query_column='col', quiet=True) # Next point downstream of the pour point if len(bc_cell) > 0: ########## NEED TO USE TRUE TEMPORARY FILE ########## # May not work with dx != dy! v.to_rast(input=pp, output='tmp', use='val', value=1, overwrite=True) r.buffer(input='tmp', output='tmp', distances=float(dx) * 1.5, overwrite=True) r.mapcalc('tmp = (tmp == 2) * ' + raster_input, overwrite=True) r.drain(input=raster_input, start_points=pp, output='tmp2', overwrite=True) r.mapcalc('tmp = tmp2 * tmp', overwrite=True) r.null(map='tmp', setnull=0) r.to_vect(input='tmp', output=bc_cell, type='point', column='z', overwrite=gscript.overwrite(), quiet=True) v.db_addcolumn(map=bc_cell, columns=('row integer', 'col integer'), quiet=True) v.build(map=bc_cell, quiet=True) v.what_vect(map=bc_cell, query_map=grid, column='row', \ query_column='row', quiet=True) v.what_vect(map=bc_cell, query_map=grid, column='col', \ query_column='col', quiet=True) g.region(n=reg['n'], s=reg['s'], w=reg['w'], e=reg['e'], nsres=reg['nsres'], ewres=reg['ewres'])
print(DEM)
g.rename(vector=['Line__' + DEM,
                 'channel_centerline_' + DEM.split('_')[-1]])
# (input='tmpStreamZ', output='channel_centerline_'+DEM.split('_')[-1],
#  type='line', quiet=True, overwrite=True)
"""

channels = sorted(
    gscript.parse_command('g.list', type='vector',
                          pattern='channel_centerline_0*').keys())

for channel in channels:
    channel_points = channel[:-7] + 'points_' + channel[-7:]
    v.to_points(input=channel, output=channel_points, type='line',
                dmax=0.002, overwrite=True)
    v.db_addcolumn(
        map=channel_points, layer=2,
        columns='x double precision, y double precision, z double precision')
    v.to_db(map=channel_points, option='coor', columns='x,y', layer=2)
    v.what_rast(map=channel_points, layer=2, raster='DEM_' + channel[-7:],
                column='z')
    v.db_select(map=channel_points, layer=2, separator=',',
                file=channel_points + '.csv', overwrite=True)
def raster_to_vector(raster_category_flow, vector_category_flow,
                     flow_column_name, category, type):
    """Convert a raster to a vector map

    Parameters
    ----------
    raster_category_flow :
        Name of the input raster map 'flow in category'

    vector_category_flow :
        Name for the output vector map 'flow in category'

    flow_column_name :
        Name for the attribute column that will hold the flow values

    category :
        Aggregation category identifier written to the 'aggregation_id' column

    type :
        Type for the output vector map

    Returns
    -------
    Nothing; creates the output vector map and populates its attribute table.

    Examples
    --------
    ..
    """
    msg = " * Vectorising raster map '{r}'"
    grass.verbose(_(msg.format(r=raster_category_flow)))
    r.to_vect(
        input=raster_category_flow,
        output=vector_category_flow,
        type="area",
        quiet=True,
    )

    msg = " * Updating the attribute table"
    grass.verbose(_(msg))

    # Value is the ecosystem type
    v.db_renamecolumn(
        map=vector_category_flow,
        column=("value", "ecosystem"),
        quiet=True,
    )

    # New column for flow values
    addcolumn_string = flow_column_name + " double"
    v.db_addcolumn(
        map=vector_category_flow,
        columns=addcolumn_string,
        quiet=True,
    )

    # The raster category 'label' is the 'flow'
    # (update the column that was just added, whatever its name)
    v.db_update(
        map=vector_category_flow,
        column=flow_column_name,
        query_column="label",
        quiet=True,
    )
    v.db_dropcolumn(
        map=vector_category_flow,
        columns="label",
        quiet=True,
    )

    # Update the aggregation raster categories
    v.db_addcolumn(
        map=vector_category_flow,
        columns="aggregation_id int",
        quiet=True,
    )
    v.db_update(
        map=vector_category_flow,
        column="aggregation_id",
        value=category,
        quiet=True,
    )
    v.colors(
        map=vector_category_flow,
        raster=raster_category_flow,
        quiet=True,
    )
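
# A hypothetical call to the function above, assuming a raster with this name
# exists in the current mapset (all names here are illustrative only):
raster_to_vector(
    raster_category_flow="flow_in_forest",
    vector_category_flow="flow_in_forest",
    flow_column_name="flow",
    category=1,
    type="area",
)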
def main():
    """
    Builds a grid for the MODFLOW component of the USGS hydrologic model,
    GSFLOW.
    """

    options, flags = gscript.parser()
    basin = options['basin']
    pp = options['pour_point']
    raster_input = options['raster_input']
    dx = options['dx']
    dy = options['dy']
    grid = options['output']
    mask = options['mask_output']
    bc_cell = options['bc_cell']
    # basin='basins_tmp_onebasin'; pp='pp_tmp'; raster_input='DEM';
    # raster_output='DEM_coarse'; dx=dy='500'; grid='grid_tmp'; mask='mask_tmp'

    """
    # Fatal if raster input and output are not both set
    _lena0 = (len(raster_input) == 0)
    _lenb0 = (len(raster_output) == 0)
    if _lena0 + _lenb0 == 1:
        gscript.fatal("You must set both raster input and output, or neither.")
    """

    # Fatal if bc_cell is set but the mask and pour point are not
    if bc_cell != '':
        if (mask == '') or (pp == ''):
            gscript.fatal(
                'Mask and pour point must be set to define b.c. cell')

    # Create grid -- overlaps the DEM, with three cells of padding
    gscript.use_temp_region()
    reg = gscript.region()
    reg_grid_edges_sn = np.linspace(reg['s'], reg['n'], reg['rows'])
    reg_grid_edges_we = np.linspace(reg['w'], reg['e'], reg['cols'])
    g.region(vector=basin, ewres=dx, nsres=dy)
    regnew = gscript.region()
    # Use a grid ratio -- don't match the desired MODFLOW resolution exactly
    grid_ratio_ns = np.round(regnew['nsres'] / reg['nsres'])
    grid_ratio_ew = np.round(regnew['ewres'] / reg['ewres'])
    # Get S and W, then move the unit number of grid cells over to get N and E,
    # and include 3 cells of padding around the whole watershed
    _s_dist = np.abs(reg_grid_edges_sn - (regnew['s'] - 3. * regnew['nsres']))
    _s_idx = np.where(_s_dist == np.min(_s_dist))[0][0]
    _s = float(reg_grid_edges_sn[_s_idx])
    _n_grid = np.arange(_s, reg['n'] + 3 * grid_ratio_ns * reg['nsres'],
                        grid_ratio_ns * reg['nsres'])
    _n_dist = np.abs(_n_grid - (regnew['n'] + 3. * regnew['nsres']))
    _n_idx = np.where(_n_dist == np.min(_n_dist))[0][0]
    _n = float(_n_grid[_n_idx])
    _w_dist = np.abs(reg_grid_edges_we - (regnew['w'] - 3. * regnew['ewres']))
    _w_idx = np.where(_w_dist == np.min(_w_dist))[0][0]
    _w = float(reg_grid_edges_we[_w_idx])
    _e_grid = np.arange(_w, reg['e'] + 3 * grid_ratio_ew * reg['ewres'],
                        grid_ratio_ew * reg['ewres'])
    _e_dist = np.abs(_e_grid - (regnew['e'] + 3. * regnew['ewres']))
    _e_idx = np.where(_e_dist == np.min(_e_dist))[0][0]
    _e = float(_e_grid[_e_idx])
    # Finally make the region
    g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n),
             nsres=str(grid_ratio_ns * reg['nsres']),
             ewres=str(grid_ratio_ew * reg['ewres']))
    # And then make the grid
    v.mkgrid(map=grid, overwrite=gscript.overwrite())

    # Cell numbers (row, column, continuous ID)
    v.db_addcolumn(map=grid, columns='id int', quiet=True)
    colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns'])
    colValues = np.array(
        list(gscript.vector_db_select(grid, layer=1)['values'].values())
    )  # list() needed for Python 3's dict view
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()
    rows = colValues[:, colNames == 'row'].astype(int).squeeze()
    cols = colValues[:, colNames == 'col'].astype(int).squeeze()
    nrows = np.max(rows)
    ncols = np.max(cols)
    cats = np.ravel([cats])
    _id = np.ravel([ncols * (rows - 1) + cols])
    _id_cat = []
    for i in range(len(_id)):
        _id_cat.append((_id[i], cats[i]))
    gridTopo = VectorTopo(grid)
    gridTopo.open('rw')
    cur = gridTopo.table.conn.cursor()
    cur.executemany("update " + grid + " set id=? where cat=?", _id_cat)
where cat=?", _id_cat) gridTopo.table.conn.commit() gridTopo.close() # Cell area v.db_addcolumn(map=grid, columns='area_m2', quiet=True) v.to_db(map=grid, option='area', units='meters', columns='area_m2', quiet=True) # Basin mask if len(mask) > 0: # Fine resolution region: g.region(n=reg['n'], s=reg['s'], w=reg['w'], e=reg['e'], nsres=reg['nsres'], ewres=reg['ewres']) # Rasterize basin v.to_rast(input=basin, output=mask, use='val', value=1, overwrite=gscript.overwrite(), quiet=True) # Coarse resolution region: g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns * reg['nsres']), ewres=str(grid_ratio_ew * reg['ewres'])) r.resamp_stats(input=mask, output=mask, method='sum', overwrite=True, quiet=True) r.mapcalc('tmp' + ' = ' + mask + ' > 0', overwrite=True, quiet=True) g.rename(raster=('tmp', mask), overwrite=True, quiet=True) r.null(map=mask, null=0, quiet=True) # Add mask location (1 vs 0) in the MODFLOW grid v.db_addcolumn(map=grid, columns='basinmask double precision', quiet=True) v.what_rast(map=grid, type='centroid', raster=mask, column='basinmask') """ # Resampled raster if len(raster_output) > 0: r.resamp_stats(input=raster_input, output=raster_output, method='average', overwrite=gscript.overwrite(), quiet=True) """ # Pour point if len(pp) > 0: v.db_addcolumn(map=pp, columns=('row integer', 'col integer'), quiet=True) v.build(map=pp, quiet=True) v.what_vect(map=pp, query_map=grid, column='row', query_column='row', quiet=True) v.what_vect(map=pp, query_map=grid, column='col', query_column='col', quiet=True) # Next point downstream of the pour point # Requires pp (always) and mask (sometimes) # Dependency set above w/ gscript.fatal if len(bc_cell) > 0: ########## NEED TO USE TRUE TEMPORARY FILE ########## # May not work with dx != dy! 
        v.to_rast(input=pp, output='tmp', use='val', value=1, overwrite=True)
        r.buffer(input='tmp', output='tmp', distances=float(dx) * 1.5,
                 overwrite=True)
        r.mapcalc('tmp2 = if(tmp==2,1,null()) * ' + raster_input,
                  overwrite=True)
        g.rename(raster=('tmp2', 'tmp'), overwrite=True, quiet=True)
        # r.mapcalc('tmp = if(isnull('+raster_input+',0,(tmp == 2)))', overwrite=True)
        # g.region(rast='tmp')
        # r.null(map=raster_input,
        r.drain(input=raster_input, start_points=pp, output='tmp2',
                overwrite=True)
        r.mapcalc('tmp3 = tmp2 * tmp', overwrite=True, quiet=True)
        g.rename(raster=('tmp3', 'tmp'), overwrite=True, quiet=True)
        # r.null(map='tmp', setnull=0)  # Not necessary: center point removed above
        r.to_vect(input='tmp', output=bc_cell, type='point', column='z',
                  overwrite=gscript.overwrite(), quiet=True)
        v.db_addcolumn(map=bc_cell,
                       columns=('row integer', 'col integer',
                                'x double precision', 'y double precision'),
                       quiet=True)
        v.build(map=bc_cell, quiet=True)
        v.what_vect(map=bc_cell, query_map=grid, column='row',
                    query_column='row', quiet=True)
        v.what_vect(map=bc_cell, query_map=grid, column='col',
                    query_column='col', quiet=True)
        v.to_db(map=bc_cell, option='coor', columns=('x,y'))

        # Find out if this is diagonal: finite difference works only N-S, W-E
        colNames = np.array(gscript.vector_db_select(pp, layer=1)['columns'])
        colValues = np.array(
            list(gscript.vector_db_select(pp, layer=1)['values'].values())
        )  # list() needed for Python 3's dict view
        pp_row = int(colValues[:, colNames == 'row'].astype(int).squeeze())
        pp_col = int(colValues[:, colNames == 'col'].astype(int).squeeze())
        colNames = np.array(
            gscript.vector_db_select(bc_cell, layer=1)['columns'])
        colValues = np.array(
            list(gscript.vector_db_select(bc_cell, layer=1)['values'].values())
        )
        bc_row = int(colValues[:, colNames == 'row'].astype(int).squeeze())
        bc_col = int(colValues[:, colNames == 'col'].astype(int).squeeze())
        # Also get x and y while we are at it: may be needed later
        bc_x = float(colValues[:, colNames == 'x'].astype(float).squeeze())
        bc_y = float(colValues[:, colNames == 'y'].astype(float).squeeze())
        if (bc_row != pp_row) and (bc_col != pp_col):
            # Diagonal: the connection must not be diagonal, so there are
            # two possible locations that are adjacent to the pour point
            _col1, _row1 = str(bc_col), str(pp_row)
            _col2, _row2 = str(pp_col), str(bc_row)
            # Check if either of these is covered by the basin mask
            _ismask_1 = gscript.vector_db_select(
                grid, layer=1,
                where='(row == ' + _row1 + ') AND (col ==' + _col1 + ')',
                columns='basinmask')
            _ismask_1 = int(list(_ismask_1['values'].values())[0][0])
            _ismask_2 = gscript.vector_db_select(
                grid, layer=1,
                where='(row == ' + _row2 + ') AND (col ==' + _col2 + ')',
                columns='basinmask')
            _ismask_2 = int(list(_ismask_2['values'].values())[0][0])
            # If both are covered by the mask, error
            if _ismask_1 and _ismask_2:
                gscript.fatal(
                    'All possible b.c. cells covered by basin mask.\n'
                    'Contact the developer: awickert (at) umn(.)edu')
            # Otherwise, keep those that are not covered by the basin mask
            # ... or do we want the point that touches as few interior
            # cells as possible?
            # Maybe just try setting both and see what happens for now!
            else:
                # Get dx and dy
                dx = gscript.region()['ewres']
                dy = gscript.region()['nsres']
                # Build tool to handle multiple b.c. cells?
                bcvect = vector.Vector(bc_cell)
                bcvect.open('rw')
                _cat_i = 2
                if not _ismask_1:
                    # _x should always be bc_x, but writing generalized code
                    _x = bc_x + dx * (int(_col1) - bc_col)  # col 1 at w edge
                    _y = bc_y - dy * (int(_row1) - bc_row)  # row 1 at n edge
                    point0 = Point(_x, _y)
                    bcvect.write(point0, cat=_cat_i,
                                 attrs=(None, _row1, _col1, _x, _y))
                    bcvect.table.conn.commit()
                    _cat_i += 1
                if not _ismask_2:
                    # _y should always be bc_y, but writing generalized code
                    _x = bc_x + dx * (int(_col2) - bc_col)  # col 2 at w edge
                    _y = bc_y - dy * (int(_row2) - bc_row)  # row 2 at n edge
                    point0 = Point(_x, _y)
                    bcvect.write(point0, cat=_cat_i,
                                 attrs=(None, _row2, _col2, _x, _y))
                    bcvect.table.conn.commit()
                # Build database table and vector geometry
                bcvect.build()
                bcvect.close()

    g.region(n=reg['n'], s=reg['s'], w=reg['w'], e=reg['e'],
             nsres=reg['nsres'], ewres=reg['ewres'])
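
# A tiny standalone check of the diagonal rule used above: a MODFLOW
# finite-difference connection must share a row or a column, so a candidate
# boundary-condition cell that differs in both is "diagonal" and must be
# replaced by one of its two row/column-sharing neighbors. Pure Python with
# illustrative names:

def candidate_bc_cells(pp_rc, bc_rc):
    """Return a list of valid (row, col) b.c. cells, given the pour-point
    and downstream-cell (row, col) tuples."""
    (pr, pc), (br, bc) = pp_rc, bc_rc
    if pr != br and pc != bc:       # diagonal: offer the two adjacent options
        return [(pr, bc), (br, pc)]
    return [(br, bc)]               # already N-S or W-E adjacent

print(candidate_bc_cells((10, 10), (11, 11)))  # -> [(10, 11), (11, 10)]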
def Generate(Qa):
    """
    Recursively apply Hardy Cross corrections, calling itself until every
    loop correction Del is within the convergence threshold `con`.
    """
    Dels = []
    global Iterations
    Iterations += 1
    for net in Network:
        Arcs = [net[i:i + 2] for i in range(3)]
        Hl, Hl_Qa = {}, {}  # Head loss and head loss / Qa
        for arc in Arcs:
            Hl[arc] = (Qa[arc]**2) * k[arc] * Dir[arc]
            Hl_Qa[arc] = abs(Hl[arc] / Qa[arc])
        Del = -sum(Hl.values()) / (n * sum(Hl_Qa.values()))
        Dels.append(Del)

    # Append the overall Dels to O_Del
    O_Del.append(Dels)

    # Add attribute columns with the values
    v.db_addcolumn(map='myNet', columns='value double precision')
    for net in Network:
        Arcs = [net[i:i + 2] for i in range(3)]
        for arc in Arcs:
            whr = "name like '%s'" % arc.lower()
            v.db_update(map='myNet', column='value', where=whr,
                        value=round(abs(Qa[arc]), 3))
    v.label(map='myNet', type='line', column='value', labels='Line',
            size='5', opaque='no', color='red')
    v.label(map='TankPoints', type='point', column='name',
            labels='TankPoints', size='5', opaque='no', color='red')

    # Prepare map
    ps = Module('ps.map')
    ps(input=Join('Test1', '#ps_scripts', 'GenNetwork.psmap'),
       output='Network-Iteration-%s.ps' % Iterations,
       flags=O)  # 'O' is a flags value defined at module level, like Network
    v.db_dropcolumn(map='myNet', columns='value')

    # Converged once every loop correction is within the threshold
    if not all(abs(i) <= con for i in Dels):
        New_Qa = {}
        for id, net in enumerate(Network):
            Arcs = [net[i:i + 2] for i in range(3)]
            for arc in Arcs:
                if arc in ('BD', 'DB'):
                    # Shared arc: corrected by its own loop's Del minus the
                    # neighboring loop's Del
                    New_Qa[arc] = (Qa[arc] * Dir[arc] + Dels[id]
                                   - Dels[int(not id)])
                else:
                    New_Qa[arc] = Qa[arc] * Dir[arc] + Dels[id]
        Generate(New_Qa)
    else:
        print("\n\n")
        print("Number of Iterations: %s\n" % Iterations)
        pprint(Qa)
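
# A self-contained sketch of the Hardy Cross correction used in Generate(),
# stripped of the GRASS mapping: for each loop, Del = -sum(h) / (n * sum(|h/Q|))
# with h = k*Q**2 (so n = 2), iterated until |Del| falls below a tolerance.
# All numbers below are made up for illustration:

def hardy_cross_loop(Q, k, n=2.0, tol=1e-6, max_iter=100):
    """Correct the signed flows Q (one per arc of a single loop)."""
    for _ in range(max_iter):
        h = [ki * qi * abs(qi) for ki, qi in zip(k, Q)]  # signed head losses
        delta = -sum(h) / (n * sum(abs(hi / qi) for hi, qi in zip(h, Q)))
        Q = [qi + delta for qi in Q]
        if abs(delta) <= tol:
            break
    return Q

print(hardy_cross_loop([1.0, -0.5, -0.3], [2.0, 1.0, 3.0]))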