def readPoint(xy):
    """Sample the elevation raster at a single (x, y) coordinate pair.

    :param xy: sequence of two floats (Easting, Northing)
    :return: value of the ``elevation`` raster at that point

    Opens its own RasterRow handle so it can be used as a worker
    function (e.g. with multiprocessing).
    """
    DEM = RasterRow(options['elevation'])
    DEM.open('r')
    point = Point(xy[0], xy[1])
    try:
        # BUG FIX: the raster must still be open when it is sampled;
        # the original called DEM.close() before DEM.get_value(point).
        return DEM.get_value(point)
    finally:
        DEM.close()
def _getData(self, timeseries):
    """Load data and read properties

    :param list timeseries: a list of timeseries
    """
    self.timeData = OrderedDict()
    mode = None
    unit = None
    columns = ','.join(['name', 'start_time', 'end_time'])
    for series in timeseries:
        name = series[0]
        fullname = name + '@' + series[1]
        etype = series[2]
        sp = tgis.dataset_factory(etype, fullname)
        # BUG FIX: check existence *before* selecting metadata
        # (consistent with _getSTRDdata); selecting a dataset that is
        # not in the temporal DB is an error.
        if not sp.is_in_db(dbif=self.dbif):
            # BUG FIX: GError's first positional parameter is the
            # message, not the parent — pass the parent by keyword.
            GError(parent=self,
                   message=_("Dataset <%s> not found in temporal "
                             "database") % (fullname))
            return
        sp.select(dbif=self.dbif)
        self.timeData[name] = OrderedDict()

        self.timeData[name]['temporalDataType'] = etype
        self.timeData[name]['temporalType'] = sp.get_temporal_type()
        self.timeData[name]['granularity'] = sp.get_granularity()

        # All datasets must share one temporal type (absolute/relative).
        if mode is None:
            mode = self.timeData[name]['temporalType']
        elif self.timeData[name]['temporalType'] != mode:
            GError(parent=self,
                   message=_("Datasets have different temporal"
                             " type (absolute x relative), "
                             "which is not allowed."))
            return

        # check topology
        maps = sp.get_registered_maps_as_objects(dbif=self.dbif)
        self.timeData[name]['validTopology'] = sp.check_temporal_topology(
            maps=maps, dbif=self.dbif)

        self.timeData[name]['unit'] = None  # only with relative
        if self.timeData[name]['temporalType'] == 'relative':
            start, end, self.timeData[name]['unit'] = sp.get_relative_time()
            # All relative datasets must share one time unit.
            if unit is None:
                unit = self.timeData[name]['unit']
            elif self.timeData[name]['unit'] != unit:
                # BUG FIX: parent and message were swapped (self was
                # passed positionally into GError's message slot).
                GError(parent=self,
                       message=_("Datasets have different time unit which "
                                 "is not allowed."))
                return

        rows = sp.get_registered_maps(columns=columns, where=None,
                                      order='start_time', dbif=self.dbif)
        for row in rows:
            self.timeData[name][row[0]] = {}
            self.timeData[name][row[0]]['start_datetime'] = row[1]
            self.timeData[name][row[0]]['end_datetime'] = row[2]
            # Sample each registered raster map at the point of interest.
            r = RasterRow(row[0])
            r.open()
            val = r.get_value(self.poi)
            r.close()
            self.timeData[name][row[0]]['value'] = val
    self.unit = unit
    self.temporalType = mode
    return
def readPoint(xy):
    """Open the elevation raster and sample it at (x, y).

    :param xy: sequence of two floats (Easting, Northing)
    :return: value of the ``elevation`` raster at that point

    Worker function for Pool.map, so it opens its own raster handle.
    BUG FIX: the original file defined ``readPoint`` twice with
    incompatible signatures — the unused 2-arg variant (immediately
    shadowed, hence dead code) has been removed — and closed the
    raster *before* sampling it; the raster is now closed afterwards.
    """
    DEM = RasterRow(options['elevation'])
    DEM.open('r')
    point = Point(xy[0], xy[1])
    try:
        return DEM.get_value(point)
    finally:
        DEM.close()


# Picklable [x, y] pairs for the worker pool.
coordsList = [[point.x, point.y] for point in coords[-1]]
p = Pool(3)
p.map(readPoint, coordsList)

# Sequential sampling of the DEM along each network category.
DEM = RasterRow(options['elevation'])
DEM.open('r')
z = []
for __i in range(len(netcats)):
    cat = netcats[__i]
    zsub = []
    for _j in range(len(E[__i])):
        zsub.append(DEM.get_value(Point(E[__i][_j], N[__i][_j])))
    z.append(zsub)
def _getSTRDdata(self, timeseries):
    """Load data and read properties

    :param list timeseries: a list of timeseries
    """
    # A valid point of interest is required before any raster sampling.
    if not self.poi:
        GError(parent=self, message=_("Invalid input coordinates"),
               showTraceback=False)
        return
    mode = None
    unit = None
    columns = ','.join(['name', 'start_time', 'end_time'])
    for series in timeseries:
        name = series[0]
        fullname = name + '@' + series[1]
        etype = series[2]
        sp = tgis.dataset_factory(etype, fullname)
        # Bail out early if the dataset is not registered in the
        # temporal database.
        if not sp.is_in_db(dbif=self.dbif):
            GError(message=_("Dataset <%s> not found in temporal "
                             "database") % (fullname), parent=self)
            return
        sp.select(dbif=self.dbif)
        # Minimum of the per-map minima; used below to screen values.
        minmin = sp.metadata.get_min_min()
        self.plotNameListR.append(name)
        self.timeDataR[name] = OrderedDict()
        self.timeDataR[name]['temporalDataType'] = etype
        self.timeDataR[name]['temporalType'] = sp.get_temporal_type()
        self.timeDataR[name]['granularity'] = sp.get_granularity()
        # All datasets must share one temporal type (absolute/relative).
        if mode is None:
            mode = self.timeDataR[name]['temporalType']
        elif self.timeDataR[name]['temporalType'] != mode:
            GError(parent=self, message=_("Datasets have different temporal"
                                          " type (absolute x relative), "
                                          "which is not allowed."))
            return
        # check topology
        maps = sp.get_registered_maps_as_objects(dbif=self.dbif)
        self.timeDataR[name]['validTopology'] = sp.check_temporal_topology(
            maps=maps, dbif=self.dbif)
        self.timeDataR[name]['unit'] = None  # only with relative
        if self.timeDataR[name]['temporalType'] == 'relative':
            start, end, self.timeDataR[name][
                'unit'] = sp.get_relative_time()
            # All relative datasets must share one time unit.
            if unit is None:
                unit = self.timeDataR[name]['unit']
            elif self.timeDataR[name]['unit'] != unit:
                GError(parent=self, message=_("Datasets have different "
                                              "time unit which is not "
                                              "allowed."))
                return
        rows = sp.get_registered_maps(columns=columns, where=None,
                                      order='start_time', dbif=self.dbif)
        for row in rows:
            self.timeDataR[name][row[0]] = {}
            self.timeDataR[name][row[0]]['start_datetime'] = row[1]
            self.timeDataR[name][row[0]]['end_datetime'] = row[2]
            # Sample each registered raster map at the point of interest.
            r = RasterRow(row[0])
            r.open()
            val = r.get_value(self.poi)
            r.close()
            # -2147483648 is the GRASS CELL null sentinel.
            # NOTE(review): with 'and', a value below minmin that is not
            # exactly the sentinel is kept — confirm 'or' was not intended.
            if val == -2147483648 and val < minmin:
                self.timeDataR[name][row[0]]['value'] = None
            else:
                self.timeDataR[name][row[0]]['value'] = val
    self.unit = unit
    self.temporalType = mode
    return
points_in_streams = [] # 2. Get coordinates for row in streamsTopo: points_in_streams.append(row) # 3. Get areas at coordinates drainageArea_km2 = RasterRow('drainageArea_km2') drainageArea_km2.open('r') streamsTopo.table.columns.add('drainageArea_km2_1', 'double precision') streamsTopo.table.columns.add('drainageArea_km2_2', 'double precision') streamsTopo.table.columns.add('x1', 'double precision') streamsTopo.table.columns.add('y1', 'double precision') streamsTopo.table.columns.add('x2', 'double precision') streamsTopo.table.columns.add('y2', 'double precision') cur = streamsTopo.table.conn.cursor() for i in range(len(streamsTopo)): _A_point1 = drainageArea_km2.get_value(points_in_streams[i][0]) _A_point2 = drainageArea_km2.get_value(points_in_streams[i][-1]) # 4. Sort points to go from small A to large A #streamsTopo[i+1] = points_in_streams[i].reverse() # 5. Upload small area as x1, y1; large area as x2, y2 # CATS had better be in unbroken ascending order! if _A_point1 > _A_point2: # Areas cur.execute("update streams set drainageArea_km2_1=" + str(_A_point2) + " where cat=" + str(i + 1)) cur.execute("update streams set drainageArea_km2_2=" + str(_A_point1) + " where cat=" + str(i + 1)) # Points cur.execute("update streams set x1=" + str(points_in_streams[i][-1].x) + " where cat=" + str(i + 1))
def main():
    """
    Links each river segment to the next downstream segment in a tributary
    network by referencing its category (cat) number in a new column. "0"
    means that the river exits the map.
    """
    import matplotlib  # required by windows
    matplotlib.use('wxAGG')  # required by windows
    from matplotlib import pyplot as plt

    options, flags = gscript.parser()

    # Parsing
    window = float(options['window'])
    accum_mult = float(options['accum_mult'])
    if options['units'] == 'm2':
        accum_label = 'Drainage area [m$^2$]'
    elif options['units'] == 'km2':
        accum_label = 'Drainage area [km$^2$]'
    elif options['units'] == 'cumecs':
        accum_label = 'Water discharge [m$^3$ s$^{-1}$]'
    elif options['units'] == 'cfs':
        accum_label = 'Water discharge [cfs]'
    else:
        accum_label = 'Flow accumulation [$-$]'
    plots = options['plots'].split(',')

    # Attributes of streams
    colNames = np.array(vector_db_select(options['streams'])['columns'])
    colValues = np.array(
        vector_db_select(options['streams'])['values'].values())
    tostream = colValues[:, colNames == 'tostream'].astype(int).squeeze()
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()  # = "fromstream"

    # We can loop over this list to get the shape of the full river network.
    selected_cats = []
    segment = int(options['cat'])
    selected_cats.append(segment)
    x = []
    z = []
    if options['direction'] == 'downstream':
        # Get network: walk tostream until the outlet (cat 0) is reached.
        gscript.message("Network")
        while selected_cats[-1] != 0:
            selected_cats.append(int(tostream[cats == selected_cats[-1]]))
            x.append(selected_cats[-1])
        selected_cats = selected_cats[:-1]  # remove 0 at end
        # Extract x points in network
        data = vector.VectorTopo(
            options['streams'])  # Create a VectorTopo object
        data.open('r')  # Open this object for reading
        coords = []
        _i = 0
        for i in range(len(data)):
            if isinstance(data.read(i + 1), vector.geometry.Line):
                if data.read(i + 1).cat in selected_cats:
                    coords.append(data.read(i + 1).to_array())
                    gscript.core.percent(_i, len(selected_cats),
                                         100. / len(selected_cats))
                    _i += 1
        gscript.core.percent(1, 1, 1)
        # BUG FIX: close the vector map once coordinates are extracted
        # (the handle was previously leaked).
        data.close()
        # Cumulative along-stream distance from the starting segment.
        coords = np.vstack(np.array(coords))
        _dx = np.diff(coords[:, 0])
        _dy = np.diff(coords[:, 1])
        x_downstream_0 = np.hstack((0, np.cumsum((_dx**2 + _dy**2)**.5)))
        x_downstream = x_downstream_0.copy()
    elif options['direction'] == 'upstream':
        #terminalCATS = list(options['cat'])
        #while terminalCATS:
        #
        print("Upstream direction not yet active!")
        return
        """
        # Add new lists for each successive upstream river
        river_is_upstream =
        while full_river_cats
        """

    # Network extraction
    # BUG FIX throughout: `is not ''` compared identity with a string
    # literal (implementation-dependent, SyntaxWarning on modern
    # Python); replaced with the equality test `!= ''`.
    if options['outstream'] != '':
        selected_cats_str = list(np.array(selected_cats).astype(str))
        selected_cats_csv = ','.join(selected_cats_str)
        v.extract(input=options['streams'], output=options['outstream'],
                  cats=selected_cats_csv, overwrite=gscript.overwrite())

    # Analysis
    gscript.message("Elevation")
    if options['elevation']:
        _include_z = True
        DEM = RasterRow(options['elevation'])
        DEM.open('r')
        z = []
        _i = 0
        _lasti = 0
        for row in coords:
            z.append(DEM.get_value(Point(row[0], row[1])))
            if float(_i) / len(coords) > float(_lasti) / len(coords):
                gscript.core.percent(_i, len(coords), np.floor(_i - _lasti))
                _lasti = _i
            _i += 1
        DEM.close()
        z = np.array(z)
        if options['window'] != '':
            x_downstream, z = moving_average(x_downstream_0, z, window)
        gscript.core.percent(1, 1, 1)
    else:
        _include_z = False

    gscript.message("Slope")
    if options['slope']:
        _include_S = True
        slope = RasterRow(options['slope'])
        slope.open('r')
        S = []
        _i = 0
        _lasti = 0
        for row in coords:
            S.append(slope.get_value(Point(row[0], row[1])))
            if float(_i) / len(coords) > float(_lasti) / len(coords):
                gscript.core.percent(_i, len(coords), np.floor(_i - _lasti))
                _lasti = _i
            _i += 1
        slope.close()
        S = np.array(S)
        S_0 = S.copy()
        if options['window'] != '':
            x_downstream, S = moving_average(x_downstream_0, S, window)
        gscript.core.percent(1, 1, 1)
    else:
        _include_S = False

    gscript.message("Accumulation")
    if options['accumulation']:
        _include_A = True
        accumulation = RasterRow(options['accumulation'])
        accumulation.open('r')
        A = []
        _i = 0
        _lasti = 0
        for row in coords:
            A.append(
                accumulation.get_value(Point(row[0], row[1])) * accum_mult)
            if float(_i) / len(coords) > float(_lasti) / len(coords):
                gscript.core.percent(_i, len(coords), np.floor(_i - _lasti))
                _lasti = _i
            _i += 1
        accumulation.close()
        A = np.array(A)
        A_0 = A.copy()
        if options['window'] != '':
            x_downstream, A = moving_average(x_downstream_0, A, window)
        gscript.core.percent(1, 1, 1)
    else:
        _include_A = False

    # Plotting
    if 'LongProfile' in plots:
        plt.figure()
        plt.plot(x_downstream / 1000., z, 'k-', linewidth=2)
        plt.xlabel('Distance downstream [km]', fontsize=16)
        plt.ylabel('Elevation [m]', fontsize=20)
        plt.tight_layout()
    if 'SlopeAccum' in plots:
        plt.figure()
        plt.loglog(A, S, 'ko', linewidth=2)
        plt.xlabel(accum_label, fontsize=20)
        plt.ylabel('Slope [$-$]', fontsize=20)
        plt.tight_layout()
    if 'SlopeDistance' in plots:
        plt.figure()
        plt.plot(x_downstream / 1000., S, 'k-', linewidth=2)
        plt.xlabel('Distance downstream [km]', fontsize=16)
        plt.ylabel('Slope [$-$]', fontsize=20)
        plt.tight_layout()
    if 'AccumDistance' in plots:
        plt.figure()
        plt.plot(x_downstream / 1000., A, 'k-', linewidth=2)
        plt.xlabel('Distance downstream [km]', fontsize=16)
        plt.ylabel(accum_label, fontsize=20)
        plt.tight_layout()
    plt.show()

    # Saving data
    if options['outfile_original'] != '':
        header = ['x_downstream', 'E', 'N']
        outfile = np.hstack((np.expand_dims(x_downstream_0, axis=1), coords))
        if _include_S:
            header.append('slope')
            outfile = np.hstack((outfile, np.expand_dims(S_0, axis=1)))
        if _include_A:
            if (options['units'] == 'm2') or (options['units'] == 'km2'):
                header.append('drainage_area_' + options['units'])
            elif (options['units'] == 'cumecs') or (options['units'] == 'cfs'):
                header.append('water_discharge_' + options['units'])
            else:
                header.append('flow_accumulation_arbitrary_units')
            outfile = np.hstack((outfile, np.expand_dims(A_0, axis=1)))
        header = np.array(header)
        outfile = np.vstack((header, outfile))
        np.savetxt(options['outfile_original'], outfile, '%s')
    if options['outfile_smoothed'] != '':
        header = ['x_downstream', 'E', 'N']
        # E, N on smoothed grid
        x_downstream, E = moving_average(x_downstream_0, coords[:, 0], window)
        x_downstream, N = moving_average(x_downstream_0, coords[:, 1], window)
        # Back to output
        outfile = np.hstack((np.expand_dims(x_downstream, axis=1),
                             np.expand_dims(E, axis=1),
                             np.expand_dims(N, axis=1)))
        if _include_S:
            header.append('slope')
            outfile = np.hstack((outfile, np.expand_dims(S, axis=1)))
        if _include_A:
            if (options['units'] == 'm2') or (options['units'] == 'km2'):
                header.append('drainage_area_' + options['units'])
            elif (options['units'] == 'cumecs') or (options['units'] == 'cfs'):
                header.append('water_discharge_' + options['units'])
            else:
                header.append('flow_accumulation_arbitrary_units')
            outfile = np.hstack((outfile, np.expand_dims(A, axis=1)))
        header = np.array(header)
        outfile = np.vstack((header, outfile))
        np.savetxt(options['outfile_smoothed'], outfile, '%s')
def main(): """ Links each river segment to the next downstream segment in a tributary network by referencing its category (cat) number in a new column. "0" means that the river exits the map. """ # Parsing inside function _cat = int(options['cat']) overwrite_flag = gscript.overwrite() elevation = options['elevation'] if elevation == '': elevation = None slope = options['slope'] if slope == '': slope = None accumulation = options['accumulation'] if accumulation == '': accumulation = None direction = options['direction'] if direction == '': direction = None streams = options['streams'] if streams == '': streams = None outstream = options['outstream'] if outstream == '': outstream = None outfile = options['outfile'] if outfile == '': outfile = None # !!!!!!!!!!!!!!!!! # ADD SWITCHES TO INDIVIDUALLY SMOOTH SLOPE, ACCUM, ETC. # !!!!!!!!!!!!!!!!! try: window = float(options['window']) except: window = None try: dx_target = float(options['dx_target']) except: dx_target = None accum_mult = float(options['accum_mult']) if options['units'] == 'm2': accum_label = 'Drainage area [m$^2$]' elif options['units'] == 'km2': accum_label = 'Drainage area [km$^2$]' elif options['units'] == 'cumecs': accum_label = 'Water discharge [m$^3$ s$^{-1}$]' elif options['units'] == 'cfs': accum_label = 'Water discharge [cfs]' else: accum_label = 'Flow accumulation [$-$]' plots = options['plots'].split(',') # Attributes of streams colNames = np.array(vector_db_select(streams)['columns']) colValues = np.array(vector_db_select(streams)['values'].values()) warnings.warn('tostream is not generalized') tostream = colValues[:,colNames == 'tostream'].astype(int).squeeze() cats = colValues[:,colNames == 'cat'].astype(int).squeeze() # = "fromstream" # We can loop over this list to get the shape of the full river network. 
selected_cats = [] segment = _cat selected_cats.append(segment) # Get all cats in network data = vector.VectorTopo(streams) # Create a VectorTopo object data.open('r') # Open this object for reading if direction == 'downstream': gscript.message("Extracting drainage pathway...",) # Get network while selected_cats[-1] != 0: selected_cats.append(int(tostream[cats == selected_cats[-1]])) #x.append(selected_cats[-1]) selected_cats = selected_cats[:-1] # remove 0 at end gscript.message("Done.") elif direction == 'upstream': gscript.message("Extracting drainage network...",) # GENERALIZE COLUMN NAME!!!!!!!! tostream_col = np.where(np.array(data.table.columns.names()) == 'tostream')[0][0] terminalCats = [_cat] terminal_x_values = [0] netcats = [] net_tocats = [] while len(terminalCats) > 0: for cat in terminalCats: netcats.append(cat) # ALSO UNADVISABLE NAME -- NEED TO GET TOSTREAM, GENERALIZED #print data.table_to_dict() colnum = np.where( np.array(data.table.columns.names()) == 'tostream')[0][0] net_tocats.append(data.table_to_dict()[cat][colnum]) oldcats = terminalCats terminalCats = [] for cat in oldcats: terminalCats += list(cats[tostream == cat]) #data.close() netcats = np.array(netcats) net_tocats = np.array(net_tocats) selected_cats = netcats gscript.message("Done.") segments = [] for cat in selected_cats: points_with_cat = data.cat(cat_id=cat, vtype='lines')[0] subcoords = [] for point in points_with_cat: subcoords.append([point.x, point.y]) segments.append( rn.Segment(_id=cat, to_ids=tostream[cats == cat]) ) segments[-1].set_EastingNorthing(ENarray=subcoords) segments[-1].calc_x_from_EastingNorthing() # x grid spacing #print segments[-1].Easting[-1], segments[-1].Northing[-1] #print segments[-1].EastingNorthing[-1] #print "" if dx_target is not None: dx_target = float(dx_target) segments[-1].set_target_dx_downstream(dx_target) segments[-1].densify_x_E_N() data.close() net = rn.Network(segments) bbox = BoundingBox(points_xy=net.segments_xy_flattened()) 
reg_to_revert = region.Region() reg = region.Region() # to limit region for computational efficiency reg.set_bbox(bbox.bbox) reg.write() # Network extraction if outstream: selected_cats_str = list(np.array(selected_cats).astype(str)) selected_cats_csv = ','.join(selected_cats_str) v.extract( input=streams, output=outstream, \ cats=selected_cats_csv, overwrite=overwrite_flag ) # All coordinates coords = net.segments_xy_flattened() #x_downstream = # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # UPDATE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! """ ##### FIND RIGHT SPOT TO ADD CLASS STUFF HERE/BELOW #### # Extract x points in network data = vector.VectorTopo(streams) # Create a VectorTopo object data.open('r') # Open this object for reading coords = [] _i = 0 for i in range(len(data)): if type(data.read(i+1)) is vector.geometry.Line: if data.read(i+1).cat in selected_cats: coords.append(data.read(i+1).to_array()) gscript.core.percent(_i, len(selected_cats), 100./len(selected_cats)) _i += 1 gscript.core.percent(1, 1, 1) coords = np.vstack(np.array(coords)) _dx = np.diff(coords[:,0]) _dy = np.diff(coords[:,1]) x_downstream_0 = np.hstack((0, np.cumsum((_dx**2 + _dy**2)**.5))) x_downstream = x_downstream_0.copy() data.close() """ # TEMPORARY!!!! #x_downstream = get_xEN() #x_downstream_0 = x_downstream[0] # Analysis # Downstream distances -- 0 at mouth net.compute_x_in_network() # Elevation if elevation: gscript.message("Elevation") _include_z = True # Load DEM griddata = garray.array() griddata.read(elevation) griddata = np.flipud(griddata) # Interpolate: nearest or linear? 
x = np.arange(reg.west + reg.ewres/2., reg.east, reg.ewres) y = np.arange(reg.south + reg.nsres/2., reg.north, reg.nsres) itp = RegularGridInterpolator( (x, y), griddata.transpose(), method='nearest') _i = 0 _lasti = 0 _nexti = 0 for segment in net.segment_list: try: segment.set_z( itp(segment.EastingNorthing) ) except: print segment.EastingNorthing print np.vstack((segment.Easting_original, segment.Northing_original)).transpose() sys.exit() if _i > _nexti: gscript.core.percent( _i, len(net.segment_list), np.floor(_i - _lasti)) _nexti = float(_nexti) + len(net.segment_list)/10. if _nexti > len(net.segment_list): _nexti = len(net.segment_list) - 1 _lasti = _i _i += 1 gscript.core.percent(1, 1, 1) del griddata #warnings.warn('Need to handle window in network') #gscript.core.percent(1, 1, 1) else: _include_z = False # Slope if slope: gscript.message("Slope") _include_S = True _slope = RasterRow(slope) _slope.open('r') _i = 0 _lasti = 0 _nexti = 0 for segment in net.segment_list: sen = segment.EastingNorthing # all E,N S = [] for row in sen: #try: S.append(_slope.get_value(Point(row[0], row[1]))) #except: # print "ERROR" if _i > _nexti: gscript.core.percent(_i, len(coords), np.floor(_i - _lasti)) _nexti = float(_nexti) + len(coords)/10. if _nexti > len(coords): _nexti = len(coords) - 1 _lasti = _i _i += 1 # MAKE SETTER FOR THIS!!!! 
segment.channel_slope = np.array(S) if window is not None: pass #net.smooth_window() #_x_downstream, _S = moving_average(x_downstream_0, S, window) _slope.close() S = np.array(S) S_0 = S.copy() gscript.core.percent(1, 1, 1) else: _include_S = False # Accumulation / drainage area if accumulation: gscript.message("Accumulation") _include_A = True accumulation = RasterRow(accumulation) accumulation.open('r') _i = 0 _lasti = 0 _nexti = 0 for segment in net.segment_list: A = [] sen = segment.EastingNorthing # all E,N for row in sen: A.append(accumulation.get_value(Point(row[0], row[1])) * accum_mult) if _i > _nexti: gscript.core.percent(_i, len(coords), np.floor(_i - _lasti)) _nexti = float(_nexti) + len(coords)/10. if _nexti > len(coords): _nexti = len(coords) - 1 _lasti = _i _i += 1 # MAKE SETTER FOR THIS!!!! segment.channel_flow_accumulation = np.array(A) accumulation.close() A = np.array(A) A_0 = A.copy() """ if window is not None: _x_downstream, A = moving_average(x_downstream_0, A, window) """ gscript.core.percent(1, 1, 1) else: _include_A = False # Revert to original region reg_to_revert # Smoothing if window is not None: net.smooth_window(window) # Plotting if 'LongProfile' in plots: plt.figure() if window: for segment in net.segment_list: plt.plot(segment.x/1000., segment.z_smoothed, 'k-', linewidth=2) else: for segment in net.segment_list: plt.plot(segment.x/1000., segment.z, 'k-', linewidth=2) #plt.plot(x_downstream/1000., z, 'k-', linewidth=2) plt.xlabel('Distance from mouth [km]', fontsize=16) plt.ylabel('Elevation [m]', fontsize=16) plt.tight_layout() if 'SlopeAccum' in plots: plt.figure() if window: for segment in net.segment_list: _y_points = segment.channel_slope_smoothed[ segment.channel_flow_accumulation_smoothed > 0 ] _x_points = segment.channel_flow_accumulation_smoothed[ segment.channel_flow_accumulation_smoothed > 0 ] plt.loglog(_x_points, _y_points, 'k.', alpha=.5) else: for segment in net.segment_list: _y_points = segment.channel_slope[ 
segment.channel_flow_accumulation > 0 ] _x_points = segment.channel_flow_accumulation[ segment.channel_flow_accumulation > 0 ] plt.loglog(_x_points, _y_points, 'k.', alpha=.5) plt.xlabel(accum_label, fontsize=16) plt.ylabel('Slope [$-$]', fontsize=16) plt.tight_layout() if 'SlopeDistance' in plots: plt.figure() if window: for segment in net.segment_list: plt.plot(segment.x/1000., segment.channel_slope_smoothed, 'k-', linewidth=2) else: for segment in net.segment_list: plt.plot(segment.x/1000., segment.channel_slope, 'k-', linewidth=2) plt.xlabel('Distance downstream [km]', fontsize=16) plt.ylabel('Slope [$-$]', fontsize=20) plt.tight_layout() if 'AccumDistance' in plots: plt.figure() for segment in net.segment_list: _x_points = segment.x[segment.channel_flow_accumulation > 0] _y_points = segment.channel_flow_accumulation[ segment.channel_flow_accumulation > 0 ] plt.plot(_x_points/1000., _y_points, 'k.', alpha=.5) plt.xlabel('Distance downstream [km]', fontsize=16) plt.ylabel(accum_label, fontsize=16) plt.tight_layout() plt.show() # Saving data -- will need to update for more complex data structures! if outfile: net.compute_profile_from_starting_segment() _outfile = np.vstack((net.long_profile_header, net.long_profile_output)) np.savetxt(outfile, _outfile, '%s') else: pass #print net.accum_from_headwaters[1] - net.slope_from_headwaters[1] """ for segment in net.segment_list: print segment.channel_flow_accumulation_smoothed print segment.channel_slope_smoothed print segment.channel_flow_accumulation_smoothed - \ segment.channel_slope_smoothed """ """