Code example #1
File: m.swim.subbasins.py  Project: mwort/m.swim
def get_table(vector, dtype='S250', **kw):
    '''Get a vector table into a numpy field array; dtype can be either a
    single type for all columns or a list/tuple with one type per column.'''
    tbl = grass.vector_db_select(vector, **kw)
    cols = tbl['columns']
    values = [tuple(row) for row in tbl['values'].values()]
    dtypes = {}
    if type(dtype) not in [list, tuple]:
        dtypes.update(dict(zip(cols, [dtype] * len(tbl['columns']))))
    elif len(dtype) != len(cols):
        raise IOError('count of dtypes does not match the number of columns!')
    else:
        dtypes.update(dict(zip(cols, dtype)))

    # first check for empty entries
    tbl = np.array(values, dtype=zip(cols, ['S250'] * len(cols)))
    convertedvals = []
    for c in cols:
        i = tbl[c] == ''
        if len(tbl[c][i]) > 0:
            gm('Column %s has %s empty cells, will be parsed as float.' %
               (c, len(tbl[c][i])))
            if dtypes[c] in [float, int]:
                dtypes[c] = float
                tbl[c][i] = 'nan'
        # actual type conversion
        convertedvals += [np.array(tbl[c], dtype=dtypes[c])]
    # now properly make it
    tbl = np.array(zip(*convertedvals), dtype=[(c, dtypes[c]) for c in cols])
    tbl.sort()
    return tbl
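
A minimal usage sketch of get_table, not part of the original project: it assumes an active GRASS session with the snippet's imports (grass, numpy as np); the map and column names below are hypothetical. 'cat' is requested explicitly so the per-column dtypes line up with the returned columns.

# Hypothetical map/column names, for illustration only.
tbl = get_table('subbasins', dtype=(int, int, float),
                columns='cat,subbasinID,area')
print(tbl.dtype.names)    # ('cat', 'subbasinID', 'area')
print(tbl['area'].sum())  # total area; rows come back sorted by the first column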
Code example #2
def analyzeSlopes(project):
  #for dirname, dirnames, filenames in os.walk(project+"/shade/"):
  #  print (dirname, dirnames, filenames)
  days = os.listdir(project+"/shade/")

  #clean db from columns
  call([vdbdropcolumn,'map=slope','columns=morning'])
  call([vdbdropcolumn,'map=slope','columns=afternoon'])
  #grass.run_command("v.db.dropcolumn", map="slope", column="morning")
  #grass.run_command("v.db.dropcolumn", map="slope", column="afternoon")
  call([vdbaddcolumn,'map=slope','columns=morning'])
  call([vdbaddcolumn,'map=slope','columns=afternoon'])
  #grass.run_command("v.db.addcolumn", map='slope', columns="morning")
  #grass.run_command("v.db.addcolumn", map='slope', columns="afternoon")
  call([vdbupdate,'map=slope','column=morning','value={}'])
  call([vdbupdate,'map=slope','column=afternoon','value={}'])

  for day in days:
    #r.external input=aprica/shade/032/afternoon.tiff output=afternoon
    ##grass.run_command("r.in.ascii", input=project+"/shade/"+day+"/afternoon.asc",output= "afternoon",overwrite=True)
    #grass.run_command("r.info",map="afternoon")
    #grass.run_command("r.mapcalc",expression="afternoonA = afternoon")
    ##v.buffer input=slope output=slopeBuffer distance=10

    slopesdb = grass.vector_db_select('slope',columns='cat')['values']
    print slopesdb
    #slopesdb=[10]
    for id in slopesdb:
      grass.run_command("v.buffer", where="cat="+str(id),input="slope",output="slopeBuffer",distance=0.0008333333333333334,overwrite=True)

      call([pathToRmask,'vector=slopeBuffer'])

      #print ("ids:",slopesdb,id)
      #a=grass.run_command("v.db.select", flags="vc",map='slope', where='cat=1', columns='morning')
      config = JSONDecoder().decode('{}')
      morningObject=JSONDecoder().decode(grass.vector_db_select('slope',columns='morning')['values'][id][0])
      afternoonObject=JSONDecoder().decode(grass.vector_db_select('slope',columns='afternoon')['values'][id][0])

      morningObject[day]=str(computeSlopeShadowStats(project,day,"morning"))
      afternoonObject[day]=str(computeSlopeShadowStats(project,day,"afternoon"))

      call([vdbupdate,'map=slope','column=morning','where=cat='+str(id),'value='+JSONEncoder().encode(morningObject)])
      call([vdbupdate,'map=slope','column=afternoon','where=cat='+str(id),'value='+JSONEncoder().encode(afternoonObject)])
      #grass.run_command("v.db.update", map='slope', columns="afternoon",where="cat="+str(id),value=4)

      #print grass.run_command("r.stats", input= project+"." + day + ".morning", separator="comma", flags="cn",overwrite=True)
      call([pathToRmask,'-r'])
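
This snippet stores a JSON object as text in each feature's attribute column and grows it by one key per day. A self-contained sketch of just that JSON round-trip (no GRASS calls), using the same JSONDecoder/JSONEncoder machinery; the day key and value are made up.

from json import JSONDecoder, JSONEncoder

stored = '{}'                       # what the initial v.db.update wrote
obj = JSONDecoder().decode(stored)
obj['032'] = str(0.75)              # e.g. shadow stats for day '032' (made-up value)
stored = JSONEncoder().encode(obj)
print(stored)  # {"032": "0.75"}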
Code example #3
File: m.swim.routing.py  Project: mwort/m.swim
def readSubNxtID(subbasinsvect,columns=('subbasinID','nextID','inletID')):
    '''Vector needs subbasinID, nextID and inletID column'''
    tbl=grass.vector_db_select(subbasinsvect,columns=','.join(columns))['values'].values()
    # check if empty cells
    tbl=np.array(tbl,dtype=np.unicode)
    for i,c in enumerate(columns):
        empty=tbl[tbl[:,i]==u'',i]
        if len(empty)>0: grass.fatal('The table %s has %s null values in column %s' %(subbasinsvect,len(empty),c))
    # convert to numpy rec array
    t = np.array(zip(*tbl.T),dtype=zip(columns,(int,)*len(columns)))
    return t
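
A hedged usage sketch, assuming a subbasin vector named 'subbasins' (hypothetical) that already carries the three required columns; in m.swim, outlets are marked by nextID < 0 (see code example #6 below).

# Hypothetical call; 'subbasins' is an assumed map name.
t = readSubNxtID('subbasins')
print(t['subbasinID'])     # record array fields match the column names
print(t[t['nextID'] < 0])  # the outlet subbasin(s)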
Code example #4
File: m.swim.climate.py  Project: mwort/m.swim
    def writeNCInfo(self,tbl):

        # get lon,lat (dictionary with cell ids as keys and [lon, lat] as entries)
        lonlatmap = grass.vector_db_select(self.grid,columns='lon,lat')['values']

        # pickout lonlats for gridids
        lons = np.array([lonlatmap[i][0] for i in tbl['gridID']],float)
        lats = np.array([lonlatmap[i][1] for i in tbl['gridID']],float)

        # get proportions in each subbasin
        props = np.zeros(len(tbl))
        for sb in np.unique(tbl['subbasinID']):
            isb = tbl['subbasinID']==sb
            props[isb] = tbl['area'][isb]/tbl['area'][isb].sum()

        # make out array
        outtbl = np.column_stack((tbl['subbasinID'],lons,lats,props))
        # write out
        fmt = '%12i %12.3f %12.3f %12.6f'
        head= '%10s '%'subbasinID' + '%12s %12s %12s'%('lon','lat','weight')
        np.savetxt(self.ncinfopath,outtbl,fmt=fmt,header=head)
        grass.message('Wrote %s lines and %s columns to %s'%(outtbl.shape+(self.ncinfopath,)))
        return 0
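
The proportions loop above normalises each grid cell's area by the total area of its subbasin, so the weights sum to 1 within every subbasin. A self-contained numpy sketch of just that step, with made-up data:

import numpy as np

subbasinID = np.array([1, 1, 2, 2, 2])    # made-up subbasin membership
area = np.array([10., 30., 5., 5., 10.])  # made-up cell areas
props = np.zeros(len(area))
for sb in np.unique(subbasinID):
    isb = subbasinID == sb
    props[isb] = area[isb] / area[isb].sum()
print(props)  # [0.25 0.75 0.25 0.25 0.5] -- sums to 1 per subbasin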
Code example #5
     def export(self):
          self.shapetypes = self.shapetype.getValue().split(',')

          logging.debug("Shapes computation started")
          start = time.time()

          gisenv = gscript.gisenv()

          # query shapes
          sql = 'select min'
          for stype in self.shapetypes:
               sql += ',typ{}'.format(stype)
          sql += ' from tvary'
          shapes = gscript.db_select(sql=sql, driver='sqlite',
                                     database=os.path.join(gisenv['GISDBASE'], gisenv['LOCATION_NAME'],
                                                           self.mapset, 'sqlite/sqlite.db'))

          # query map attributes
          columns = map(lambda x: 'H_{}T{}'.format(x, self.rainlength_value), self.rasters)
          columns.insert(0, self.keycolumn.getValue())
          data = gscript.vector_db_select(map=self.map_name, columns=','.join(columns))

          # export csv
          self.output_file = '{}/{}.csv'.format(self.output_dir, self.map_name)
          with open(self.output_file, 'w') as fd:
               self.export_csv(fd, data, shapes)
          # output_zfile = self.output_file + '.zip'
          # os.chdir(self.output_dir)
          # with ZipFile(output_zfile, 'w') as fzip:
          #      fzip.write('{}'.format(os.path.basename(self.output_file)))
          # self.output_csv.setValue(output_zfile)
          self.output_csv.setValue(self.output_file)

          # export png (graph)
          ### TODO

          logging.info("Shapes calculated successfully: {} sec".format(time.time() - start))
Code example #6
File: m.swim.routing.py  Project: mwort/m.swim
    def getCourse(self,headsb):
        '''Create river course from the headwater subbasin headsb to the outlet,
        that is reached when nextID<0.
        Subbasin vector needs to have subbasinID,nextID,mainChannelLength columns

        Uploads cumulative river lengths for the subbasins of the river course.
        '''
        gm('Calculating course for subbasinID %s...' %headsb)
        # get subbasinIDs,nextIDs,mainChannelLength
        subbasins = grass.vector_db_select(self.subbasins,
                    columns='subbasinID,nextID')['values'].values()
        nextids   = {int(s[0]):int(s[1]) for s in subbasins}

        # make course column
        ccol = 'course_%s' %headsb
        grun('v.db.addcolumn',map=self.subbasins,columns='%s double' %ccol)

        # find river course
        riverlength = 0
        riversb     = []
        sb = headsb
        while sb>0:
            riversb     += [sb]
            riverlength += 1
            grun('v.db.update',map=self.subbasins,column=ccol, value=riverlength,
                 where='subbasinID=%s' %sb)
            sb = nextids[sb]
        # report
        grass.message('''
        Uploaded cumulative river length from the %s to the outlet to the
        subbasin table in column %s (number of subbasins: %s). Check subbasin
        table and sort by %s
        To extract the subbasins use:
        v.extract map=%s where="%s!=''"
        ''' %(headsb,ccol,riverlength,ccol,self.subbasins,ccol))
        return 0
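
The course-finding loop reduces to walking a {subbasinID: nextID} mapping until a negative nextID (the outlet) is reached. A self-contained sketch with a made-up network:

# Made-up network: 1 -> 3 -> 4 -> outlet (-1), and 2 -> 3
nextids = {1: 3, 2: 3, 3: 4, 4: -1}
course = []
sb = 1                 # start at headwater subbasin 1
while sb > 0:
    course.append(sb)
    sb = nextids[sb]
print(course)  # [1, 3, 4]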
Code example #7
def main():
    """
    Links each river segment to the next downstream segment in a tributary 
    network by referencing its category (cat) number in a new column. "0"
    means that the river exits the map.
    """

    options, flags = gscript.parser()
    streams = options['map']
    x1 = options['upstream_easting_column']
    y1 = options['upstream_northing_column']
    x2 = options['downstream_easting_column']
    y2 = options['downstream_northing_column']

    streamsTopo = VectorTopo(streams)
    #streamsTopo.build()

    # 1. Get vectorTopo
    streamsTopo.open(mode='rw')
    """
    points_in_streams = []
    cat_of_line_segment = []

    # 2. Get coordinates
    for row in streamsTopo:
        cat_of_line_segment.append(row.cat)
        if type(row) == vector.geometry.Line:
            points_in_streams.append(row)
    """

    # 3. Coordinates of points: 1 = start, 2 = end
    for colname, coltype in [(x1, 'double precision'),
                             (y1, 'double precision'),
                             (x2, 'double precision'),
                             (y2, 'double precision'),
                             ('tostream', 'int')]:
        try:
            streamsTopo.table.columns.add(colname, coltype)
        except:
            pass  # column already exists
    streamsTopo.table.conn.commit()

    # Is this faster than v.to.db?
    """
    cur = streamsTopo.table.conn.cursor()
    for i in range(len(points_in_streams)):
        cur.execute("update streams set x1="+str(points_in_streams[i][0].x)+" where cat="+str(cat_of_line_segment[i]))
        cur.execute("update streams set y1="+str(points_in_streams[i][0].y)+" where cat="+str(cat_of_line_segment[i]))
        cur.execute("update streams set x2="+str(points_in_streams[i][-1].x)+" where cat="+str(cat_of_line_segment[i]))
        cur.execute("update streams set y2="+str(points_in_streams[i][-1].y)+" where cat="+str(cat_of_line_segment[i]))
    streamsTopo.table.conn.commit()
    streamsTopo.build()
    """
    # v.to.db Works more consistently, at least
    streamsTopo.close()
    v.to_db(map=streams, option='start', columns=x1 + ',' + y1)
    v.to_db(map=streams, option='end', columns=x2 + ',' + y2)

    # 4. Read in and save the start and end coordinate points
    colNames = np.array(vector_db_select(streams)['columns'])
    colValues = np.array(vector_db_select(streams)['values'].values())
    cats = colValues[:,
                     colNames == 'cat'].astype(int).squeeze()  # river number
    xy1 = colValues[:, (colNames == 'x1') + (colNames == 'y1')].astype(
        float)  # upstream
    xy2 = colValues[:, (colNames == 'x2') + (colNames == 'y2')].astype(
        float)  # downstream

    # 5. Build river network
    tocat = []
    for i in range(len(cats)):
        tosegment_mask = np.prod(xy1 == xy2[i], axis=1)
        if np.sum(tosegment_mask) == 0:
            tocat.append(0)
        else:
            tocat.append(cats[tosegment_mask.nonzero()[0][0]])
    tocat = np.asarray(tocat).astype(int)

    # This gives us a set of downstream-facing adjacencies.
    # We will update the database with it.
    streamsTopo.build()
    streamsTopo.open('rw')
    cur = streamsTopo.table.conn.cursor()
    # Default to 0 if no stream flows to it
    cur.execute("update " + streams + " set tostream=0")
    for i in range(len(tocat)):
        cur.execute("update " + streams + " set tostream=" + str(tocat[i]) +
                    " where cat=" + str(cats[i]))
    streamsTopo.table.conn.commit()
    #streamsTopo.build()
    streamsTopo.close()

    gscript.message('')
    gscript.message(
        'Drainage topology built. Check "tostream" column for the downstream cat.'
    )
    gscript.message('A cat value of 0 indicates the downstream-most segment.')
    gscript.message('')
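
Step 5 above matches each segment's downstream endpoint (x2, y2) against every upstream endpoint (x1, y1); np.prod over the two coordinate-equality columns acts as a logical AND. A self-contained numpy sketch with made-up coordinates:

import numpy as np

cats = np.array([1, 2])               # made-up segment cats
xy1 = np.array([[0., 0.], [5., 5.]])  # upstream endpoints
xy2 = np.array([[5., 5.], [9., 9.]])  # downstream endpoints
tocat = []
for i in range(len(cats)):
    mask = np.prod(xy1 == xy2[i], axis=1)  # 1 where both x and y match
    tocat.append(int(cats[mask.nonzero()[0][0]]) if mask.sum() else 0)
print(tocat)  # [2, 0] -- segment 1 flows into segment 2, which exits the map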
Code example #8
v.db_addcolumn(map='streams', columns='x1 double precision, y1 double precision, x2 double precision, y2 double precision')
v.to_db(map='streams', option='start', columns='x1, y1')
v.to_db(map='streams', option='end', columns='x2, y2')
"""
"""
# some representative area -- not sure how v.what.vect works with lines
v.db_addcolumn(map='streams', columns='area0 double precision')
# Hope that this is done in a consistent way -- e.g., start of line.
# But even if not, should always increase downstream.
# Unless same point is chosen for 2...
v.what_vect(map='streams', column='area0', query_map='streams_points', query_column='area_km2')
"""

# CLOSE TO BEING DONE WITH OLD RIVER NUMBERS,
# NOW THAT I JUST THRESHOLD DRAINAGE AREA
colNames = np.array(vector_db_select('streams')['columns'])
colValues = np.array(vector_db_select('streams')['values'].values())
cats = colValues[:, colNames == 'cat'].astype(int).squeeze()
river_numbers = colValues[:, colNames == 'river_number'].astype(int).squeeze()
drainageArea_km2_1 = colValues[:, colNames == 'drainageArea_km2_1'].astype(
    float).squeeze()  # area at upstream end
xy1 = colValues[:, (colNames == 'x1') + (colNames == 'y1')].astype(
    float)  # upstream
xy2 = colValues[:, (colNames == 'x2') + (colNames == 'y2')].astype(
    float)  # downstream
xy = np.vstack((xy1, xy2))

# new river numbers: ascending order from headwaters downstream.
# Sort first by area, and then by river number.
# Raise a warning if both are the same at some point, and I will have to rethink
# this approach.
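
A hedged sketch of the two-key sort described in the closing comment, assuming area is the primary key and river number the tiebreaker; np.lexsort treats the last key passed as primary, and the arrays are made up:

import numpy as np

drainageArea_km2_1 = np.array([5., 1., 3.])  # made-up upstream areas
river_numbers = np.array([2, 1, 1])          # made-up old river numbers
order = np.lexsort((river_numbers, drainageArea_km2_1))  # area primary
print(order)  # [1 2 0] -- smallest upstream area (headwaters) first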
Code example #9
# stats and plots
for gdem in gdems_list[::-1]:
    grass.run_command('g.region', rast=gdem, flags='pa')
    col = gdem.split('_')[1]
    grass.run_command('v.db.addcol', map='ultras', columns=col +
                      ' integer')  # create columns in vector file
    grass.run_command('v.what.rast', vector='ultras', raster=gdem,
                      column=col)  # sample

# check file
# grass.vector_db_select('ultras')['columns']
# ['cat', 'Name', 'long', 'lat', 'alt', 'proem', 'Country', 'ID', 'srtm30', 'etopo1', 'ace2', 'gmted', 'gtopo30', 'globe']

# get attribute data
attrs = grass.vector_db_select(
    'ultras',
    columns='Name,alt,proem,Country,ID,srtm30,globe,gtopo30,gmted,ace2,etopo1')

# lists for vals
cat = []
name = []
alt = []
proem = []
country = []
pid = []
srtm30 = []
globe = []
gtopo30 = []
gmted = []
ace2 = []
etopo1 = []
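
The loop that fills these lists is not part of this excerpt; a hedged sketch of the usual pattern, where attrs['values'] maps each cat to the requested column values in order:

# Sketch only: fill the lists from the vector_db_select result above
for c, vals in attrs['values'].items():
    cat.append(int(c))
    name.append(vals[0])        # 'Name' was the first requested column
    alt.append(float(vals[1]))  # 'alt' was second; remaining columns follow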
Code example #10
def main():
    """
    Builds a grid for the MODFLOW component of the USGS hydrologic model,
    GSFLOW.
    """
    
    options, flags = gscript.parser()
    basin = options['basin']
    pp = options['pour_point']
    raster_input = options['raster_input']
    dx = options['dx']
    dy = options['dy']
    grid = options['output']
    mask = options['mask_output']
    bc_cell = options['bc_cell']
    # basin='basins_tmp_onebasin'; pp='pp_tmp'; raster_input='DEM'; raster_output='DEM_coarse'; dx=dy='500'; grid='grid_tmp'; mask='mask_tmp'
    
    """
    # Fatal if raster input and output are not both set
    _lena0 = (len(raster_input) == 0)
    _lenb0 = (len(raster_output) == 0)
    if _lena0 + _lenb0 == 1:
        gscript.fatal("You must set both raster input and output, or neither.")
    """
    
    # Fatal if bc_cell set but mask and grid are false
    if bc_cell != '':
        if (mask == '') or (pp == ''):
            gscript.fatal('Mask and pour point must be set to define b.c. cell')
        
    # Create grid -- overlaps DEM, three cells of padding
    g.region(raster=raster_input, ewres=dx, nsres=dy)
    gscript.use_temp_region()
    reg = gscript.region()
    reg_grid_edges_sn = np.linspace(reg['s'], reg['n'], reg['rows'])
    reg_grid_edges_we = np.linspace(reg['w'], reg['e'], reg['cols'])
    g.region(vector=basin, ewres=dx, nsres=dy)
    regnew = gscript.region()
    # Use a grid ratio -- don't match exactly the desired MODFLOW resolution
    grid_ratio_ns = np.round(regnew['nsres']/reg['nsres'])
    grid_ratio_ew = np.round(regnew['ewres']/reg['ewres'])
    # Get S, W, and then move the unit number of grid cells over to get N and E
    # and include 3 cells of padding around the whole watershed
    _s_dist = np.abs(reg_grid_edges_sn - (regnew['s'] - 3.*regnew['nsres']) )
    _s_idx = np.where(_s_dist == np.min(_s_dist))[0][0]
    _s = float(reg_grid_edges_sn[_s_idx])
    _n_grid = np.arange(_s, reg['n'] + 3*grid_ratio_ns*reg['nsres'], grid_ratio_ns*reg['nsres'])
    _n_dist = np.abs(_n_grid - (regnew['n'] + 3.*regnew['nsres']))
    _n_idx = np.where(_n_dist == np.min(_n_dist))[0][0]
    _n = float(_n_grid[_n_idx])
    _w_dist = np.abs(reg_grid_edges_we - (regnew['w'] - 3.*regnew['ewres']))
    _w_idx = np.where(_w_dist == np.min(_w_dist))[0][0]
    _w = float(reg_grid_edges_we[_w_idx])
    _e_grid = np.arange(_w, reg['e'] + 3*grid_ratio_ew*reg['ewres'], grid_ratio_ew*reg['ewres'])
    _e_dist = np.abs(_e_grid - (regnew['e'] + 3.*regnew['ewres']))
    _e_idx = np.where(_e_dist == np.min(_e_dist))[0][0]
    _e = float(_e_grid[_e_idx])
    # Finally make the region
    g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns*reg['nsres']), ewres=str(grid_ratio_ew*reg['ewres']))
    # And then make the grid
    v.mkgrid(map=grid, overwrite=gscript.overwrite())

    # Cell numbers (row, column, continuous ID)
    v.db_addcolumn(map=grid, columns='id int', quiet=True)
    colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns'])
    colValues = np.array(gscript.vector_db_select(grid, layer=1)['values'].values())
    cats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    rows = colValues[:,colNames == 'row'].astype(int).squeeze()
    cols = colValues[:,colNames == 'col'].astype(int).squeeze()
    nrows = np.max(rows)
    ncols = np.max(cols)
    cats = np.ravel([cats])
    _id = np.ravel([ncols * (rows - 1) + cols])
    _id_cat = []
    for i in range(len(_id)):
        _id_cat.append( (_id[i], cats[i]) )
    gridTopo = VectorTopo(grid)
    gridTopo.open('rw')
    cur = gridTopo.table.conn.cursor()
    cur.executemany("update "+grid+" set id=? where cat=?", _id_cat)
    gridTopo.table.conn.commit()
    gridTopo.close()

    # Cell area
    v.db_addcolumn(map=grid, columns='area_m2 double precision', quiet=True)
    v.to_db(map=grid, option='area', units='meters', columns='area_m2', quiet=True)

    # Basin mask
    if len(mask) > 0:
        # Fine resolution region:
        g.region(n=reg['n'], s=reg['s'], w=reg['w'], e=reg['e'], nsres=reg['nsres'], ewres=reg['ewres'])
        # Rasterize basin
        v.to_rast(input=basin, output=mask, use='val', value=1, overwrite=gscript.overwrite(), quiet=True)
        # Coarse resolution region:
        g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns*reg['nsres']), ewres=str(grid_ratio_ew*reg['ewres']))
        r.resamp_stats(input=mask, output=mask, method='sum', overwrite=True, quiet=True)
        r.mapcalc('tmp'+' = '+mask+' > 0', overwrite=True, quiet=True)
        g.rename(raster=('tmp',mask), overwrite=True, quiet=True)
        r.null(map=mask, null=0, quiet=True)
        # Add mask location (1 vs 0) in the MODFLOW grid
        v.db_addcolumn(map=grid, columns='basinmask double precision', quiet=True)
        v.what_rast(map=grid, type='centroid', raster=mask, column='basinmask')

    """
    # Resampled raster
    if len(raster_output) > 0:
        r.resamp_stats(input=raster_input, output=raster_output, method='average', overwrite=gscript.overwrite(), quiet=True)
    """

    # Pour point
    if len(pp) > 0:
        v.db_addcolumn(map=pp, columns=('row integer','col integer'), quiet=True)
        v.build(map=pp, quiet=True)
        v.what_vect(map=pp, query_map=grid, column='row', query_column='row', quiet=True)
        v.what_vect(map=pp, query_map=grid, column='col', query_column='col', quiet=True)

    # Next point downstream of the pour point
    # Requires pp (always) and mask (sometimes)
    # Dependency set above w/ gscript.fatal
    #g.region(raster='DEM')
    #dx = gscript.region()['ewres']
    #dy = gscript.region()['nsres']
    if len(bc_cell) > 0:
        ########## NEED TO USE TRUE TEMPORARY FILE ##########
        # May not work with dx != dy!
        v.to_rast(input=pp, output='tmp', use='val', value=1, overwrite=True)
        r.buffer(input='tmp', output='tmp', distances=float(dx)*1.5, overwrite=True)
        r.mapcalc('tmp2 = if(tmp==2,1,null()) * '+raster_input, overwrite=True)
        #r.mapcalc('tmp = if(isnull('+raster_input+',0,(tmp == 2)))', overwrite=True)
        #g.region(rast='tmp')
        #r.null(map=raster_input,
        #g.region(raster=raster_input)
        #r.resample(input=raster_input, output='tmp3', overwrite=True)
        r.resamp_stats(input=raster_input, output='tmp3', method='minimum', overwrite=True)
        r.drain(input='tmp3', start_points=pp, output='tmp', overwrite=True)
        #g.region(w=str(_w), e=str(_e), s=str(_s), n=str(_n), nsres=str(grid_ratio_ns*reg['nsres']), ewres=str(grid_ratio_ew*reg['ewres']))
        #r.resamp_stats(input='tmp2', output='tmp3', overwrite=True)
        #g.rename(raster=('tmp3','tmp2'), overwrite=True, quiet=True)
        r.mapcalc('tmp3 = tmp2 * tmp', overwrite=True, quiet=True)
        g.rename(raster=('tmp3','tmp'), overwrite=True, quiet=True)
        #r.null(map='tmp', setnull=0) # Not necessary: center point removed above
        r.to_vect(input='tmp', output=bc_cell, type='point', column='z',
                  overwrite=gscript.overwrite(), quiet=True)
        v.db_addcolumn(map=bc_cell, columns=('row integer','col integer','x double precision','y double precision'), quiet=True)
        v.build(map=bc_cell, quiet=True)
        v.what_vect(map=bc_cell, query_map=grid, column='row', \
                    query_column='row', quiet=True)
        v.what_vect(map=bc_cell, query_map=grid, column='col', \
                    query_column='col', quiet=True)
        v.to_db(map=bc_cell, option='coor', columns=('x,y'))
        
        # Of the candidates, the pour point is the closest one
        #v.db_addcolumn(map=bc_cell, columns=('dist_to_pp double precision'), quiet=True)
        #v.distance(from_=bc_cell, to=pp, upload='dist', column='dist_to_pp')

        
        # Find out if this is diagonal: finite difference works only N-S, W-E
        colNames = np.array(gscript.vector_db_select(pp, layer=1)['columns'])
        colValues = np.array(gscript.vector_db_select(pp, layer=1)['values'].values())
        pp_row = colValues[:,colNames == 'row'].astype(int).squeeze()
        pp_col = colValues[:,colNames == 'col'].astype(int).squeeze()
        colNames = np.array(gscript.vector_db_select(bc_cell, layer=1)['columns'])
        colValues = np.array(gscript.vector_db_select(bc_cell, layer=1)['values'].values())
        bc_row = colValues[:,colNames == 'row'].astype(int).squeeze()
        bc_col = colValues[:,colNames == 'col'].astype(int).squeeze()
        # Also get x and y while we are at it: may be needed later
        bc_x = colValues[:,colNames == 'x'].astype(float).squeeze()
        bc_y = colValues[:,colNames == 'y'].astype(float).squeeze()
        if (bc_row != pp_row).all() and (bc_col != pp_col).all():
            if bc_row.ndim > 0:
                if len(bc_row) > 1:
                    for i in range(len(bc_row)):
                        """
                        UNTESTED!!!!
                        And probably unimportant -- having 2 cells with river
                        going through them is most likely going to happen with
                        two adjacent cells -- so a side and a corner
                        """
                        _col1, _row1 = str(bc_col[i]), str(pp_row[i])
                        _col2, _row2 = str(pp_col[i]), str(bc_row[i])
                        # Check if either of these is covered by the basin mask
                        _ismask_1 = gscript.vector_db_select(grid, layer=1, where='(row == '+_row1+') AND (col =='+_col1+')', columns='basinmask')
                        _ismask_1 = int(_ismask_1['values'].values()[0][0])
                        _ismask_2 = gscript.vector_db_select(grid, layer=1, where='(row == '+_row2+') AND (col =='+_col2+')', columns='basinmask')
                        _ismask_2 = int(_ismask_2['values'].values()[0][0])
                        # check if either of these is the other point
                        """
                        NOT DOING THIS YET -- HAVEN'T THOUGHT THROUGH IF
                        ACTUALLY NECESSARY. (And this is an edge case anyway)
                        """
                        # If both covered by mask, error
                        if _ismask_1 and _ismask_2:
                            gscript.fatal('All possible b.c. cells covered by basin mask.\n\
                                         Contact the developer: awickert (at) umn(.)edu')
                                
            # If not diagonal, two possible locations that are adjacent
            # to the pour point
            _col1, _row1 = str(bc_col), str(pp_row)
            _col2, _row2 = str(pp_col), str(bc_row)
            # Check if either of these is covered by the basin mask
            _ismask_1 = gscript.vector_db_select(grid, layer=1, where='(row == '+_row1+') AND (col =='+_col1+')', columns='basinmask')
            _ismask_1 = int(_ismask_1['values'].values()[0][0])
            _ismask_2 = gscript.vector_db_select(grid, layer=1, where='(row == '+_row2+') AND (col =='+_col2+')', columns='basinmask')
            _ismask_2 = int(_ismask_2['values'].values()[0][0])
            # If both covered by mask, error
            if _ismask_1 and _ismask_2:
                gscript.fatal('All possible b.c. cells covered by basin mask.\n\
                             Contact the developer: awickert (at) umn(.)edu')
            # Otherwise, keep the candidate cells that are not covered by the
            # basin mask and set ...
            # ... wait, do we want the point that touches as few interior
            # cells as possible?
            # maybe just try setting both and seeing what happens for now!
            else:
                # Get dx and dy
                #dx = gscript.region()['ewres']
                #dy = gscript.region()['nsres']
                # Build tool to handle multiple b.c. cells?
                bcvect = vector.Vector(bc_cell)
                bcvect.open('rw')
                _cat_i = 2
                if _ismask_1 != 0:
                    # _x should always be bc_x, but writing generalized code
                    _x = bc_x + float(dx) * (int(_col1) - bc_col) # col 1 at w edge
                    _y = bc_y - float(dy) * (int(_row1) - bc_row) # row 1 at n edge
                    point0 = Point(_x,_y)
                    bcvect.write(point0, cat=_cat_i, attrs=(None, _row1, _col1, _x, _y), )
                    bcvect.table.conn.commit()
                    _cat_i += 1
                if _ismask_2 != 0:
                    # _y should always be bc_y, but writing generalized code
                    _x = bc_x + float(dx) * (int(_col2) - bc_col) # col 1 at w edge
                    _y = bc_y - float(dy) * (int(_row2) - bc_row) # row 1 at n edge
                    point0 = Point(_x,_y)
                    bcvect.write(point0, cat=_cat_i, attrs=(None, _row2, _col2, _x, _y), )            
                    bcvect.table.conn.commit()
                # Build database table and vector geometry
                bcvect.build()
                bcvect.close()

    g.region(n=reg['n'], s=reg['s'], w=reg['w'], e=reg['e'], nsres=reg['nsres'], ewres=reg['ewres'])
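
The cell-numbering step in this function maps 1-based (row, col) pairs to a continuous row-major ID via id = ncols * (row - 1) + col. A tiny self-contained sketch with a made-up 2 x 3 grid:

import numpy as np

rows = np.array([1, 1, 1, 2, 2, 2])  # made-up v.mkgrid-style rows
cols = np.array([1, 2, 3, 1, 2, 3])  # and columns
ncols = cols.max()
_id = ncols * (rows - 1) + cols
print(_id)  # [1 2 3 4 5 6]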
Code example #11
def main():
    """
    Links each river segment to the next downstream segment in a tributary 
    network by referencing its category (cat) number in a new column. "0"
    means that the river exits the map.
    """

    options, flags = gscript.parser()

    streams = options['input_streams']
    basins = options['input_basins']
    downstream_cat = options['cat']
    x_outlet = float(options['x_outlet'])
    y_outlet = float(options['y_outlet'])
    output_basins = options['output_basin']
    output_streams = options['output_streams']
    output_pour_point = options['output_pour_point']

    #print options
    #print flags

    # Check that either x,y or cat are set
    if (downstream_cat != '') or ((x_outlet != '') and (y_outlet != '')):
        pass
    else:
        gscript.fatal(
            'You must set either "cat" or "x_outlet" and "y_outlet".')

    # NEED TO ADD IF-STATEMENT HERE TO AVOID AUTOMATIC OVERWRITING!!!!!!!!!!!
    if downstream_cat == '':
        # Need to find outlet pour point -- start by creating a point at this
        # location to use with v.distance
        try:
            v.db_droptable(table='tmp', flags='f')
        except:
            pass
        tmp = vector.Vector('tmp')
        _cols = [(u'cat', 'INTEGER PRIMARY KEY'), (u'x', 'DOUBLE PRECISION'),
                 (u'y', 'DOUBLE PRECISION'), (u'strcat', 'DOUBLE PRECISION')]
        tmp.open('w', tab_name='tmp', tab_cols=_cols)
        point0 = Point(x_outlet, y_outlet)
        tmp.write(
            point0,
            cat=1,
            attrs=(str(x_outlet), str(y_outlet), 0),
        )
        tmp.table.conn.commit()
        tmp.build()
        tmp.close()
        # Now v.distance
        gscript.run_command('v.distance',
                            from_='tmp',
                            to=streams,
                            upload='cat',
                            column='strcat')
        #v.distance(_from_='tmp', to=streams, upload='cat', column='strcat')
        downstream_cat = gscript.vector_db_select(map='tmp', columns='strcat')
        downstream_cat = int(downstream_cat['values'].values()[0][0])

    # Attributes of streams
    colNames = np.array(vector_db_select(streams)['columns'])
    colValues = np.array(vector_db_select(streams)['values'].values())
    tostream = colValues[:, colNames == 'tostream'].astype(int).squeeze()
    cats = colValues[:,
                     colNames == 'cat'].astype(int).squeeze()  # = "fromstream"

    # Find network
    basincats = [downstream_cat]  # start here
    most_upstream_cats = [downstream_cat
                          ]  # all of those for which new cats must be sought
    while True:
        if len(most_upstream_cats) == 0:
            break
        tmp = list(most_upstream_cats)  # copy to a temp list: old values
        most_upstream_cats = []  # Ready to accept new values
        for ucat in tmp:
            most_upstream_cats += list(cats[tostream == int(ucat)])
            basincats += most_upstream_cats

    basincats = list(set(list(basincats)))

    basincats_str = ','.join(map(str, basincats))

    # Many basins out -- need to use overwrite flag in future!
    #SQL_OR = 'rnum = ' + ' OR rnum = '.join(map(str, basincats))
    #SQL_OR = 'cat = ' + ' OR cat = '.join(map(str, basincats))
    SQL_LIST = 'cat IN (' + ', '.join(map(str, basincats)) + ')'
    if len(basins) > 0:
        v.extract(input=basins,
                  output=output_basins,
                  where=SQL_LIST,
                  overwrite=gscript.overwrite(),
                  quiet=True)
    if len(streams) > 0:
        v.extract(input=streams,
                  output=output_streams,
                  cats=basincats_str,
                  overwrite=gscript.overwrite(),
                  quiet=True)

    # If we want to output the pour point location
    if len(output_pour_point) > 0:
        _pp = gscript.vector_db_select(map=streams,
                                       columns='x2,y2',
                                       where='cat=' + str(downstream_cat))
        _xy = np.squeeze(_pp['values'].values())
        _x = float(_xy[0])
        _y = float(_xy[1])
        # NEED TO ADD IF-STATEMENT HERE TO AVOID AUTOMATIC OVERWRITING!!!!!!!!!!!
        try:
            v.db_droptable(table=output_pour_point, flags='f')
        except:
            pass
        pptmp = vector.Vector(output_pour_point)
        _cols = [(u'cat', 'INTEGER PRIMARY KEY'), (u'x', 'DOUBLE PRECISION'),
                 (u'y', 'DOUBLE PRECISION')]
        pptmp.open('w', tab_name=output_pour_point, tab_cols=_cols)
        point0 = Point(_x, _y)
        pptmp.write(
            point0,
            cat=1,
            attrs=(str(_x), str(_y)),
        )
        pptmp.table.conn.commit()
        pptmp.build()
        pptmp.close()
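
The network search above is a breadth-first walk upstream over the tostream relation: starting from the downstream cat, it repeatedly collects every segment whose tostream points at a cat found in the previous pass. A self-contained sketch with made-up adjacencies:

import numpy as np

cats = np.array([1, 2, 3, 4])      # made-up segment cats
tostream = np.array([3, 3, 4, 0])  # cat each segment flows to (0 = exits map)
downstream_cat = 4
basincats = [downstream_cat]
frontier = [downstream_cat]
while frontier:
    nxt = []
    for ucat in frontier:
        nxt += [int(c) for c in cats[tostream == ucat]]
    basincats += nxt
    frontier = nxt
print(sorted(set(basincats)))  # [1, 2, 3, 4]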
Code example #12
 except:
   try:
     grass.run_command('g.region', rast='topo_000000')
     grass.mapcalc('discharge_to_coast_'+age+' = abs('+'ocean_plus_shore_'+age+' + accumulation_ice_'+age+')', overwrite=True)
     grass.run_command('r.null', map='discharge_to_coast_'+age, setnull=0) # speeds up
     grass.run_command('g.region', w=-180, e=180)
     grass.run_command('r.to.vect', input='discharge_to_coast_'+age, output='discharge_to_coast_'+age, type='point', column='discharge_m3_s', overwrite=True)
   except:
     print age, 'ERROR'
 try:
   # Top commented because....
   # This works only if it is lat/lon; will fail for projected grids
   #grass.run_command('v.distance', from_='discharge_to_coast_'+age, to='sea_grid_points', upload='to_x,to_y', column='sea_grid_lon,sea_grid_lat')
   # Uses stored values to work for projected grids
   try:
     tmp = grass.vector_db_select('discharge_to_coast_'+age, columns='sea_grid_lat').values()[0].values()
     if tmp[0] == ['']:
       if isll:
         grass.run_command('v.distance', _from='discharge_to_coast_'+age, to='sea_grid_points', upload='to_x,to_y', column='sea_grid_lon,sea_grid_lat')
       else:
         grass.run_command('v.distance', from_='discharge_to_coast_'+age, to='sea_grid_points', upload='to_attr', to_column='lon', column='sea_grid_lon')
         grass.run_command('v.distance', from_='discharge_to_coast_'+age, to='sea_grid_points', upload='to_attr', to_column='lat', column='sea_grid_lat')
   except:
     grass.run_command('v.db.addcolumn', map='discharge_to_coast_'+age, columns='sea_grid_lon double precision, sea_grid_lat double precision')
     if isll:
       grass.run_command('v.distance', _from='discharge_to_coast_'+age, to='sea_grid_points', upload='to_x,to_y', column='sea_grid_lon,sea_grid_lat')
     else:
       grass.run_command('v.distance', from_='discharge_to_coast_'+age, to='sea_grid_points', upload='to_attr', to_column='lon', column='sea_grid_lon')
       grass.run_command('v.distance', from_='discharge_to_coast_'+age, to='sea_grid_points', upload='to_attr', to_column='lat', column='sea_grid_lat')
 except:
   pass # No discharge points!
Code example #13
def main():

    pts_input = options['input']
    output = options['output']
    cost_map = options['cost_map']
    post_mask = options['post_mask']
    column = options['column']
    friction = float(options['friction'])
    layer = options['layer']
    where = options['where']
    workers = int(options['workers'])

    if workers == 1 and "WORKERS" in os.environ:
        workers = int(os.environ["WORKERS"])
    if workers < 1:
        workers = 1

    pid = str(os.getpid())
    tmp_base = 'tmp_icw_' + pid + '_'

    # do the maps exist?
    if not grass.find_file(pts_input, element='vector')['file']:
        grass.fatal(_("Vector map <%s> not found") % pts_input)
    if post_mask:
        if grass.find_file('MASK')['file']:
            grass.fatal(
                _("A MASK already exists; remove it before using the post_mask option."
                  ))
        if not grass.find_file(post_mask)['file']:
            grass.fatal(_("Raster map <%s> not found") % post_mask)

    grass.verbose(_("v.surf.icw -- Inverse Cost Weighted Interpolation"))
    grass.verbose(
        _("Processing %s -> %s, column=%s, Cf=%g") %
        (pts_input, output, column, friction))

    if flags['r']:
        grass.verbose(_("Using (d^n)*log(d) radial basis function."))

    grass.verbose(
        "------------------------------------------------------------------------"
    )

    # adjust so that tiny numbers don't hog all the FP precision space
    #  if friction = 4: divisor ~ 10.0
    #  if friction = 5: divisor ~ 100.0
    #  if friction = 6: divisor ~ 500.0
    if friction >= 4:
        divisor = 0.01 * pow(friction, 6)
    else:
        divisor = 1

    # Check that we have the column and it is the correct type
    try:
        coltype = grass.vector_columns(pts_input, layer)[column]
    except KeyError:
        grass.fatal(
            _("Data column <%s> not found in vector points map <%s>") %
            (column, pts_input))

    if coltype['type'] not in ('INTEGER', 'DOUBLE PRECISION'):
        grass.fatal(_("Data column must be numeric"))

    # cleanse the cost area mask to a flat =1 for our purposes
    area_mask = tmp_base + 'area'
    grass.mapcalc("$result = if($cost_map, 1, null())",
                  result=area_mask,
                  cost_map=cost_map,
                  quiet=True)

    ## done with prep work,
    ########################################################################
    ## Commence crunching ..

    # crop out only points in region
    addl_opts = {}
    if where:
        addl_opts['where'] = '%s' % where

    points_list = grass.read_command('v.out.ascii',
                                     input=pts_input,
                                     output='-',
                                     flags='r',
                                     **addl_opts).splitlines()

    # Needed to strip away empty entries from MS Windows newlines
    #   list() is needed for Python 3 compatibility
    points_list = list([_f for _f in points_list if _f])

    # convert into a 2D list, drop unneeded cat column
    # to drop cat col, add this to the end of the line [:-1]
    #fixme: how does this all react for 3D starting points?
    for i in range(len(points_list)):
        points_list[i] = points_list[i].split('|')

    # count number of starting points (n). This value will later be decremented
    #  if points are found to be off the cost map or out of region.
    n = len(points_list)

    if n > 200:
        grass.warning(
            _("Computation is expensive! Please consider " +
              "fewer points or get ready to wait a while ..."))
        import time
        time.sleep(5)

    #### generate cost maps for each site in range
    grass.message(_("Generating cost maps ..."))

    # avoid do-it-yourself brain surgery
    points_list_orig = list(points_list)

    proc = {}
    num = 1
    for i in range(n):
        position = points_list_orig[i]
        easting = position[0]
        northing = position[1]
        cat = int(position[-1])

        # retrieve data value from vector's attribute table:
        data_value = grass.vector_db_select(pts_input,
                                            columns=column)['values'][cat][0]

        if not data_value:
            grass.message(
                _("Site %d of %d,  e=%.4f  n=%.4f  cat=%d  data=?") %
                (num, n, float(easting), float(northing), cat))
            grass.message(_(" -- Skipping, no data here."))
            del (points_list[num - 1])
            n -= 1
            continue
        else:
            grass.message(
                _("Site %d of %d,  e=%.4f  n=%.4f  cat=%d  data=%.8g") %
                (num, n, float(easting), float(northing), cat,
                 float(data_value)))

        # we know the point is in the region, but is it in a non-null area of the cost surface?
        rast_val = grass.read_command(
            'r.what',
            map=area_mask,
            coordinates='%s,%s' %
            (position[0], position[1])).strip().split('|')[-1]
        if rast_val == '*':
            grass.message(_(" -- Skipping, point lies outside of cost_map."))
            del (points_list[num - 1])
            n -= 1
            continue

        # it's ok to proceed
        try:
            data_value = float(data_value)
        except:
            grass.fatal('Data value [%s] is non-numeric' % data_value)

        cost_site_name = tmp_base + 'cost_site.' + '%05d' % num
        proc[num - 1] = grass.start_command('r.cost',
                                            flags='k',
                                            input=area_mask,
                                            output=cost_site_name,
                                            start_coordinates=easting + ',' +
                                            northing,
                                            quiet=True)
        # stall to wait for the nth worker to complete,
        if num % workers == 0:
            proc[num - 1].wait()

        num += 1

    # make sure everyone is finished
    for i in range(n):
        if proc[i].wait() != 0:
            grass.fatal(_('Problem running %s') % 'r.cost')

    grass.message(_("Removing anomalies at site positions ..."))

    proc = {}
    for i in range(n):
        cost_site_name = tmp_base + 'cost_site.' + '%05d' % (i + 1)
        #max_cost="$GIS_OPT_MAX_COST"  : commented out until r.null cleansing/continue code is sorted out
        #start_points=tmp_idw_cost_site_$$

        # we do this so the divisor exists and the weighting is huge at the exact sample spots
        # more efficient to reclass to 1?
        proc[i] = grass.mapcalc_start(
            "$cost_n_cleansed = if($cost_n == 0, 0.1, $cost_n)",
            cost_n_cleansed=cost_site_name + '.cleansed',
            cost_n=cost_site_name,
            quiet=True)
        # stall to wait for the nth worker to complete,
        if (i + 1) % workers == 0:
            #print 'stalling ...'
            proc[i].wait()

    # make sure everyone is finished
    for i in range(n):
        if proc[i].wait() != 0:
            grass.fatal(_('Problem running %s') % 'r.mapcalc')

    grass.message(_("Applying radial decay ..."))

    proc = {}
    for i in range(n):
        cost_site_name = tmp_base + 'cost_site.' + '%05d' % (i + 1)
        grass.run_command('g.remove',
                          flags='f',
                          type='raster',
                          name=cost_site_name,
                          quiet=True)
        grass.run_command('g.rename',
                          raster=cost_site_name + '.cleansed' + ',' +
                          cost_site_name,
                          quiet=True)

        # r.to.vect then r.patch output
        # v.to.rast in=tmp_idw_cost_site_29978 out=tmp_idw_cost_val_$$ use=val val=10

        if not flags['r']:
            #  exp(3,2) is 3^2  etc.  as is pow(3,2)
            # r.mapcalc "1by_cost_site_sqrd.$NUM =  1.0 / exp(cost_site.$NUM , $FRICTION)"
            #      EXPRESSION="1.0 / pow(cost_site.$NUM $DIVISOR, $FRICTION )"
            expr = '1.0 / pow($cost_n / ' + str(divisor) + ', $friction)'
        else:
            # use log10() or ln() ?
            #      EXPRESSION="1.0 / ( pow(cost_site.$NUM, $FRICTION) * log (cost_site.$NUM) )"
            expr = '1.0 / ( pow($cost_n, $friction) * log($cost_n) )'

        grass.debug("r.mapcalc expression is: [%s]" % expr)

        one_by_cost_site_sq_n = tmp_base + '1by_cost_site_sq.' + '%05d' % (i +
                                                                           1)

        proc[i] = grass.mapcalc_start("$result = " + expr,
                                      result=one_by_cost_site_sq_n,
                                      cost_n=cost_site_name,
                                      friction=friction,
                                      quiet=True)
        # stall to wait for the nth worker to complete,
        if (i + 1) % workers == 0:
            #print 'stalling ...'
            proc[i].wait()

        # r.patch in=1by_cost_site_sqrd.${NUM},tmp_idw_cost_val_$$ out=1by_cost_site_sqrd.${NUM} --o
        # g.remove type=rast name=cost_site.$NUM -f

    # make sure everyone is finished
    for i in range(n):
        if proc[i].wait() != 0:
            grass.fatal(_('Problem running %s') % 'r.mapcalc')

    grass.run_command('g.remove',
                      flags='f',
                      type='raster',
                      pattern=tmp_base + 'cost_site.*',
                      quiet=True)
    #grass.run_command('g.list', type = 'raster', mapset = '.')

    #######################################################
    #### Step 3) find sum(cost^2)
    grass.verbose('')
    grass.verbose(_("Finding sum of squares ..."))

    #todo: test if MASK exists already, fatal exit if it does?
    if post_mask:
        grass.message(_("Setting post_mask <%s>") % post_mask)
        grass.mapcalc("MASK = $maskmap", maskmap=post_mask, overwrite=True)

    grass.message(_("Summation of cost weights ..."))

    # write all n weight maps (including the first) to a list file for r.series
    global TMP_FILE
    TMP_FILE = grass.tempfile()
    with open(TMP_FILE, 'w') as maplist:
        for i in range(1, n + 1):
            mapname = '%s1by_cost_site_sq.%05d' % (tmp_base, i)
            maplist.write(mapname + '\n')

    #grass.run_command('g.list', type = 'raster', mapset = '.')

    sum_of_1by_cost_sqs = tmp_base + 'sum_of_1by_cost_sqs'
    try:
        grass.run_command('r.series',
                          method='sum',
                          file=TMP_FILE,
                          output=sum_of_1by_cost_sqs)
    except CalledModuleError:
        grass.fatal(_('Problem running %s') % 'r.series')

    if post_mask:
        grass.message(_("Removing post_mask <%s>") % post_mask)
        grass.run_command('g.remove', flags='f', name='MASK', quiet=True)

    #######################################################
    #### Step 4) ( 1/di^2 / sum(1/d^2) ) *  ai
    grass.verbose('')
    grass.message(_("Creating partial weights ..."))

    proc = {}
    num = 1
    for position in points_list:
        easting = position[0]
        northing = position[1]
        cat = int(position[-1])
        data_value = grass.vector_db_select(pts_input,
                                            columns=column)['values'][cat][0]
        data_value = float(data_value)

        # failsafe: at this point the data values should all be valid
        if not data_value:
            grass.message(
                _("Site %d of %d,  cat = %d, data value = ?") % (num, n, cat))
            grass.message(
                _(" -- Skipping, no data here. [Probably programmer error]"))
            n -= 1
            continue
        else:
            grass.message(
                _("Site %d of %d,  cat = %d, data value = %.8g") %
                (num, n, cat, data_value))

        # we know the point is in the region, but is it in a non-null area of the cost surface?
        rast_val = grass.read_command(
            'r.what',
            map=area_mask,
            coordinates='%s,%s' %
            (position[0], position[1])).strip().split('|')[-1]
        if rast_val == '*':
            grass.message(
                _(" -- Skipping, point lies outside of cost_map. [Probably programmer error]"
                  ))
            n -= 1
            continue

        partial_n = tmp_base + 'partial.' + '%05d' % num
        one_by_cost_site_sq = tmp_base + '1by_cost_site_sq.' + '%05d' % num

        #"( $DATA_VALUE / $N ) * (1.0 - ( cost_sq_site.$NUM / sum_of_cost_sqs ))"
        #"( cost_sq_site.$NUM / sum_of_cost_sqs ) * ( $DATA_VALUE / $N )"

        proc[num - 1] = grass.mapcalc_start(
            "$partial_n = ($data * $one_by_cost_sq) / $sum_of_1by_cost_sqs",
            partial_n=partial_n,
            data=data_value,
            one_by_cost_sq=one_by_cost_site_sq,
            sum_of_1by_cost_sqs=sum_of_1by_cost_sqs,
            quiet=True)

        # stall to wait for the nth worker to complete,
        if num % workers == 0:
            proc[num - 1].wait()

        # free up disk space ASAP
        #grass.run_command('g.remove', flags = 'f', type = 'raster', name = one_by_cost_site_sq, quiet = True)

        num += 1
        if num > n:
            break

    # make sure everyone is finished
    for i in range(n):
        proc[i].wait()

    # free up disk space ASAP
    grass.run_command('g.remove',
                      flags='f',
                      type='raster',
                      pattern=tmp_base + '1by_cost_site_sq.*',
                      quiet=True)
    #grass.run_command('g.list', type = 'raster', mapset = '.')

    #######################################################
    grass.message('')
    grass.message(_("Calculating final values ..."))

    input_maps = tmp_base + 'partial.%05d' % 1
    for i in range(2, n + 1):
        input_maps += ',%spartial.%05d' % (tmp_base, i)

    try:
        grass.run_command('r.series',
                          method='sum',
                          input=input_maps,
                          output=output)
    except CalledModuleError:
        grass.fatal(_('Problem running %s') % 'r.series')

    #TODO: r.patch in v.to.rast of values at exact seed site locations. currently set to null

    grass.run_command('r.colors', map=output, color='bcyr', quiet=True)
    grass.run_command('r.support',
                      map=output,
                      history='',
                      title='Inverse cost-weighted interpolation')
    grass.run_command('r.support',
                      map=output,
                      history='v.surf.icw interpolation:')
    grass.run_command('r.support',
                      map=output,
                      history='  input map=' + pts_input +
                      '   attribute column=' + column)
    grass.run_command('r.support',
                      map=output,
                      history='  cost map=' + cost_map +
                      '   coefficient of friction=' + str(friction))
    if flags['r']:
        grass.run_command('r.support',
                          map=output,
                          history='  (d^n)*log(d) as radial basis function')
    if post_mask:
        grass.run_command('r.support',
                          map=output,
                          history='  post-processing mask=' + post_mask)
    if where:
        grass.run_command('r.support',
                          map=output,
                          history='  SQL query= WHERE ' + where)

    # save layer #? to metadata?   command line hist?

    #######################################################
    # Step 5) rm cost and cost_sq maps, tmp_icw_points, etc
    cleanup()

    #######################################################
    # Step 6) done!
    grass.message(_("Done! Results written to <%s>." % output))
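
Overall, v.surf.icw computes, per output cell, value = sum_i(a_i * w_i) / sum_i(w_i) with w_i = 1 / cost_i^friction, where a_i is site i's data value and cost_i the cumulative travel cost from site i (the divisor only rescales the weights and cancels in the ratio). A self-contained numpy sketch of the weighting at a single cell, with made-up numbers:

import numpy as np

a = np.array([10., 20., 30.])    # made-up site data values
c = np.array([1., 2., 4.])       # made-up cumulative costs to one cell
friction = 2.0
w = 1.0 / c**friction            # the '1by_cost_site_sq' maps
value = (a * w).sum() / w.sum()  # sum of the 'partial' maps
print(round(value, 3))           # 12.857 -- dominated by the cheapest site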
Code example #14
File: frame.py  Project: rkanavath/grass-ci
    def _getSTVDData(self, timeseries):
        """Load data and read properties
        :param list timeseries: a list of timeseries
        """

        mode = None
        unit = None
        cats = None
        attribute = self.attribute.GetValue()
        if self.cats.GetValue() != '':
            cats = self.cats.GetValue().split(',')
        if cats and self.poi:
            GMessage(message=_("Both coordinates and categories are set; "
                               "coordinates will be used. To use categories, "
                               "remove the text from the coordinates field."))
        if not attribute or attribute == '':
            GError(parent=self,
                   showTraceback=False,
                   message=_("With a vector temporal dataset you have to select"
                             " an attribute column"))
            return
        columns = ','.join(['name', 'start_time', 'end_time', 'id', 'layer'])
        for series in timeseries:
            name = series[0]
            fullname = name + '@' + series[1]
            etype = series[2]
            sp = tgis.dataset_factory(etype, fullname)
            if not sp.is_in_db(dbif=self.dbif):
                GError(message=_("Dataset <%s> not found in temporal "
                                 "database") % (fullname),
                       parent=self,
                       showTraceback=False)
                return
            sp.select(dbif=self.dbif)

            rows = sp.get_registered_maps(dbif=self.dbif,
                                          order="start_time",
                                          columns=columns,
                                          where=None)

            self.timeDataV[name] = OrderedDict()
            self.timeDataV[name]['temporalDataType'] = etype
            self.timeDataV[name]['temporalType'] = sp.get_temporal_type()
            self.timeDataV[name]['granularity'] = sp.get_granularity()

            if mode is None:
                mode = self.timeDataV[name]['temporalType']
            elif self.timeDataV[name]['temporalType'] != mode:
                GError(parent=self,
                       showTraceback=False,
                       message=_(
                           "Datasets have different temporal type ("
                           "absolute x relative), which is not allowed."))
                return
            self.timeDataV[name]['unit'] = None  # only with relative
            if self.timeDataV[name]['temporalType'] == 'relative':
                start, end, self.timeDataV[name][
                    'unit'] = sp.get_relative_time()
                if unit is None:
                    unit = self.timeDataV[name]['unit']
                elif self.timeDataV[name]['unit'] != unit:
                    GError(message=_("Datasets have different time unit which"
                                     " is not allowed."),
                           parent=self,
                           showTraceback=False)
                    return
            if self.poi:
                self.plotNameListV.append(name)
                # TODO set an appropriate distance; right now a big one is set
                # to return the point closest to the selected one
                out = grass.vector_what(map='pois_srvds',
                                        coord=self.poi.coords(),
                                        distance=10000000000000000)
                if len(out) != len(rows):
                    GError(parent=self,
                           showTraceback=False,
                           message=_("The number of vector layers differs from "
                                     "the number of maps in the vector "
                                     "temporal dataset"))
                    return
                for i in range(len(rows)):
                    row = rows[i]
                    values = out[i]
                    if str(row['layer']) == str(values['Layer']):
                        lay = "{map}_{layer}".format(map=row['name'],
                                                     layer=values['Layer'])
                        self.timeDataV[name][lay] = {}
                        self.timeDataV[name][lay]['start_datetime'] = row[
                            'start_time']
                        self.timeDataV[name][lay]['end_datetime'] = row[
                            'start_time']
                        self.timeDataV[name][lay]['value'] = values[
                            'Attributes'][attribute]
            else:
                wherequery = ''
                cats = self._getExistingCategories(rows[0]['name'], cats)
                totcat = len(cats)
                ncat = 1
                for cat in cats:
                    if ncat == 1 and totcat != 1:
                        wherequery += '{k}={c} or'.format(c=cat, k="{key}")
                    elif ncat == 1 and totcat == 1:
                        wherequery += '{k}={c}'.format(c=cat, k="{key}")
                    elif ncat == totcat:
                        wherequery += ' {k}={c}'.format(c=cat, k="{key}")
                    else:
                        wherequery += ' {k}={c} or'.format(c=cat, k="{key}")

                    catn = "cat{num}".format(num=cat)
                    self.plotNameListV.append("{na}+{cat}".format(na=name,
                                                                  cat=catn))
                    self.timeDataV[name][catn] = OrderedDict()
                    ncat += 1
                for row in rows:
                    lay = int(row['layer'])
                    catkey = self._parseVDbConn(row['name'], lay)
                    if not catkey:
                        GError(parent=self,
                               showTraceback=False,
                               message=_(
                                   "No connection between vector map {vmap} "
                                   "and layer {la}".format(vmap=row['name'],
                                                           la=lay)))
                        return
                    vals = grass.vector_db_select(
                        map=row['name'],
                        layer=lay,
                        where=wherequery.format(key=catkey),
                        columns=attribute)
                    layn = "lay{num}".format(num=lay)
                    for cat in cats:
                        catn = "cat{num}".format(num=cat)
                        if layn not in self.timeDataV[name][catn].keys():
                            self.timeDataV[name][catn][layn] = {}
                        self.timeDataV[name][catn][layn][
                            'start_datetime'] = row['start_time']
                        self.timeDataV[name][catn][layn]['end_datetime'] = row[
                            'end_time']
                        self.timeDataV[name][catn][layn]['value'] = vals[
                            'values'][int(cat)][0]
        self.unit = unit
        self.temporalType = mode
        return
Code example #15
# Then remove all sub-basins and segments that have negative flow accumulation
# (i.e. have contributions from outside the map)

###################################################################
# Intermediate step: Remove all basins that have offmap flow
# i.e., those containing cells with negative flow accumulation
###################################################################

# Method 3 -- even easier
grass.mapcalc("has_offmap_flow = (flowAccum < 0)", overwrite=True)
grass.run_command('r.null', map='has_offmap_flow', setnull=0)
grass.run_command('r.to.vect', input='has_offmap_flow', output='has_offmap_flow', type='point', overwrite=True)
grass.run_command('v.db.addcolumn', map='has_offmap_flow', columns='badbasin_cats integer')
grass.run_command('v.what.vect', map='has_offmap_flow', column='badbasin_cats', query_map='basins', query_column='cat', dmax=60)
colNames = np.array(grass.vector_db_select('has_offmap_flow', layer=1)['columns'])
# offmap incoming flow points
colValues = np.array(grass.vector_db_select('has_offmap_flow', layer=1)['values'].values())
badcats = colValues[:,colNames == 'badbasin_cats'].squeeze()
badcats = badcats[badcats != '']
badcats = badcats.astype(int)
badcats = list(set(list(badcats)))
# basins for full cat list
colNames = np.array(grass.vector_db_select('basins', layer=1)['columns'])
colValues = np.array(grass.vector_db_select('basins', layer=1)['values'].values())
allcats = colValues[:,colNames == 'cat'].astype(int).squeeze()
allcats = list(set(list(allcats)))
# xor to goodcats
#goodcats = set(badcats).symmetric_difference(allcats)
# but better in case somehow there are badcats that are not allcats to do NOT
goodcats = list(set(allcats) - set(badcats))
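
# A hedged sketch (not part of the original snippet): with 'goodcats' in hand,
# the clean sub-basins could be kept with v.extract; 'basins_good' is a
# hypothetical output name.
grass.run_command('v.extract', input='basins', output='basins_good',
                  cats=','.join(map(str, goodcats)), overwrite=True)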
Code example #16
def main():
    """
    Links each river segment to the next downstream segment in a tributary 
    network by referencing its category (cat) number in a new column. "0"
    means that the river exits the map.
    """

    # Parsing inside function
    _cat = int(options['cat'])
    overwrite_flag = gscript.overwrite()
    elevation = options['elevation']
    if elevation == '': elevation = None    
    slope = options['slope']
    if slope == '': slope = None    
    accumulation = options['accumulation']
    if accumulation == '': accumulation = None
    direction = options['direction']
    if direction == '': direction = None
    streams = options['streams']
    if streams == '': streams = None
    outstream = options['outstream']
    if outstream == '': outstream = None
    outfile = options['outfile']
    if outfile == '': outfile = None
    # !!!!!!!!!!!!!!!!!
    # ADD SWITCHES TO INDIVIDUALLY SMOOTH SLOPE, ACCUM, ETC.
    # !!!!!!!!!!!!!!!!!
    try:
        window = float(options['window'])
    except:
        window = None
    try:
        dx_target = float(options['dx_target'])
    except:
        dx_target = None
    accum_mult = float(options['accum_mult'])
    if options['units'] == 'm2':
        accum_label = 'Drainage area [m$^2$]'
    elif options['units'] == 'km2':
        accum_label = 'Drainage area [km$^2$]'
    elif options['units'] == 'cumecs':
        accum_label = 'Water discharge [m$^3$ s$^{-1}$]'
    elif options['units'] == 'cfs':
        accum_label = 'Water discharge [cfs]'
    else:
        accum_label = 'Flow accumulation [$-$]'
    plots = options['plots'].split(',')

    # Attributes of streams
    colNames = np.array(vector_db_select(streams)['columns'])
    colValues = np.array(vector_db_select(streams)['values'].values())
    warnings.warn('tostream is not generalized')
    tostream = colValues[:,colNames == 'tostream'].astype(int).squeeze()
    cats = colValues[:,colNames == 'cat'].astype(int).squeeze() # = "fromstream"

    # We can loop over this list to get the shape of the full river network.
    selected_cats = []
    segment = _cat
    selected_cats.append(segment)

    # Get all cats in network
    data = vector.VectorTopo(streams) # Create a VectorTopo object
    data.open('r') # Open this object for reading

    if direction == 'downstream':
        gscript.message("Extracting drainage pathway...",)
        # Get network
        while selected_cats[-1] != 0:
            selected_cats.append(int(tostream[cats == selected_cats[-1]]))
        #x.append(selected_cats[-1])
        selected_cats = selected_cats[:-1] # remove 0 at end
        gscript.message("Done.")
        
        
    elif direction == 'upstream':
        gscript.message("Extracting drainage network...",)
        # GENERALIZE COLUMN NAME!!!!!!!!
        tostream_col = np.where(np.array(data.table.columns.names())
                                == 'tostream')[0][0]
        terminalCats = [_cat]
        terminal_x_values = [0]
        netcats = []
        net_tocats = []
        while len(terminalCats) > 0:
            for cat in terminalCats:
                netcats.append(cat)
                # ALSO UNADVISABLE NAME -- NEED TO GET TOSTREAM, GENERALIZED
                #print data.table_to_dict()
                colnum = np.where( np.array(data.table.columns.names()) 
                                   == 'tostream')[0][0]
                net_tocats.append(data.table_to_dict()[cat][colnum])
            oldcats = terminalCats
            terminalCats = []
            for cat in oldcats:
                terminalCats += list(cats[tostream == cat])
        #data.close()
        netcats = np.array(netcats)
        net_tocats = np.array(net_tocats)
        
        selected_cats = netcats
        gscript.message("Done.")
        
    segments = []
    for cat in selected_cats:
        points_with_cat = data.cat(cat_id=cat, vtype='lines')[0]
        subcoords = []
        for point in points_with_cat:
            subcoords.append([point.x, point.y])
        segments.append( rn.Segment(_id=cat, to_ids=tostream[cats == cat]) )
        segments[-1].set_EastingNorthing(ENarray=subcoords)
        segments[-1].calc_x_from_EastingNorthing()
        # x grid spacing
        #print segments[-1].Easting[-1], segments[-1].Northing[-1]
        #print segments[-1].EastingNorthing[-1]
        #print ""
        if dx_target is not None:
            dx_target = float(dx_target)
            segments[-1].set_target_dx_downstream(dx_target)
            segments[-1].densify_x_E_N()
    data.close()
    
    net = rn.Network(segments)

    bbox = BoundingBox(points_xy=net.segments_xy_flattened())
    reg_to_revert = region.Region()
    reg = region.Region() # to limit region for computational efficiency
    reg.set_bbox(bbox.bbox)
    reg.write()
    
    # Network extraction
    if outstream:
        selected_cats_str = list(np.array(selected_cats).astype(str))
        selected_cats_csv = ','.join(selected_cats_str)
        v.extract( input=streams, output=outstream, \
                   cats=selected_cats_csv, overwrite=overwrite_flag )
    
    
    # All coordinates
    coords = net.segments_xy_flattened()
    #x_downstream = 
    
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # UPDATE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    """
    ##### FIND RIGHT SPOT TO ADD CLASS STUFF HERE/BELOW ####
    
    # Extract x points in network
    data = vector.VectorTopo(streams) # Create a VectorTopo object
    data.open('r') # Open this object for reading
    
    coords = []
    _i = 0
    for i in range(len(data)):
        if type(data.read(i+1)) is vector.geometry.Line:
            if data.read(i+1).cat in selected_cats:
                coords.append(data.read(i+1).to_array())
                gscript.core.percent(_i, len(selected_cats), 100./len(selected_cats))
                _i += 1
    gscript.core.percent(1, 1, 1)
    coords = np.vstack(np.array(coords))
    
    _dx = np.diff(coords[:,0])
    _dy = np.diff(coords[:,1])
    x_downstream_0 = np.hstack((0, np.cumsum((_dx**2 + _dy**2)**.5)))
    x_downstream = x_downstream_0.copy()
    
    data.close()
    """
  
    
    # TEMPORARY!!!!
    #x_downstream = get_xEN()
    #x_downstream_0 = x_downstream[0]

    # Analysis

    # Downstream distances -- 0 at mouth
    net.compute_x_in_network()

    # Elevation
    if elevation:
        gscript.message("Elevation")
        _include_z = True
        # Load DEM
        griddata = garray.array()
        griddata.read(elevation)
        griddata = np.flipud(griddata)
        # Interpolate: nearest or linear?
        x = np.arange(reg.west + reg.ewres/2., reg.east, reg.ewres)
        y = np.arange(reg.south + reg.nsres/2., reg.north, reg.nsres)
        itp = RegularGridInterpolator( (x, y), griddata.transpose(), 
                                       method='nearest')
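        # (griddata was flipped so row 0 is the southern edge; the transpose
        # makes the first interpolator axis easting, matching (x, y) above)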
        _i = 0
        _lasti = 0
        _nexti = 0
        for segment in net.segment_list:
            try:
                segment.set_z( itp(segment.EastingNorthing) )
            except:
                print segment.EastingNorthing
                print np.vstack((segment.Easting_original, segment.Northing_original)).transpose()
                sys.exit()
            if _i > _nexti:
                gscript.core.percent( _i, len(net.segment_list), np.floor(_i - _lasti))
                _nexti = float(_nexti) + len(net.segment_list)/10.
                if _nexti > len(net.segment_list):
                    _nexti = len(net.segment_list) - 1
            _lasti = _i
            _i += 1
        gscript.core.percent(1, 1, 1)
        del griddata
        #warnings.warn('Need to handle window in network')
        #gscript.core.percent(1, 1, 1)
    else:
        _include_z = False

    # Slope
    if slope:
        gscript.message("Slope")
        _include_S = True
        _slope = RasterRow(slope)
        _slope.open('r')
        _i = 0
        _lasti = 0
        _nexti = 0
        for segment in net.segment_list:
            sen = segment.EastingNorthing # all E,N
            S = []
            for row in sen:
                #try:
                S.append(_slope.get_value(Point(row[0], row[1])))
                #except:
                #    print "ERROR"
                if _i > _nexti:
                    gscript.core.percent(_i, len(coords), np.floor(_i - _lasti))
                    _nexti = float(_nexti) + len(coords)/10.
                    if _nexti > len(coords):
                        _nexti = len(coords) - 1
                _lasti = _i
                _i += 1
            # MAKE SETTER FOR THIS!!!!
            segment.channel_slope = np.array(S)
        if window is not None:
            pass
            #net.smooth_window()
            #_x_downstream, _S = moving_average(x_downstream_0, S, window)
        _slope.close()
        S = np.array(S)
        S_0 = S.copy()
        gscript.core.percent(1, 1, 1)
    else:
        _include_S = False

    # Accumulation / drainage area
    if accumulation:
        gscript.message("Accumulation")
        _include_A = True
        accumulation = RasterRow(accumulation)
        accumulation.open('r')
        _i = 0
        _lasti = 0
        _nexti = 0
        for segment in net.segment_list:
            A = []
            sen = segment.EastingNorthing # all E,N
            for row in sen:
                A.append(accumulation.get_value(Point(row[0], row[1])) 
                                                          * accum_mult)
                if _i > _nexti:
                    gscript.core.percent(_i, len(coords), np.floor(_i - _lasti))
                    _nexti = float(_nexti) + len(coords)/10.
                    if _nexti > len(coords):
                        _nexti = len(coords) - 1
                _lasti = _i
                _i += 1
            # MAKE SETTER FOR THIS!!!!
            segment.channel_flow_accumulation = np.array(A)
        accumulation.close()
        A = np.array(A)
        A_0 = A.copy()
        """
        if window is not None:
            _x_downstream, A = moving_average(x_downstream_0, A, window)
        """
        gscript.core.percent(1, 1, 1)
    else:
        _include_A = False

    # Revert to original region
    reg_to_revert.write()  # restore the region captured before set_bbox above

    # Smoothing
    if window is not None:
        net.smooth_window(window)

    # Plotting
    if 'LongProfile' in plots:
        plt.figure()
        if window:
            for segment in net.segment_list:
                plt.plot(segment.x/1000., segment.z_smoothed, 'k-', linewidth=2)
        else:
            for segment in net.segment_list:
                plt.plot(segment.x/1000., segment.z, 'k-', linewidth=2)
        #plt.plot(x_downstream/1000., z, 'k-', linewidth=2)
        plt.xlabel('Distance from mouth [km]', fontsize=16)
        plt.ylabel('Elevation [m]', fontsize=16)
        plt.tight_layout()
    if 'SlopeAccum' in plots:
        plt.figure()
        if window:
            for segment in net.segment_list:
                _y_points = segment.channel_slope_smoothed[
                                segment.channel_flow_accumulation_smoothed > 0
                                ]
                _x_points = segment.channel_flow_accumulation_smoothed[
                                segment.channel_flow_accumulation_smoothed > 0
                                ]
                plt.loglog(_x_points, _y_points, 'k.', alpha=.5)
        else:
            for segment in net.segment_list:
                _y_points = segment.channel_slope[
                                    segment.channel_flow_accumulation > 0
                                    ]
                _x_points = segment.channel_flow_accumulation[
                                    segment.channel_flow_accumulation > 0
                                    ]
                plt.loglog(_x_points, _y_points, 'k.', alpha=.5)
        plt.xlabel(accum_label, fontsize=16)
        plt.ylabel('Slope [$-$]', fontsize=16)
        plt.tight_layout()
    if 'SlopeDistance' in plots:
        plt.figure()
        if window:
            for segment in net.segment_list:
                plt.plot(segment.x/1000., segment.channel_slope_smoothed,
                            'k-', linewidth=2)
        else:
            for segment in net.segment_list:
                plt.plot(segment.x/1000., segment.channel_slope,
                            'k-', linewidth=2)
        plt.xlabel('Distance downstream [km]', fontsize=16)
        plt.ylabel('Slope [$-$]', fontsize=20)
        plt.tight_layout()
    if 'AccumDistance' in plots:
        plt.figure()
        for segment in net.segment_list:
            _x_points = segment.x[segment.channel_flow_accumulation > 0]
            _y_points = segment.channel_flow_accumulation[
                                         segment.channel_flow_accumulation > 0
                                         ]
            plt.plot(_x_points/1000., _y_points, 'k.', alpha=.5)
        plt.xlabel('Distance downstream [km]', fontsize=16)
        plt.ylabel(accum_label, fontsize=16)
        plt.tight_layout()
    plt.show()
    
    # Saving data -- will need to update for more complex data structures!
    if outfile:
        net.compute_profile_from_starting_segment()
        _outfile = np.vstack((net.long_profile_header, net.long_profile_output))
        np.savetxt(outfile, _outfile, '%s')
    else:
        pass
        
    #print net.accum_from_headwaters[1] - net.slope_from_headwaters[1]

    """
    for segment in net.segment_list:
        print segment.channel_flow_accumulation_smoothed
        print segment.channel_slope_smoothed
        print segment.channel_flow_accumulation_smoothed - \
              segment.channel_slope_smoothed
    """
    
    """
Code example #17
File: m.swim.run.py  Project: mwort/m.swim
def getStations(resourcedir='mySWIM',**datakwargs):
    ####### STATIONS ################################################    
    stationsinfo = grass.vector_info(options['stationsvect'])

    # get stations table    
    try: stationstbl = grass.vector_db_select(options['stationsvect'])
    except: grass.fatal("Can't read the attribute table of %s; does it have one?" % options['stationsvect'])
    # make pandas df
    stationstbl = pa.DataFrame(stationstbl['values'].values(),columns=stationstbl['columns'])

    # check if it finds the stationidcolumn
    if len(options['stationidcolumn'])>0:
        if options['stationidcolumn'] not in stationstbl:
            grass.fatal("Can't find stationidcolumn %s" % options['stationidcolumn'])
        idcol = stationstbl[options['stationidcolumn']]
    elif 'stationID' in stationstbl:
        gm('Found stationID column in the stationsvect table.')
        idcol = stationstbl['stationID']
    else:
        ### stationID DESIGN #################################################
        gm('''Will use the order of stations in the stationsvect as stationIDs and will assign these stationIDs:''')
        idcol = ['s%s' %s for s in range(1,len(stationstbl)+1)]
        gm(idcol)
        # make stationID column and upload the IDs
        grun('v.db.addcolumn',map=options['stationsvect'],
                          columns='stationID varchar(5)',quiet=True)
        catcol = stationstbl.icol(1)
        for i,s in enumerate(catcol):
            grun('v.db.update',map=options['stationsvect'], column='stationID',
                 where='%s=%s' %(catcol.name,s), value=idcol[i],quiet=True)
        # make sure the csvdata file is read without header
        datakwargs.update({'names':['time']+idcol,'skiprows':1})
    # set as index
    stationstbl.index = idcol
    stationstbl.index.name='stationID'
    
    # check if it finds the subbasinidcolumn
    if len(options['subbasinidcolumn'])> 0:
        if options['subbasinidcolumn'] not in stationstbl:
            grass.fatal("Can't find subbasinidcolumn %s" % options['subbasinidcolumn'])
        # change name of column to subbasinID
        stationstbl['subbasinID'] = stationstbl[options['subbasinidcolumn']]
        
    elif 'subbasinID' in stationstbl:
        gm('Found subbasinID column in the stationsvect table.')
        
    else:
        gm('Will upload subbasinIDs from the subbasins rast to the stationsvect table. Double check them and run the setup again if changed.')
        # first check if in same mapset then stationsvect
        if stationsinfo['mapset'] != grass.gisenv()['MAPSET']:
            grass.warning(' !!! Changing into mapset: %s !!!' %stationsinfo['mapset'])
            grun('g.mapset',mapset=stationsinfo['mapset'],quiet=True)
        grun('v.db.addcolumn',map=options['stationsvect'],
                          columns='subbasinID int',quiet=True)
        grun('v.what.rast',map=options['stationsvect'],
                          raster=options['subbasins'],column='subbasinID',quiet=True)
        subids = grass.vector_db_select(options['stationsvect'],columns='subbasinID')
        stationstbl['subbasinID'] = np.array(subids['values'].values()).flatten()
    # convert subbasinIDs to int
    for i,s in enumerate(stationstbl['subbasinID']):
        try: stationstbl['subbasinID'][i] = int(s)
        except: stationstbl['subbasinID'][i] = np.nan
        
    # report on found subbasinIDs
    gm('Found these subbasinIDs:\n%s' %stationstbl['subbasinID'])
    
    
    # read data
    gm('Attempting to read discharge data. I am expecting a header like this:')
    gm(','.join(['YYYY-MM-DD']+list(idcol)))
    data = pa.read_csv(options['csvdata'],parse_dates=0,index_col=0,**datakwargs)
    data.index.name = 'time'
    # check if read properly
    # check if all cats are in csvdata as columns
    for s in stationstbl.index:
        if s not in data:
            grass.warning("Can't find %s in the csvdata table header. This station won't have any data." % s)
            gm(stationstbl.ix[s].to_string())
            data[s]=np.nan
    # get days per year covered
    peran = (~data.isnull()).astype(int).resample('a','sum')
    peran.index=peran.index.year
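    # (Assumption: in later pandas versions this would read
    # (~data.isnull()).resample('A').sum(); resample('a', 'sum') is the older
    # two-argument API.)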
    # report
    gm('Observations found (days per year):')
    for l in peran.to_string().split('\n'): gm(l)
#        # write individual files to resource folder
#        else:
#            path = os.path.join(resourcedir,s+'.pa')
#            sdat = pa.DataFrame({'Q':data[s]})
#            sdat.index.name = 'time'
#            sdat.to_pickle(path)
#            gm('Saved Q data for the station %s to %s in a python.pandas format.' %(s,path))
#            gm(sdat.head().to_string())
#            fpaths += [path]
#    # attach paths to stationstbl
#    stationstbl['data'] = fpaths
            
    # write out table to resourcedir
    path = os.path.join(resourcedir,'observations.csv')
    data.to_csv(path)
    # return as dictionary of stations
    stations = stationstbl.T.to_dict()
    return stations
Code example #18
# IREACH (whole next section dedicated to this)
# SLOPE (need z_start and z_end)

# Now, the light stuff is over: time to build the reach order
v.db_addcolumn(map='reaches', columns='xr1 double precision, yr1 double precision, xr2 double precision, yr2 double precision')
v.to_db(map='reaches', option='start', columns='xr1,yr1')
v.to_db(map='reaches', option='end', columns='xr2,yr2')

# Now just sort by category, find which stream has the same xr1 and yr1 as
# x1 and y1 (or a_x1, a_y1) and then find where its endpoint matches another 
# starting point and move down the line.
# v.db.select reaches col=cat,a_id,xr1,xr2 where="a_x1 = xr1"

# First, get the starting coordinates of each stream segment
# and a set of river ID's (ordered from 1...N)
colNames = np.array(gscript.vector_db_select('segments', layer=1)['columns'])
colValues = np.array(gscript.vector_db_select('segments', layer=1)['values'].values())
number_of_segments = colValues.shape[0]
segment_x1s = colValues[:,colNames == 'x1'].astype(float).squeeze()
segment_y1s = colValues[:,colNames == 'y1'].astype(float).squeeze()
segment_ids = colValues[:,colNames == 'id'].astype(float).squeeze()

# Then move back to the reaches map to produce the ordering
colNames = np.array(gscript.vector_db_select('reaches', layer=1)['columns'])
colValues = np.array(gscript.vector_db_select('reaches', layer=1)['values'].values())
reach_cats = colValues[:,colNames == 'cat'].astype(int).squeeze()
reach_x1s = colValues[:,colNames == 'xr1'].astype(float).squeeze()
reach_y1s = colValues[:,colNames == 'yr1'].astype(float).squeeze()
reach_x2s = colValues[:,colNames == 'xr2'].astype(float).squeeze()
reach_y2s = colValues[:,colNames == 'yr2'].astype(float).squeeze()
segment_ids__reach = colValues[:,colNames == 'segment_id'].astype(float).squeeze()
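
# A hedged sketch (not in the original): one way to realize the ordering the
# comments above describe is to chain reaches downstream by matching each
# reach's end point to another reach's start point; 0 marks a reach whose end
# point has no downstream neighbor.
reach_tocat = np.zeros(len(reach_cats), dtype=int)
for i in range(len(reach_cats)):
    _match = (reach_x1s == reach_x2s[i]) & (reach_y1s == reach_y2s[i])
    if _match.any():
        reach_tocat[i] = reach_cats[_match.nonzero()[0][0]]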
Code example #19
def main():
    """
    Builds a grid for the MODFLOW component of the USGS hydrologic model,
    GSFLOW.
    """

    options, flags = gscript.parser()
    basin = options['basin']
    pp = options['pour_point']
    raster_input = options['raster_input']
    dx = options['dx']
    dy = options['dy']
    grid = options['output']
    mask = options['mask_output']
    bc_cell = options['bc_cell']
    # basin='basins_tmp_onebasin'; pp='pp_tmp'; raster_input='DEM'; raster_output='DEM_coarse'; dx=dy='500'; grid='grid_tmp'; mask='mask_tmp'
    """
    # Fatal if raster input and output are not both set
    _lena0 = (len(raster_input) == 0)
    _lenb0 = (len(raster_output) == 0)
    if _lena0 + _lenb0 == 1:
        grass.fatal("You must set both raster input and output, or neither.")
    """

    # Create grid -- overlaps DEM, one cell of padding
    gscript.use_temp_region()
    reg = gscript.region()
    reg_grid_edges_sn = np.linspace(reg['s'], reg['n'], reg['rows'])
    reg_grid_edges_we = np.linspace(reg['w'], reg['e'], reg['cols'])
    g.region(vector=basin, ewres=dx, nsres=dy)
    regnew = gscript.region()
    # Use a grid ratio -- don't match exactly the desired MODFLOW resolution
    grid_ratio_ns = np.round(regnew['nsres'] / reg['nsres'])
    grid_ratio_ew = np.round(regnew['ewres'] / reg['ewres'])
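    # (i.e. snap the MODFLOW cell size to an integer multiple of the DEM
    # resolution, so that coarse and fine cells nest exactly)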
    # Get S and W, then step outward in whole coarse-grid-cell increments to
    # get N and E, including 3 cells of padding around the whole watershed
    _s_dist = np.abs(reg_grid_edges_sn - (regnew['s'] - 3. * regnew['nsres']))
    _s_idx = np.where(_s_dist == np.min(_s_dist))[0][0]
    _s = float(reg_grid_edges_sn[_s_idx])
    _n_grid = np.arange(_s, reg['n'] + 3 * grid_ratio_ns * reg['nsres'],
                        grid_ratio_ns * reg['nsres'])
    _n_dist = np.abs(_n_grid - (regnew['n'] + 3. * regnew['nsres']))
    _n_idx = np.where(_n_dist == np.min(_n_dist))[0][0]
    _n = float(_n_grid[_n_idx])
    _w_dist = np.abs(reg_grid_edges_we - (regnew['w'] - 3. * regnew['ewres']))
    _w_idx = np.where(_w_dist == np.min(_w_dist))[0][0]
    _w = float(reg_grid_edges_we[_w_idx])
    _e_grid = np.arange(_w, reg['e'] + 3 * grid_ratio_ew * reg['ewres'],
                        grid_ratio_ew * reg['ewres'])
    _e_dist = np.abs(_e_grid - (regnew['e'] + 3. * regnew['ewres']))
    _e_idx = np.where(_e_dist == np.min(_e_dist))[0][0]
    _e = float(_e_grid[_e_idx])
    # Finally make the region
    g.region(w=str(_w),
             e=str(_e),
             s=str(_s),
             n=str(_n),
             nsres=str(grid_ratio_ns * reg['nsres']),
             ewres=str(grid_ratio_ew * reg['ewres']))
    # And then make the grid
    v.mkgrid(map=grid, overwrite=gscript.overwrite())

    # Cell numbers (row, column, continuous ID)
    v.db_addcolumn(map=grid, columns='id int', quiet=True)
    colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns'])
    colValues = np.array(
        gscript.vector_db_select(grid, layer=1)['values'].values())
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()
    rows = colValues[:, colNames == 'row'].astype(int).squeeze()
    cols = colValues[:, colNames == 'col'].astype(int).squeeze()
    nrows = np.max(rows)
    ncols = np.max(cols)
    cats = np.ravel([cats])
    _id = np.ravel([ncols * (rows - 1) + cols])
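    # e.g. with ncols=3: (row=1, col=1) -> id=1, (row=1, col=3) -> id=3,
    # (row=2, col=1) -> id=4; IDs run across each row, then down the grid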
    _id_cat = []
    for i in range(len(_id)):
        _id_cat.append((_id[i], cats[i]))
    gridTopo = VectorTopo(grid)
    gridTopo.open('rw')
    cur = gridTopo.table.conn.cursor()
    cur.executemany("update " + grid + " set id=? where cat=?", _id_cat)
    gridTopo.table.conn.commit()
    gridTopo.close()

    # Cell area
    v.db_addcolumn(map=grid, columns='area_m2 double precision', quiet=True)
    v.to_db(map=grid,
            option='area',
            units='meters',
            columns='area_m2',
            quiet=True)

    # Basin mask
    if len(mask) > 0:
        # Fine resolution region:
        g.region(n=reg['n'],
                 s=reg['s'],
                 w=reg['w'],
                 e=reg['e'],
                 nsres=reg['nsres'],
                 ewres=reg['ewres'])
        # Rasterize basin
        v.to_rast(input=basin,
                  output=mask,
                  use='val',
                  value=1,
                  overwrite=gscript.overwrite(),
                  quiet=True)
        # Coarse resolution region:
        g.region(w=str(_w),
                 e=str(_e),
                 s=str(_s),
                 n=str(_n),
                 nsres=str(grid_ratio_ns * reg['nsres']),
                 ewres=str(grid_ratio_ew * reg['ewres']))
        r.resamp_stats(input=mask,
                       output=mask,
                       method='sum',
                       overwrite=True,
                       quiet=True)
        r.mapcalc(mask + ' = ' + mask + ' > 0', overwrite=True, quiet=True)
    """
    # Resampled raster
    if len(raster_output) > 0:
        r.resamp_stats(input=raster_input, output=raster_output, method='average', overwrite=gscript.overwrite(), quiet=True)
    """

    # Pour point
    if len(pp) > 0:
        v.db_addcolumn(map=pp,
                       columns=('row integer', 'col integer'),
                       quiet=True)
        v.build(map=pp, quiet=True)
        v.what_vect(map=pp,
                    query_map=grid,
                    column='row',
                    query_column='row',
                    quiet=True)
        v.what_vect(map=pp,
                    query_map=grid,
                    column='col',
                    query_column='col',
                    quiet=True)

    # Next point downstream of the pour point
    if len(bc_cell) > 0:
        ########## NEED TO USE TRUE TEMPORARY FILE ##########
        # May not work with dx != dy!
        v.to_rast(input=pp, output='tmp', use='val', value=1, overwrite=True)
        r.buffer(input='tmp',
                 output='tmp',
                 distances=float(dx) * 1.5,
                 overwrite=True)
        r.mapcalc('tmp = (tmp == 2) * ' + raster_input, overwrite=True)
        r.drain(input=raster_input,
                start_points=pp,
                output='tmp2',
                overwrite=True)
        r.mapcalc('tmp = tmp2 * tmp', overwrite=True)
        r.null(map='tmp', setnull=0)
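        # (How this works: r.buffer labels the pour-point cell 1 and the ring
        # around it 2, so (tmp == 2) keeps just the ring; multiplying by the
        # r.drain path then leaves only the ring cell that the flow path
        # crosses, i.e. the next cell downstream of the pour point.)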
        r.to_vect(input='tmp',
                  output=bc_cell,
                  type='point',
                  column='z',
                  overwrite=gscript.overwrite(),
                  quiet=True)
        v.db_addcolumn(map=bc_cell,
                       columns=('row integer', 'col integer'),
                       quiet=True)
        v.build(map=bc_cell, quiet=True)
        v.what_vect(map=bc_cell, query_map=grid, column='row', \
                    query_column='row', quiet=True)
        v.what_vect(map=bc_cell, query_map=grid, column='col', \
                    query_column='col', quiet=True)

    g.region(n=reg['n'],
             s=reg['s'],
             w=reg['w'],
             e=reg['e'],
             nsres=reg['nsres'],
             ewres=reg['ewres'])
Code example #20
def main():
    """
    Adds GSFLOW parameters to a set of HRU sub-basins
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()
    basins = options["input"]
    HRU = options["output"]
    slope = options["slope"]
    aspect = options["aspect"]
    elevation = options["elevation"]
    land_cover = options["cov_type"]
    soil = options["soil_type"]

    ################################
    # CREATE HRUs FROM SUB-BASINS  #
    ################################

    g.copy(vector=(basins, HRU), overwrite=gscript.overwrite())

    ############################################
    # ATTRIBUTE COLUMNS (IN ORDER FROM MANUAL) #
    ############################################

    # HRU
    hru_columns = []
    # Self ID
    hru_columns.append("id integer")  # nhru
    # Basic Physical Attributes (Geometry)
    hru_columns.append("hru_area double precision")  # acres (!!!!)
    hru_columns.append(
        "hru_area_m2 double precision")  # [not for GSFLOW: for me!]
    hru_columns.append("hru_aspect double precision")  # Mean aspect [degrees]
    hru_columns.append("hru_elev double precision")  # Mean elevation
    hru_columns.append("hru_lat double precision")  # Latitude of centroid
    hru_columns.append("hru_lon double precision")  # Longitude of centroid
    # unnecessary but why not?
    hru_columns.append("hru_slope double precision")  # Mean slope [percent]
    # Basic Physical Attributes (Other)
    # hru_columns.append('hru_type integer') # 0=inactive; 1=land; 2=lake; 3=swale; almost all will be 1
    # hru_columns.append('elev_units integer') # 0=feet; 1=meters. 0=default. I think I will set this to 1 by default.
    # Measured input
    hru_columns.append(
        "outlet_sta integer")  # Index of streamflow station at basin outlet:
    # station number if it has one, 0 if not
    # Note that the below specify projections and not lat/lon; they really seem
    # to work for any projected coordinates, with _x, _y, in meters, and _xlong,
    # _ylat, in feet (i.e. they are just northing and easting). The meters and feet
    # are not just simple conversions, but actually are required for different
    # modules in the code, and are hence redundant but intentional.
    hru_columns.append("hru_x double precision")  # Easting [m]
    hru_columns.append("hru_xlong double precision")  # Easting [feet]
    hru_columns.append("hru_y double precision")  # Northing [m]
    hru_columns.append("hru_ylat double precision")  # Northing [feet]
    # Streamflow and lake routing
    hru_columns.append(
        "K_coef double precision"
    )  # Travel time of flood wave to next downstream segment;
    # this is the Muskingum storage coefficient
    # 1.0 for reservoirs, diversions, and segments flowing
    # out of the basin
    hru_columns.append(
        "x_coef double precision")  # Amount of attenuation of flow wave;
    # this is the Muskingum routing weighting factor
    # range: 0.0--0.5; default 0.2
    # 0 for all segments flowing out of the basin
    hru_columns.append("hru_segment integer"
                       )  # ID of stream segment to which flow will be routed
    # this is for non-cascade routing (flow goes directly
    # from HRU to stream segment)
    hru_columns.append("obsin_segment integer"
                       )  # Index of measured streamflow station that replaces
    # inflow to a segment
    hru_columns.append(
        "cov_type integer"
    )  # 0=bare soil;1=grasses; 2=shrubs; 3=trees; 4=coniferous
    hru_columns.append("soil_type integer")  # 1=sand; 2=loam; 3=clay

    # Create strings
    hru_columns = ",".join(hru_columns)

    # Add columns to tables
    v.db_addcolumn(map=HRU, columns=hru_columns, quiet=True)

    ###########################
    # UPDATE DATABASE ENTRIES #
    ###########################

    colNames = np.array(gscript.vector_db_select(HRU, layer=1)["columns"])
    colValues = np.array(
        gscript.vector_db_select(HRU, layer=1)["values"].values())
    number_of_hrus = colValues.shape[0]
    cats = colValues[:, colNames == "cat"].astype(int).squeeze()
    rnums = colValues[:, colNames == "rnum"].astype(int).squeeze()

    nhru = np.arange(1, number_of_hrus + 1)
    nhrut = []
    for i in range(len(nhru)):
        nhrut.append((nhru[i], cats[i]))
    # Access the HRUs
    hru = VectorTopo(HRU)
    # Open the map with topology:
    hru.open("rw")
    # Create a cursor
    cur = hru.table.conn.cursor()
    # Use it to loop across the table
    cur.executemany("update " + HRU + " set id=? where cat=?", nhrut)
    # Commit changes to the table
    hru.table.conn.commit()
    # Close the table
    hru.close()
    """
    # Do the same for basins <-------------- DO THIS OR SIMPLY HAVE HRUs OVERLAIN WITH GRID CELLS? IN THIS CASE, RMV AREA ADDITION TO GRAVRES
    v.db_addcolumn(map=basins, columns='id int', quiet=True)
    basins = VectorTopo(basins)
    basins.open('rw')
    cur = basins.table.conn.cursor()
    cur.executemany("update basins set id=? where cat=?", nhrut)
    basins.table.conn.commit()
    basins.close()
    """

    # if you want to append to table
    # cur.executemany("update HRU(id) values(?)", nhrut) # "insert into" will add rows

    # hru_columns.append('hru_area double precision')
    # Acres b/c USGS
    v.to_db(map=HRU,
            option="area",
            columns="hru_area",
            units="acres",
            quiet=True)
    v.to_db(map=HRU,
            option="area",
            columns="hru_area_m2",
            units="meters",
            quiet=True)

    # GET MEAN VALUES FOR THESE NEXT ONES, ACROSS THE BASIN

    # SLOPE (and aspect)
    #####################
    v.rast_stats(
        map=HRU,
        raster=slope,
        method="average",
        column_prefix="tmp",
        flags="c",
        quiet=True,
    )
    v.db_update(map=HRU,
                column="hru_slope",
                query_column="tmp_average",
                quiet=True)

    # ASPECT
    #########
    v.db_dropcolumn(map=HRU, columns="tmp_average", quiet=True)
    # Aspect is in degrees, which cannot be averaged directly (angles wrap at
    # 360), so convert to x- and y-vector components, sum those, and convert
    # the summed vector back to an angle.
    # Geographic coordinates, so sin=x, cos=y.... not that it matters so long
    # as I am consistent in how I return to degrees
    r.mapcalc("aspect_x = sin(" + aspect + ")",
              overwrite=gscript.overwrite(),
              quiet=True)
    r.mapcalc("aspect_y = cos(" + aspect + ")",
              overwrite=gscript.overwrite(),
              quiet=True)
    # grass.run_command('v.db.addcolumn', map=HRU, columns='aspect_x_sum double precision, aspect_y_sum double precision, ncells_in_hru integer')
    v.rast_stats(
        map=HRU,
        raster="aspect_x",
        method="sum",
        column_prefix="aspect_x",
        flags="c",
        quiet=True,
    )
    v.rast_stats(
        map=HRU,
        raster="aspect_y",
        method="sum",
        column_prefix="aspect_y",
        flags="c",
        quiet=True,
    )
    hru = VectorTopo(HRU)
    hru.open("rw")
    cur = hru.table.conn.cursor()
    cur.execute("SELECT cat,aspect_x_sum,aspect_y_sum FROM %s" % hru.name)
    _arr = np.array(cur.fetchall()).astype(float)
    _cat = _arr[:, 0]
    _aspect_x_sum = _arr[:, 1]
    _aspect_y_sum = _arr[:, 2]
    aspect_angle = np.arctan2(_aspect_y_sum, _aspect_x_sum) * 180.0 / np.pi
    aspect_angle[aspect_angle < 0] += 360  # all positive
    aspect_angle_cat = np.vstack((aspect_angle, _cat)).transpose()
    cur.executemany("update " + HRU + " set hru_aspect=? where cat=?",
                    aspect_angle_cat)
    hru.table.conn.commit()
    hru.close()
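    # Illustration (not part of the module): a naive degree average of aspects
    # 350 and 10 is 180, the opposite direction; summing the sin and cos
    # components instead and converting the summed vector back to an angle
    # gives 0, the desired mean aspect.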

    # ELEVATION
    ############
    v.rast_stats(
        map=HRU,
        raster=elevation,
        method="average",
        column_prefix="tmp",
        flags="c",
        quiet=True,
    )
    v.db_update(map=HRU,
                column="hru_elev",
                query_column="tmp_average",
                quiet=True)
    v.db_dropcolumn(map=HRU, columns="tmp_average", quiet=True)

    # CENTROIDS
    ############

    # Get x,y of centroids. Some areas are not in the database table but do
    # have centroids, and they carry duplicate category values; it is hard to
    # find a good way to get rid of them. Perhaps these are little dangles on
    # the edges of the vectorization where the raster value was the same but
    # pinched out into 1-a few cells? From looking at the map, there are lots
    # of extra centroids on area boundaries, and removing small areas (though
    # the threshold is hard to guess) gets rid of these.

    hru = VectorTopo(HRU)
    hru.open("rw")
    hru_cats = []
    hru_coords = []
    for hru_i in hru:
        if isinstance(hru_i, vector.geometry.Centroid):
            hru_cats.append(hru_i.cat)
            hru_coords.append(hru_i.coords())
    hru_cats = np.array(hru_cats)
    hru_coords = np.array(hru_coords)
    hru.rewind()

    hru_area_ids = []
    for coor in hru_coords:
        _area = hru.find_by_point.area(Point(coor[0], coor[1]))
        hru_area_ids.append(_area)
    hru_area_ids = np.array(hru_area_ids)
    hru.rewind()

    hru_areas = []
    for _area_id in hru_area_ids:
        hru_areas.append(_area_id.area())
    hru_areas = np.array(hru_areas)
    hru.rewind()

    allcats = sorted(list(set(list(hru_cats))))

    # Now create weighted mean
    hru_centroid_locations = []
    for cat in allcats:
        hrus_with_cat = hru_cats[hru_cats == cat]
        if len(hrus_with_cat) == 1:
            hru_centroid_locations.append(
                (hru_coords[hru_cats == cat]).squeeze())
        else:
            _centroids = hru_coords[hru_cats == cat]
            # print _centroids
            _areas = hru_areas[hru_cats == cat]
            # print _areas
            _x = np.average(_centroids[:, 0], weights=_areas)
            _y = np.average(_centroids[:, 1], weights=_areas)
            # print _x, _y
            hru_centroid_locations.append(np.array([_x, _y]))

    # Now upload weighted mean to database table
    # allcats and hru_centroid_locations are co-indexed
    index__cats = create_iterator(HRU)
    cur = hru.table.conn.cursor()
    for i in range(len(allcats)):
        # meters
        cur.execute("update " + HRU + " set hru_x=" +
                    str(hru_centroid_locations[i][0]) + " where cat=" +
                    str(allcats[i]))
        cur.execute("update " + HRU + " set hru_y=" +
                    str(hru_centroid_locations[i][1]) + " where cat=" +
                    str(allcats[i]))
        # feet
        cur.execute("update " + HRU + " set hru_xlong=" +
                    str(hru_centroid_locations[i][0] * 3.28084) +
                    " where cat=" + str(allcats[i]))
        cur.execute("update " + HRU + " set hru_ylat=" +
                    str(hru_centroid_locations[i][1] * 3.28084) +
                    " where cat=" + str(allcats[i]))
        # (un)Project to lat/lon
        _centroid_ll = gscript.parse_command("m.proj",
                                             coordinates=list(
                                                 hru_centroid_locations[i]),
                                             flags="od").keys()[0]
        _lon, _lat, _z = _centroid_ll.split("|")
        cur.execute("update " + HRU + " set hru_lon=" + _lon + " where cat=" +
                    str(allcats[i]))
        cur.execute("update " + HRU + " set hru_lat=" + _lat + " where cat=" +
                    str(allcats[i]))

    # feet -- not working.
    # Probably an issue with index__cats -- maybe fix later, if needed
    # But currently not a major speed issue
    """
    cur.executemany("update "+HRU+" set hru_xlong=?*3.28084 where hru_x=?",
                    index__cats)
    cur.executemany("update "+HRU+" set hru_ylat=?*3.28084 where hru_y=?",
                    index__cats)
    """

    cur.close()
    hru.table.conn.commit()
    hru.close()

    # ID NUMBER
    ############
    # cur.executemany("update "+HRU+" set hru_segment=? where id=?",
    #                index__cats)
    # Segment number = HRU ID number
    v.db_update(map=HRU, column="hru_segment", query_column="id", quiet=True)

    # LAND USE/COVER
    ############
    try:
        land_cover = int(land_cover)
    except:
        pass
    if isinstance(land_cover, int):
        if land_cover <= 3:
            v.db_update(map=HRU,
                        column="cov_type",
                        value=land_cover,
                        quiet=True)
        else:
            sys.exit(
                "WARNING: INVALID LAND COVER TYPE. CHECK INTEGER VALUES.\n"
                "EXITING TO ALLOW USER TO CHANGE BEFORE RUNNING GSFLOW")
    else:
        # NEED TO UPDATE THIS TO MODAL VALUE!!!!
        gscript.message(
            "Warning: values taken from HRU centroids. Code should be updated to"
        )
        gscript.message("acquire modal values")
        v.what_rast(map=HRU,
                    type="centroid",
                    raster=land_cover,
                    column="cov_type",
                    quiet=True)
        # v.rast_stats(map=HRU, raster=land_cover, method='average', column_prefix='tmp', flags='c', quiet=True)
        # v.db_update(map=HRU, column='cov_type', query_column='tmp_average', quiet=True)
        # v.db_dropcolumn(map=HRU, columns='tmp_average', quiet=True)

    # SOIL
    ############
    try:
        soil = int(soil)
    except:
        pass
    if isinstance(soil, int):
        if (soil > 0) and (soil <= 3):
            v.db_update(map=HRU, column="soil_type", value=soil, quiet=True)
        else:
            sys.exit("WARNING: INVALID SOIL TYPE. CHECK INTEGER VALUES.\n"
                     "EXITING TO ALLOW USER TO CHANGE BEFORE RUNNING GSFLOW")
    else:
        # NEED TO UPDATE THIS TO MODAL VALUE!!!!
        gscript.message(
            "Warning: values taken from HRU centroids. Code should be updated to"
        )
        gscript.message("acquire modal values")
        v.what_rast(map=HRU,
                    type="centroid",
                    raster=soil,
                    column="soil_type",
                    quiet=True)
Code example #21
def main():
    """
    Links each river segment to the next downstream segment in a tributary
    network by referencing its category (cat) number in a new column. "0"
    means that the river exits the map.
    """

    options, flags = gscript.parser()

    streams = options["input_streams"]
    basins = options["input_basins"]
    downstream_cat = options["cat"]
    x_outlet = float(options["x_outlet"])
    y_outlet = float(options["y_outlet"])
    output_basins = options["output_basin"]
    output_streams = options["output_streams"]
    output_pour_point = options["output_pour_point"]
    draindir = options["draindir"]
    snapflag = flags["s"]

    # print options
    # print flags

    # Check that either x,y or cat are set
    if (downstream_cat != "") or ((x_outlet != "") and (y_outlet != "")):
        pass
    else:
        gscript.fatal(
            'You must set either "cat" or "x_outlet" and "y_outlet".')

    # NEED TO ADD IF-STATEMENT HERE TO AVOID AUTOMATIC OVERWRITING!!!!!!!!!!!
    if snapflag or (downstream_cat != ""):
        if downstream_cat == "":
            # Need to find outlet pour point -- start by creating a point at this
            # location to use with v.distance
            try:
                v.db_droptable(table="tmp", flags="f")
            except:
                pass
            tmp = vector.Vector("tmp")
            _cols = [
                (u"cat", "INTEGER PRIMARY KEY"),
                (u"x", "DOUBLE PRECISION"),
                (u"y", "DOUBLE PRECISION"),
                (u"strcat", "DOUBLE PRECISION"),
            ]
            tmp.open("w", tab_name="tmp", tab_cols=_cols)
            point0 = Point(x_outlet, y_outlet)
            tmp.write(
                point0,
                cat=1,
                attrs=(str(x_outlet), str(y_outlet), 0),
            )
            tmp.table.conn.commit()
            tmp.build()
            tmp.close()
            # Now v.distance
            gscript.run_command("v.distance",
                                from_="tmp",
                                to=streams,
                                upload="cat",
                                column="strcat")
            # v.distance(_from_='tmp', to=streams, upload='cat', column='strcat')
            downstream_cat = gscript.vector_db_select(map="tmp",
                                                      columns="strcat")
            downstream_cat = int(downstream_cat["values"].values()[0][0])

        # Attributes of streams
        colNames = np.array(vector_db_select(streams)["columns"])
        colValues = np.array(vector_db_select(streams)["values"].values())
        tostream = colValues[:, colNames == "tostream"].astype(int).squeeze()
        cats = colValues[:, colNames == "cat"].astype(
            int).squeeze()  # = "fromstream"

        # Find network
        basincats = [downstream_cat]  # start here
        most_upstream_cats = [
            downstream_cat
        ]  # all of those for which new cats must be sought
        while True:
            if len(most_upstream_cats) == 0:
                break
            tmp = list(most_upstream_cats)  # copy to a temp list: old values
            most_upstream_cats = []  # Ready to accept new values
            for ucat in tmp:
                most_upstream_cats += list(cats[tostream == int(ucat)])
                basincats += most_upstream_cats

        basincats = list(set(list(basincats)))

        basincats_str = ",".join(map(str, basincats))

        # Many basins out -- need to use overwrite flag in future!
        # SQL_OR = 'rnum = ' + ' OR rnum = '.join(map(str, basincats))
        # SQL_OR = 'cat = ' + ' OR cat = '.join(map(str, basincats))
        SQL_LIST = "cat IN (" + ", ".join(map(str, basincats)) + ")"
        if len(basins) > 0:
            v.extract(
                input=basins,
                output=output_basins,
                where=SQL_LIST,
                overwrite=gscript.overwrite(),
                quiet=True,
            )
        if len(streams) > 0:
            v.extract(
                input=streams,
                output=output_streams,
                cats=basincats_str,
                overwrite=gscript.overwrite(),
                quiet=True,
            )

    else:
        # Have coordinates and will limit the area that way.
        r.water_outlet(
            input=draindir,
            output="tmp",
            coordinates=(x_outlet, y_outlet),
            overwrite=True,
        )
        r.to_vect(input="tmp", output="tmp", type="area", overwrite=True)
        v.clip(input=basins, clip="tmp", output=output_basins, overwrite=True)
        basincats = gscript.vector_db_select(
            "basins_inbasin").values()[0].keys()
        basincats_str = ",".join(map(str, basincats))
        if len(streams) > 0:
            v.extract(
                input=streams,
                output=output_streams,
                cats=basincats_str,
                overwrite=gscript.overwrite(),
                quiet=True,
            )

    # If we want to output the pour point location
    if len(output_pour_point) > 0:
        # NEED TO ADD IF-STATEMENT HERE TO AVOID AUTOMATIC OVERWRITING!!!!!!!!!!!
        try:
            v.db_droptable(table=output_pour_point, flags="f")
        except:
            pass
        if snapflag or (downstream_cat != ""):
            _pp = gscript.vector_db_select(map=streams,
                                           columns="x2,y2",
                                           where="cat=" + str(downstream_cat))
            _xy = np.squeeze(_pp["values"].values())
            _x = float(_xy[0])
            _y = float(_xy[1])
        else:
            _x = x_outlet
            _y = y_outlet
        pptmp = vector.Vector(output_pour_point)
        _cols = [
            (u"cat", "INTEGER PRIMARY KEY"),
            (u"x", "DOUBLE PRECISION"),
            (u"y", "DOUBLE PRECISION"),
        ]
        pptmp.open("w", tab_name=output_pour_point, tab_cols=_cols)
        point0 = Point(_x, _y)
        pptmp.write(
            point0,
            cat=1,
            attrs=(str(_x), str(_y)),
        )
        pptmp.table.conn.commit()
        pptmp.build()
        pptmp.close()
Code example #22
def main():
    """
    Links each river segment to the next downstream segment in a tributary 
    network by referencing its category (cat) number in a new column. "0"
    means that the river exits the map.
    """

    options, flags = gscript.parser()
    streams = options['streams']

    streamsTopo = VectorTopo(streams)
    streamsTopo.build()

    # Is this faster than v.to.db?
    # Works more consistently, at least
    v.to_db(map=streams, option='start', columns='x1,y1')
    v.to_db(map=streams, option='end', columns='x2,y2')

    # 1. Get vectorTopo
    streamsTopo.open(mode='rw')
    points_in_streams = []
    cat_of_line_segment = []
    """
    # 2. Get coordinates
    for row in streamsTopo:
        cat_of_line_segment.append(row.cat)
        if type(row) == vector.geometry.Line:
            points_in_streams.append(row)

    # 3. Coordinates of points: 1 = start, 2 = end
    try:
        streamsTopo.table.columns.add('x1','double precision')
    except:
        pass
    try:
        streamsTopo.table.columns.add('y1','double precision')
    except:
        pass
    try:
        streamsTopo.table.columns.add('x2','double precision')
    except:
        pass
    try:
        streamsTopo.table.columns.add('y2','double precision')
    except:
        pass
    try:
        streamsTopo.table.columns.add('tostream','int')
    except:
        pass
    #streamsTopo.table.conn.commit()
    #streamsTopo.build()
    #streamsTopo.close()
    """

    cur = streamsTopo.table.conn.cursor()
    for i in range(len(points_in_streams)):
        cur.execute("update streams set x1=" + str(points_in_streams[i][0].x) +
                    " where cat=" + str(cat_of_line_segment[i]))
        cur.execute("update streams set y1=" + str(points_in_streams[i][0].y) +
                    " where cat=" + str(cat_of_line_segment[i]))
        cur.execute("update streams set x2=" +
                    str(points_in_streams[i][-1].x) + " where cat=" +
                    str(cat_of_line_segment[i]))
        cur.execute("update streams set y2=" +
                    str(points_in_streams[i][-1].y) + " where cat=" +
                    str(cat_of_line_segment[i]))
    streamsTopo.table.conn.commit()
    streamsTopo.build()

    colNames = np.array(vector_db_select('streams')['columns'])
    colValues = np.array(vector_db_select('streams')['values'].values())
    cats = colValues[:,
                     colNames == 'cat'].astype(int).squeeze()  # river number
    print colValues
    xy1 = colValues[:, (colNames == 'x1') + (colNames == 'y1')].astype(
        float)  # upstream
    xy2 = colValues[:, (colNames == 'x2') + (colNames == 'y2')].astype(
        float)  # downstream

    # Build river network
    tocat = []
    for i in range(len(cats)):
        tosegment_mask = np.prod(xy1 == xy2[i], axis=1)
        if np.sum(tosegment_mask) == 0:
            tocat.append(0)
        else:
            tocat.append(cats[tosegment_mask.nonzero()[0][0]])
    tocat = np.asarray(tocat).astype(int)
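    # (In the loop above, xy1 == xy2[i] compares both coordinates, and the
    # row-wise product is 1 only where a segment's start point coincides with
    # segment i's end point -- i.e., its receiving downstream segment.)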

    # This gives us a set of downstream-facing adjacencies.
    # We will update the database with it.
    streamsTopo.build()
    streamsTopo.open('rw')
    cur = streamsTopo.table.conn.cursor()
    for i in range(len(tocat)):
        cur.execute("update streams set tostream=" + str(tocat[i]) +
                    " where cat=" + str(cats[i]))
    streamsTopo.table.conn.commit()
    streamsTopo.build()

    print ""
    print "Done."
    print ""
Code example #23
window = float(options['window'])
accum_mult = float(options['accum_mult'])
if options['units'] == 'm2':
    accum_label = 'Drainage area [m$^2$]'
elif options['units'] == 'km2':
    accum_label = 'Drainage area [km$^2$]'
elif options['units'] == 'cumecs':
    accum_label = 'Water discharge [m$^3$ s$^{-1}$]'
elif options['units'] == 'cfs':
    accum_label = 'Water discharge [cfs]'
else:
    accum_label = 'Flow accumulation [$-$]'
plots = options['plots'].split(',')

# Attributes of streams
colNames = np.array(vector_db_select(options['streams'])['columns'])
colValues = np.array(vector_db_select(options['streams'])['values'].values())
warnings.warn('tostream is not generalized')
tostream = colValues[:,colNames == 'tostream'].astype(int).squeeze()
cats = colValues[:,colNames == 'cat'].astype(int).squeeze() # = "fromstream"

# We can loop over this list to get the shape of the full river network.
selected_cats = []
segment = int(options['cat'])
selected_cats.append(segment)
x = []
z = []

# ParallelTest

# Import data by segment
Code example #24
def main():
    """
    Builds river segments for input to the USGS hydrologic models
    PRMS and GSFLOW.
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()

    # I/O
    streams = options['input']
    segments = options['output']

    # Hydraulic geometry
    ICALC = options['icalc']

    # ICALC=0: Constant depth
    WIDTH1 = options['width1']
    WIDTH2 = options['width2']

    # ICALC=1: Manning
    ROUGHCH = options['roughch']

    # ICALC=2: Manning
    ROUGHBK = options['roughbk']

    # ICALC=3: Power-law relationships (following Leopold and others)
    # The at-a-station default exponents are from Rhodes (1977)
    CDPTH = str(float(options['cdpth']) / 35.3146667)  # cfs to m^3/s
    FDPTH = options['fdpth']
    AWDTH = str(float(options['awdth']) / 35.3146667)  # cfs to m^3/s
    BWDTH = options['bwdth']
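
    # For reference (an assumption based on the comments above, not stated in
    # this snippet): with ICALC=3 the hydraulic geometry presumably follows
    # Leopold-style power laws in discharge Q,
    #     depth = CDPTH * Q**FDPTH
    #     width = AWDTH * Q**BWDTH
    # which is why CDPTH and AWDTH are rescaled when Q is given in m^3/s
    # rather than cfs.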

    ##################################################
    # CHECKING DEPENDENCIES WITH OPTIONAL PARAMETERS #
    ##################################################

    if int(ICALC) == 3:
        if CDPTH and FDPTH and AWDTH and BWDTH:
            pass
        else:
            gscript.fatal('Missing CDPTH, FDPTH, AWDTH, and/or BWDTH. \
                           These are required when ICALC = 3.')

    ###########
    # RUNNING #
    ###########

    # New Columns for Segments
    segment_columns = []
    # Self ID
    segment_columns.append('id integer')  # segment number
    segment_columns.append('ISEG integer')  # segment number
    segment_columns.append('NSEG integer')  # segment number
    # for GSFLOW
    segment_columns.append('ICALC integer')  # 3 for power function
    segment_columns.append(
        'OUTSEG integer')  # downstream segment -- tostream, renumbered
    segment_columns.append('ROUGHCH double precision')  # in-channel Manning's n
    segment_columns.append('ROUGHBK double precision')  # overbank Manning's n
    segment_columns.append('WIDTH1 double precision')  # upstream channel width
    segment_columns.append('WIDTH2 double precision')  # downstream channel width
    segment_columns.append('CDPTH double precision')  # depth coeff
    segment_columns.append('FDPTH double precision')  # depth exp
    segment_columns.append('AWDTH double precision')  # width coeff
    segment_columns.append('BWDTH double precision')  # width exp
    # The below will be all 0
    segment_columns.append(
        'IUPSEG varchar')  # upstream segment ID number, for diversions
    segment_columns.append('FLOW varchar')
    segment_columns.append('RUNOFF varchar')
    segment_columns.append('ETSW varchar')
    segment_columns.append('PPTSW varchar')

    segment_columns = ",".join(segment_columns)

    # CONSIDER THE EFFECT OF OVERWRITING COLUMNS -- WARN FOR THIS
    # IF MAP EXISTS ALREADY?

    # Create a map to work with
    g.copy(vector=(streams, segments), overwrite=gscript.overwrite())
    # and add its columns
    v.db_addcolumn(map=segments, columns=segment_columns)

    # Produce the data table entries
    ##################################
    colNames = np.array(gscript.vector_db_select(segments, layer=1)['columns'])
    colValues = np.array(
        gscript.vector_db_select(segments, layer=1)['values'].values())
    number_of_segments = colValues.shape[0]
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()

    nseg = np.arange(1, len(cats) + 1)
    nseg_cats = []
    for i in range(len(cats)):
        nseg_cats.append((nseg[i], cats[i]))

    segmentsTopo = VectorTopo(segments)
    segmentsTopo.open('rw')
    cur = segmentsTopo.table.conn.cursor()

    # id = nseg (sequential 1..N; the same values go into ISEG and NSEG)
    cur.executemany("update " + segments + " set id=? where cat=?", nseg_cats)
    cur.executemany("update " + segments + " set ISEG=? where cat=?",
                    nseg_cats)
    cur.executemany("update " + segments + " set NSEG=? where cat=?",
                    nseg_cats)

    # outseg = tostream: default is 0 if "tostream" is off-map
    cur.execute("update " + segments + " set OUTSEG=0")
    cur.executemany("update " + segments + " set OUTSEG=? where tostream=?",
                    nseg_cats)

    # Discharge and hydraulic geometry
    cur.execute("update " + segments + " set WIDTH1=" + str(WIDTH1))
    cur.execute("update " + segments + " set WIDTH2=" + str(WIDTH2))
    cur.execute("update " + segments + " set ROUGHCH=" + str(ROUGHCH))
    cur.execute("update " + segments + " set ROUGHBK=" + str(ROUGHBK))
    cur.execute("update " + segments + " set ICALC=" + str(ICALC))
    cur.execute("update " + segments + " set CDPTH=" + str(CDPTH))
    cur.execute("update " + segments + " set FDPTH=" + str(FDPTH))
    cur.execute("update " + segments + " set AWDTH=" + str(AWDTH))
    cur.execute("update " + segments + " set BWDTH=" + str(BWDTH))

    gscript.message('')
    gscript.message('NOTICE: not currently used:')
    gscript.message('IUPSEG, FLOW, RUNOFF, ETSW, and PPTSW.')
    gscript.message('All set to 0.')
    gscript.message('')

    # values that are 0
    cur.execute("update " + segments + " set IUPSEG=" + str(0))
    cur.execute("update " + segments + " set FLOW=" + str(0))
    cur.execute("update " + segments + " set RUNOFF=" + str(0))
    cur.execute("update " + segments + " set ETSW=" + str(0))
    cur.execute("update " + segments + " set PPTSW=" + str(0))

    segmentsTopo.table.conn.commit()
    segmentsTopo.close()
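
The (value, cat) pairs passed to cur.executemany() above are a generic parameterized-update pattern. A standalone sqlite3 sketch of the same pattern against a hypothetical in-memory table (not the GRASS-managed attribute table):

# Standalone illustration of the executemany renumbering pattern above.
import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute("create table segments (cat integer, ISEG integer)")
cur.executemany("insert into segments (cat) values (?)", [(10,), (20,), (30,)])

# Renumber 1..N in table order, exactly like the nseg_cats pairs above.
nseg_cats = [(i + 1, cat) for i, cat in enumerate([10, 20, 30])]
cur.executemany("update segments set ISEG=? where cat=?", nseg_cats)
conn.commit()

print(cur.execute("select cat, ISEG from segments").fetchall())
# [(10, 1), (20, 2), (30, 3)]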
Code example #25
0
def main():
    """
    Builds river reaches for input to the USGS hydrologic model, GSFLOW.
    These reaches link the PRMS stream segments to the MODFLOW grid cells.
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()
    segments = options['segment_input']
    grid = options['grid_input']
    reaches = options['output']
    elevation = options['elevation']
    Smin = options['s_min']
    h_stream = options['h_stream']
    x1 = options['upstream_easting_column_seg']
    y1 = options['upstream_northing_column_seg']
    x2 = options['downstream_easting_column_seg']
    y2 = options['downstream_northing_column_seg']
    tostream = options['tostream_cat_column_seg']
    # Hydraulic parameters
    STRTHICK = options['strthick']
    STRHC1 = options['strhc1']
    THTS = options['thts']
    THTI = options['thti']
    EPS = options['eps']
    UHC = options['uhc']
    # Build reach maps by overlaying segments on grid
    if len(gscript.find_file(segments, element='vector')['name']) > 0:
        v.extract(input=segments, output='GSFLOW_TEMP__', type='line', quiet=True, overwrite=True)
        v.overlay(ainput='GSFLOW_TEMP__', atype='line', binput=grid, output=reaches, operator='and', overwrite=gscript.overwrite(), quiet=True)
        g.remove(type='vector', name='GSFLOW_TEMP__', quiet=True, flags='f')
    else:
        gscript.fatal('No vector file "'+segments+'" found.')

    # Start editing database table
    reachesTopo = VectorTopo(reaches)
    reachesTopo.open('rw')

    # Rename a,b columns
    reachesTopo.table.columns.rename('a_'+x1, 'x1')
    reachesTopo.table.columns.rename('a_'+x2, 'x2')
    reachesTopo.table.columns.rename('a_'+y1, 'y1')
    reachesTopo.table.columns.rename('a_'+y2, 'y2')
    reachesTopo.table.columns.rename('a_NSEG', 'NSEG')
    reachesTopo.table.columns.rename('a_ISEG', 'ISEG')
    reachesTopo.table.columns.rename('a_stream_type', 'stream_type')
    reachesTopo.table.columns.rename('a_type_code', 'type_code')
    reachesTopo.table.columns.rename('a_cat', 'rnum_cat')
    reachesTopo.table.columns.rename('a_'+tostream, 'tostream')
    reachesTopo.table.columns.rename('a_id', 'segment_id')
    reachesTopo.table.columns.rename('a_OUTSEG', 'OUTSEG')
    reachesTopo.table.columns.rename('b_row', 'row')
    reachesTopo.table.columns.rename('b_col', 'col')
    reachesTopo.table.columns.rename('b_id', 'cell_id')

    # Drop unnecessary columns
    cols = reachesTopo.table.columns.names()
    for col in cols:
        if (col[:2] == 'a_') or (col[:2] == 'b_'):
            reachesTopo.table.columns.drop(col)

    # Add new columns to 'reaches'
    reachesTopo.table.columns.add('KRCH', 'integer')
    reachesTopo.table.columns.add('IRCH', 'integer')
    reachesTopo.table.columns.add('JRCH', 'integer')
    reachesTopo.table.columns.add('IREACH', 'integer')
    reachesTopo.table.columns.add('RCHLEN', 'double precision')
    reachesTopo.table.columns.add('STRTOP', 'double precision')
    reachesTopo.table.columns.add('SLOPE', 'double precision')
    reachesTopo.table.columns.add('STRTHICK', 'double precision')
    reachesTopo.table.columns.add('STRHC1', 'double precision')
    reachesTopo.table.columns.add('THTS', 'double precision')
    reachesTopo.table.columns.add('THTI', 'double precision')
    reachesTopo.table.columns.add('EPS', 'double precision')
    reachesTopo.table.columns.add('UHC', 'double precision')
    reachesTopo.table.columns.add('xr1', 'double precision')
    reachesTopo.table.columns.add('xr2', 'double precision')
    reachesTopo.table.columns.add('yr1', 'double precision')
    reachesTopo.table.columns.add('yr2', 'double precision')

    # Commit columns before editing (necessary?)
    reachesTopo.table.conn.commit()
    reachesTopo.close()

    # Update some columns that can be done now
    reachesTopo.open('rw')
    colNames = np.array(gscript.vector_db_select(reaches, layer=1)['columns'])
    colValues = np.array(gscript.vector_db_select(reaches, layer=1)['values'].values())
    cats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    nseg = np.arange(1, len(cats)+1)
    nseg_cats = []
    for i in range(len(cats)):
        nseg_cats.append( (nseg[i], cats[i]) )
    cur = reachesTopo.table.conn.cursor()
    # Hydrogeologic properties
    cur.execute("update "+reaches+" set STRTHICK="+str(STRTHICK))
    cur.execute("update "+reaches+" set STRHC1="+str(STRHC1))
    cur.execute("update "+reaches+" set THTS="+str(THTS))
    cur.execute("update "+reaches+" set THTI="+str(THTI))
    cur.execute("update "+reaches+" set EPS="+str(EPS))
    cur.execute("update "+reaches+" set UHC="+str(UHC))
    # Grid properties
    cur.execute("update "+reaches+" set KRCH=1") # Top layer: unchangable
    cur.executemany("update "+reaches+" set IRCH=? where row=?", nseg_cats)
    cur.executemany("update "+reaches+" set JRCH=? where col=?", nseg_cats)
    reachesTopo.table.conn.commit()
    reachesTopo.close()
    v.to_db(map=reaches, columns='RCHLEN', option='length', quiet=True)


    # Still to go after these:
    # STRTOP (added with slope)
    # IREACH (whole next section dedicated to this)
    # SLOPE (need z_start and z_end)

    # Now, the light stuff is over: time to build the reach order
    v.to_db(map=reaches, option='start', columns='xr1,yr1')
    v.to_db(map=reaches, option='end', columns='xr2,yr2')

    # Now just sort by category, find which stream has the same xr1 and yr1 as
    # x1 and y1 (or a_x1, a_y1) and then find where its endpoint matches another 
    # starting point and move down the line.
    # v.db.select reaches col=cat,a_id,xr1,xr2 where="a_x1 = xr1"

    # First, get the starting coordinates of each stream segment
    # and a set of river ID's (ordered from 1...N)
    colNames = np.array(gscript.vector_db_select(segments, layer=1)['columns'])
    colValues = np.array(gscript.vector_db_select(segments, layer=1)['values'].values())
    number_of_segments = colValues.shape[0]
    segment_x1s = colValues[:,colNames == 'x1'].astype(float).squeeze()
    segment_y1s = colValues[:,colNames == 'y1'].astype(float).squeeze()
    segment_ids = colValues[:,colNames == 'id'].astype(float).squeeze()

    # Then move back to the reaches map to produce the ordering
    colNames = np.array(gscript.vector_db_select(reaches, layer=1)['columns'])
    colValues = np.array(gscript.vector_db_select(reaches, layer=1)['values'].values())
    reach_cats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    reach_x1s = colValues[:,colNames == 'xr1'].astype(float).squeeze()
    reach_y1s = colValues[:,colNames == 'yr1'].astype(float).squeeze()
    reach_x2s = colValues[:,colNames == 'xr2'].astype(float).squeeze()
    reach_y2s = colValues[:,colNames == 'yr2'].astype(float).squeeze()
    segment_ids__reach = colValues[:,colNames == 'segment_id'].astype(float).squeeze()

    for segment_id in segment_ids:
        reach_order_cats = []
        downstream_directed = []
        ssel = segment_ids == segment_id
        rsel = segment_ids__reach == segment_id # selector
        # Find first segment: x1y1 first here, but not necessarily later
        downstream_directed.append(1)
        _x_match = reach_x1s[rsel] == segment_x1s[ssel]
        _y_match = reach_y1s[rsel] == segment_y1s[ssel]
        _i_match = _x_match * _y_match
        x1y1 = True # false if x2y2
        # Find cat
        _cat = int(reach_cats[rsel][_x_match * _y_match])
        reach_order_cats.append(_cat)
        # Get end of reach = start of next one
        reach_x_end = float(reach_x2s[reach_cats == _cat])
        reach_y_end = float(reach_y2s[reach_cats == _cat])
        while _i_match.any():
            _x_match = reach_x1s[rsel] == reach_x_end
            _y_match = reach_y1s[rsel] == reach_y_end
            _i_match = _x_match * _y_match
            if _i_match.any():
                _cat = int(reach_cats[rsel][_x_match * _y_match])
                reach_x_end = float(reach_x2s[reach_cats == _cat])
                reach_y_end = float(reach_y2s[reach_cats == _cat])
                reach_order_cats.append(_cat)
        print(len(reach_order_cats), len(reach_cats[rsel]))
          
        # Reach order to database table
        reach_number__reach_order_cats = []
        for i in range(len(reach_order_cats)):
            reach_number__reach_order_cats.append( (i+1, reach_order_cats[i]) )
        reachesTopo = VectorTopo(reaches)
        reachesTopo.open('rw')
        cur = reachesTopo.table.conn.cursor()
        cur.executemany("update "+reaches+" set IREACH=? where cat=?", 
                        reach_number__reach_order_cats)
        reachesTopo.table.conn.commit()
        reachesTopo.close()
      

    # TOP AND BOTTOM ARE OUT OF ORDER: SOME SEGS ARE BACKWARDS. UGH!!!!
    # NEED TO GET THEM IN ORDER TO GET THE Z VALUES AT START AND END

    # Compute slope and starting elevations from the elevations at the start and
    # end of the reaches and the length of each reach
    gscript.message('Obtaining elevation values from raster: may take time.')
    v.db_addcolumn(map=reaches, columns='zr1 double precision, zr2 double precision')
    zr1 = []
    zr2 = []
    for i in range(len(reach_cats)):
        _x = reach_x1s[i]
        _y = reach_y1s[i]
        _z = float(list(gscript.parse_command('r.what', map=elevation, coordinates=str(_x)+','+str(_y)).keys())[0].split('|')[-1])
        zr1.append(_z)
        _x = reach_x2s[i]
        _y = reach_y2s[i]
        _z = float(list(gscript.parse_command('r.what', map=elevation, coordinates=str(_x)+','+str(_y)).keys())[0].split('|')[-1])
        zr2.append(_z)

    zr1_cats = []
    zr2_cats = []
    for i in range(len(reach_cats)):
        zr1_cats.append( (zr1[i], reach_cats[i]) )
        zr2_cats.append( (zr2[i], reach_cats[i]) )

    reachesTopo = VectorTopo(reaches)
    reachesTopo.open('rw')
    cur = reachesTopo.table.conn.cursor()
    cur.executemany("update "+reaches+" set zr1=? where cat=?", zr1_cats)
    cur.executemany("update "+reaches+" set zr2=? where cat=?", zr2_cats)
    reachesTopo.table.conn.commit()
    reachesTopo.close()

    # Use these to create slope -- backwards possible on DEM!
    v.db_update(map=reaches, column='SLOPE', value='(zr1 - zr2)/RCHLEN')
    v.db_update(map=reaches, column='SLOPE', value=Smin, where='SLOPE <= '+str(Smin))

    # srtm_local_filled_grid = srtm_local_filled @ 200m (i.e. current grid)
    #  resolution
    # r.to.vect in=srtm_local_filled_grid out=srtm_local_filled_grid col=z type=area --o#
    # NOT SURE IF IT IS BEST TO USE MEAN ELEVATION OR TOP ELEVATION!!!!!!!!!!!!!!!!!!!!!!!
    v.db_addcolumn(map=reaches, columns='z_topo_mean double precision')
    v.what_rast(map=reaches, raster=elevation, column='z_topo_mean')#, query_column='z')
    v.db_update(map=reaches, column='STRTOP', value='z_topo_mean -'+str(h_stream), quiet=True)
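
The reach-ordering loop above is a chain walk: start at the reach whose start point coincides with the segment's upstream end, then repeatedly jump to the reach that starts where the current one ends. A minimal sketch of the same walk on hypothetical one-dimensional coordinates:

# Chain-walk sketch of the reach ordering above (toy 1-D data).
import numpy as np

reach_cats = np.array([7, 5, 9])       # cats in arbitrary (unsorted) order
reach_x1s = np.array([10., 0., 5.])    # reach start coordinates
reach_x2s = np.array([15., 5., 10.])   # reach end coordinates
segment_x1 = 0.                        # segment's upstream end

order = []
_match = reach_x1s == segment_x1
while _match.any():
    _cat = int(reach_cats[_match][0])
    order.append(_cat)
    _end = reach_x2s[reach_cats == _cat][0]  # end of this reach
    _match = reach_x1s == _end               # next reach starts here

print(order)  # [5, 9, 7]: reach cats in downstream order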
Code example #26
0
def main():
    """
    Builds river reaches for input to the USGS hydrologic model, GSFLOW.
    These reaches link the PRMS stream segments to the MODFLOW grid cells.
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()
    segments = options["segment_input"]
    grid = options["grid_input"]
    reaches = options["output"]
    elevation = options["elevation"]
    Smin = options["s_min"]
    h_stream = options["h_stream"]
    x1 = options["upstream_easting_column_seg"]
    y1 = options["upstream_northing_column_seg"]
    x2 = options["downstream_easting_column_seg"]
    y2 = options["downstream_northing_column_seg"]
    tostream = options["tostream_cat_column_seg"]
    # Hydraulic parameters
    STRTHICK = options["strthick"]
    STRHC1 = options["strhc1"]
    THTS = options["thts"]
    THTI = options["thti"]
    EPS = options["eps"]
    UHC = options["uhc"]
    # Build reach maps by overlaying segments on grid
    if len(gscript.find_file(segments, element="vector")["name"]) > 0:
        v.extract(
            input=segments,
            output="GSFLOW_TEMP__",
            type="line",
            quiet=True,
            overwrite=True,
        )
        v.overlay(
            ainput="GSFLOW_TEMP__",
            atype="line",
            binput=grid,
            output=reaches,
            operator="and",
            overwrite=gscript.overwrite(),
            quiet=True,
        )
        g.remove(type="vector", name="GSFLOW_TEMP__", quiet=True, flags="f")
    else:
        gscript.fatal('No vector file "' + segments + '" found.')

    # Start editing database table
    reachesTopo = VectorTopo(reaches)
    reachesTopo.open("rw")

    # Rename a,b columns
    reachesTopo.table.columns.rename("a_" + x1, "x1")
    reachesTopo.table.columns.rename("a_" + x2, "x2")
    reachesTopo.table.columns.rename("a_" + y1, "y1")
    reachesTopo.table.columns.rename("a_" + y2, "y2")
    reachesTopo.table.columns.rename("a_NSEG", "NSEG")
    reachesTopo.table.columns.rename("a_ISEG", "ISEG")
    reachesTopo.table.columns.rename("a_stream_type", "stream_type")
    reachesTopo.table.columns.rename("a_type_code", "type_code")
    reachesTopo.table.columns.rename("a_cat", "rnum_cat")
    reachesTopo.table.columns.rename("a_" + tostream, "tostream")
    reachesTopo.table.columns.rename("a_id", "segment_id")
    reachesTopo.table.columns.rename("a_OUTSEG", "OUTSEG")
    reachesTopo.table.columns.rename("b_row", "row")
    reachesTopo.table.columns.rename("b_col", "col")
    reachesTopo.table.columns.rename("b_id", "cell_id")

    # Drop unnecessary columns
    cols = reachesTopo.table.columns.names()
    for col in cols:
        if (col[:2] == "a_") or (col[:2] == "b_"):
            reachesTopo.table.columns.drop(col)

    # Add new columns to 'reaches'
    reachesTopo.table.columns.add("KRCH", "integer")
    reachesTopo.table.columns.add("IRCH", "integer")
    reachesTopo.table.columns.add("JRCH", "integer")
    reachesTopo.table.columns.add("IREACH", "integer")
    reachesTopo.table.columns.add("RCHLEN", "double precision")
    reachesTopo.table.columns.add("STRTOP", "double precision")
    reachesTopo.table.columns.add("SLOPE", "double precision")
    reachesTopo.table.columns.add("STRTHICK", "double precision")
    reachesTopo.table.columns.add("STRHC1", "double precision")
    reachesTopo.table.columns.add("THTS", "double precision")
    reachesTopo.table.columns.add("THTI", "double precision")
    reachesTopo.table.columns.add("EPS", "double precision")
    reachesTopo.table.columns.add("UHC", "double precision")
    reachesTopo.table.columns.add("xr1", "double precision")
    reachesTopo.table.columns.add("xr2", "double precision")
    reachesTopo.table.columns.add("yr1", "double precision")
    reachesTopo.table.columns.add("yr2", "double precision")

    # Commit columns before editing (necessary?)
    reachesTopo.table.conn.commit()
    reachesTopo.close()

    # Update some columns that can be done now
    reachesTopo.open("rw")
    colNames = np.array(gscript.vector_db_select(reaches, layer=1)["columns"])
    colValues = np.array(
        gscript.vector_db_select(reaches, layer=1)["values"].values())
    cats = colValues[:, colNames == "cat"].astype(int).squeeze()
    nseg = np.arange(1, len(cats) + 1)
    nseg_cats = []
    for i in range(len(cats)):
        nseg_cats.append((nseg[i], cats[i]))
    cur = reachesTopo.table.conn.cursor()
    # Hydrogeologic properties
    cur.execute("update " + reaches + " set STRTHICK=" + str(STRTHICK))
    cur.execute("update " + reaches + " set STRHC1=" + str(STRHC1))
    cur.execute("update " + reaches + " set THTS=" + str(THTS))
    cur.execute("update " + reaches + " set THTI=" + str(THTI))
    cur.execute("update " + reaches + " set EPS=" + str(EPS))
    cur.execute("update " + reaches + " set UHC=" + str(UHC))
    # Grid properties
    cur.execute("update " + reaches + " set KRCH=1")  # Top layer: unchangable
    cur.executemany("update " + reaches + " set IRCH=? where row=?", nseg_cats)
    cur.executemany("update " + reaches + " set JRCH=? where col=?", nseg_cats)
    reachesTopo.table.conn.commit()
    reachesTopo.close()
    v.to_db(map=reaches, columns="RCHLEN", option="length", quiet=True)

    # Still to go after these:
    # STRTOP (added with slope)
    # IREACH (whole next section dedicated to this)
    # SLOPE (need z_start and z_end)

    # Now, the light stuff is over: time to build the reach order
    v.to_db(map=reaches, option="start", columns="xr1,yr1")
    v.to_db(map=reaches, option="end", columns="xr2,yr2")

    # Now just sort by category, find which stream has the same xr1 and yr1 as
    # x1 and y1 (or a_x1, a_y1) and then find where its endpoint matches another
    # starting point and move down the line.
    # v.db.select reaches col=cat,a_id,xr1,xr2 where="a_x1 = xr1"

    # First, get the starting coordinates of each stream segment
    # and a set of river ID's (ordered from 1...N)
    colNames = np.array(gscript.vector_db_select(segments, layer=1)["columns"])
    colValues = np.array(
        gscript.vector_db_select(segments, layer=1)["values"].values())
    number_of_segments = colValues.shape[0]
    segment_x1s = colValues[:, colNames == "x1"].astype(float).squeeze()
    segment_y1s = colValues[:, colNames == "y1"].astype(float).squeeze()
    segment_ids = colValues[:, colNames == "id"].astype(float).squeeze()

    # Then move back to the reaches map to produce the ordering
    colNames = np.array(gscript.vector_db_select(reaches, layer=1)["columns"])
    colValues = np.array(
        gscript.vector_db_select(reaches, layer=1)["values"].values())
    reach_cats = colValues[:, colNames == "cat"].astype(int).squeeze()
    reach_x1s = colValues[:, colNames == "xr1"].astype(float).squeeze()
    reach_y1s = colValues[:, colNames == "yr1"].astype(float).squeeze()
    reach_x2s = colValues[:, colNames == "xr2"].astype(float).squeeze()
    reach_y2s = colValues[:, colNames == "yr2"].astype(float).squeeze()
    segment_ids__reach = colValues[:, colNames == "segment_id"].astype(
        float).squeeze()

    for segment_id in segment_ids:
        reach_order_cats = []
        downstream_directed = []
        ssel = segment_ids == segment_id
        rsel = segment_ids__reach == segment_id  # selector
        # Find first segment: x1y1 first here, but not necessarily later
        downstream_directed.append(1)
        _x_match = reach_x1s[rsel] == segment_x1s[ssel]
        _y_match = reach_y1s[rsel] == segment_y1s[ssel]
        _i_match = _x_match * _y_match
        x1y1 = True  # false if x2y2
        # Find cat
        _cat = int(reach_cats[rsel][_x_match * _y_match])
        reach_order_cats.append(_cat)
        # Get end of reach = start of next one
        reach_x_end = float(reach_x2s[reach_cats == _cat])
        reach_y_end = float(reach_y2s[reach_cats == _cat])
        while _i_match.any():
            _x_match = reach_x1s[rsel] == reach_x_end
            _y_match = reach_y1s[rsel] == reach_y_end
            _i_match = _x_match * _y_match
            if _i_match.any():
                _cat = int(reach_cats[rsel][_x_match * _y_match])
                reach_x_end = float(reach_x2s[reach_cats == _cat])
                reach_y_end = float(reach_y2s[reach_cats == _cat])
                reach_order_cats.append(_cat)
        _message = str(len(reach_order_cats)) + " " + str(len(
            reach_cats[rsel]))
        gscript.message(_message)

        # Reach order to database table
        reach_number__reach_order_cats = []
        for i in range(len(reach_order_cats)):
            reach_number__reach_order_cats.append((i + 1, reach_order_cats[i]))
        reachesTopo = VectorTopo(reaches)
        reachesTopo.open("rw")
        cur = reachesTopo.table.conn.cursor()
        cur.executemany(
            "update " + reaches + " set IREACH=? where cat=?",
            reach_number__reach_order_cats,
        )
        reachesTopo.table.conn.commit()
        reachesTopo.close()

    # TOP AND BOTTOM ARE OUT OF ORDER: SOME SEGS ARE BACKWARDS. UGH!!!!
    # NEED TO GET THEM IN ORDER TO GET THE Z VALUES AT START AND END

    # 2018.10.01: Updating this to use the computational region for the DEM
    g.region(raster=elevation)

    # Compute slope and starting elevations from the elevations at the start and
    # end of the reaches and the length of each reach]

    gscript.message("Obtaining elevation values from raster: may take time.")
    v.db_addcolumn(map=reaches,
                   columns="zr1 double precision, zr2 double precision")
    zr1 = []
    zr2 = []
    for i in range(len(reach_cats)):
        _x = reach_x1s[i]
        _y = reach_y1s[i]
        # print _x, _y
        _z = float(
            list(gscript.parse_command("r.what",
                                       map=elevation,
                                       coordinates=str(_x) + "," +
                                       str(_y)).keys())[0].split("|")[-1])
        zr1.append(_z)
        _x = reach_x2s[i]
        _y = reach_y2s[i]
        _z = float(
            list(gscript.parse_command("r.what",
                                       map=elevation,
                                       coordinates=str(_x) + "," +
                                       str(_y)).keys())[0].split("|")[-1])
        zr2.append(_z)

    zr1_cats = []
    zr2_cats = []
    for i in range(len(reach_cats)):
        zr1_cats.append((zr1[i], reach_cats[i]))
        zr2_cats.append((zr2[i], reach_cats[i]))

    reachesTopo = VectorTopo(reaches)
    reachesTopo.open("rw")
    cur = reachesTopo.table.conn.cursor()
    cur.executemany("update " + reaches + " set zr1=? where cat=?", zr1_cats)
    cur.executemany("update " + reaches + " set zr2=? where cat=?", zr2_cats)
    reachesTopo.table.conn.commit()
    reachesTopo.close()

    # Use these to create slope -- backwards possible on DEM!
    v.db_update(map=reaches, column="SLOPE", value="(zr1 - zr2)/RCHLEN")
    v.db_update(map=reaches,
                column="SLOPE",
                value=Smin,
                where="SLOPE <= " + str(Smin))

    # srtm_local_filled_grid = srtm_local_filled @ 200m (i.e. current grid)
    #  resolution
    # r.to.vect in=srtm_local_filled_grid out=srtm_local_filled_grid col=z type=area --o#
    # NOT SURE IF IT IS BEST TO USE MEAN ELEVATION OR TOP ELEVATION!!!!!!!!!!!!!!!!!!!!!!!
    v.db_addcolumn(map=reaches, columns="z_topo_mean double precision")
    v.what_rast(map=reaches, raster=elevation,
                column="z_topo_mean")  # , query_column='z')
    v.db_update(map=reaches,
                column="STRTOP",
                value="z_topo_mean -" + str(h_stream),
                quiet=True)
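
The final two v.db_update calls compute rise-over-run slope and clamp anything at or below Smin (reaches digitized "backwards" on the DEM can yield zero or negative slopes). Numerically the operation is just:

# Slope computation and clamping, as done by the v.db_update calls above.
import numpy as np

zr1 = np.array([105.0, 100.0, 98.0])   # upstream-end elevations (hypothetical)
zr2 = np.array([100.0, 101.0, 97.5])   # downstream-end elevations
rchlen = np.array([50.0, 50.0, 50.0])  # reach lengths
Smin = 1e-4                            # minimum allowed slope

slope = (zr1 - zr2) / rchlen
slope[slope <= Smin] = Smin  # same effect as where="SLOPE <= Smin"

print(slope)  # 0.1, 0.0001 (clamped up from -0.02), 0.01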
Code example #27
0
def main():
    """
    Input for GSFLOW
    """

    reg = grass.region()

    options, flags = grass.parser()

    basin_mouth_E = options['E']
    basin_mouth_N = options['N']

    accum_thresh = options['threshold']

    # Create drainage direction, flow accumulation, and rivers

    grass.mapcalc('streams_unthinned = flowAccum > '+str(accum_thresh), overwrite=True)
    grass.run_command('r.null', map='streams_unthinned', setnull=0)
    grass.run_command('r.thin', input='streams_unthinned', output='streams', overwrite=True)
    grass.run_command('r.to.vect', input='streams', output='streams_raw', type='line', overwrite=True)
    # Clean with a 1-cell threshold to remove loops created when diagonal
    # streams intersect one another
    grass.run_command('v.clean', input='streams_raw', output='streams', tool='snap', threshold=1.42*(grass.region()['nsres'] + grass.region()['ewres'])/2., flags='c', overwrite=True)
    grass.run_command('v.to.rast', input='streams', output='streams_unthinned', use='val', val=1, overwrite=True)
    grass.run_command('r.thin', input='streams_unthinned', output='streams', overwrite=True)
    grass.run_command('r.to.vect', input='streams', output='streams', type='line', overwrite=True)
    grass.run_command('v.to.rast', input='streams', output='streams', use='cat', overwrite=True)
    

    # Include this?

    ###############
    # PLACEHOLDER #
    ###################################################################
    # To do in near future: limit to this basin
    ###################################################################

    # Next, get the order of basins the old-fashioned way: coordinates of endpoints of lines
    # Because I can't use GRASS to query multiple points
    #grass.run_command('v.extract', input='streams', output='streamSegments', type='line', overwrite=True)
    # Maybe I don't even need nodes! 9/4/16 -- nope, doesn't seem so.
    grass.run_command('g.copy', vect='streams,streamSegments')
    grass.run_command('v.db.addcolumn', map='streamSegments', columns='z double precision, flow_accum double precision, x1 double precision, y1 double precision, x2 double precision, y2 double precision')
    grass.run_command('v.to.db', map='streamSegments', option='start', columns='x1, y1')
    grass.run_command('v.to.db', map='streamSegments', option='end', columns='x2, y2')

    colNames = np.array(grass.vector_db_select('streamSegments')['columns'])
    colValues = np.array(grass.vector_db_select('streamSegments')['values'].values())
    cats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    xy1 = colValues[:,(colNames == 'x1') + (colNames == 'y1')].astype(float)
    xy2 = colValues[:,(colNames == 'x2') + (colNames == 'y2')].astype(float)
    xy  = np.vstack((xy1, xy2))

    # xy1: UPSTREAM
    # xy2: DOWNSTREAM
    # (I checked.)
    # So now can use this information to find headwaters and mouths

    # Not sure that this is necessary
    nsegs_at_point_1 = []
    nsegs_at_point_2 = []
    for row in xy1:
      nsegs_at_point_1.append(np.sum( np.prod(xy == row, axis=1)))
    for row in xy2:
      nsegs_at_point_2.append(np.sum( np.prod(xy == row, axis=1)))
    nsegs_at_point_1 = np.array(nsegs_at_point_1)
    nsegs_at_point_2 = np.array(nsegs_at_point_2)
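
Counting how many times each endpoint occurs in the stacked xy array is what distinguishes headwaters (one occurrence) from confluences (three or more). A minimal sketch with hypothetical points:

# Endpoint-occurrence counting, as in nsegs_at_point_1/2 above (toy data).
import numpy as np

# Segments 1 and 2 join at (1, 1); segment 3 continues downstream from there.
xy1 = np.array([[0., 0.], [0., 2.], [1., 1.]])  # upstream endpoints
xy2 = np.array([[1., 1.], [1., 1.], [2., 1.]])  # downstream endpoints
xy = np.vstack((xy1, xy2))

nsegs_at_point_1 = np.array(
    [np.sum(np.prod(xy == row, axis=1)) for row in xy1])
print(nsegs_at_point_1)  # [1 1 3] -- (1, 1) occurs 3 times: a confluence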
Code example #28
0
def main():
    """
    Links each river segment to the next downstream segment in a tributary
    network by referencing its category (cat) number in a new column. "0"
    means that the river exits the map.
    """
    import matplotlib  # required by windows
    matplotlib.use('wxAGG')  # required by windows
    from matplotlib import pyplot as plt

    options, flags = gscript.parser()

    # Parsing
    window = float(options['window'])
    accum_mult = float(options['accum_mult'])
    if options['units'] == 'm2':
        accum_label = 'Drainage area [m$^2$]'
    elif options['units'] == 'km2':
        accum_label = 'Drainage area [km$^2$]'
    elif options['units'] == 'cumecs':
        accum_label = 'Water discharge [m$^3$ s$^{-1}$]'
    elif options['units'] == 'cfs':
        accum_label = 'Water discharge [cfs]'
    else:
        accum_label = 'Flow accumulation [$-$]'
    plots = options['plots'].split(',')

    # Attributes of streams
    colNames = np.array(vector_db_select(options['streams'])['columns'])
    colValues = np.array(
        vector_db_select(options['streams'])['values'].values())
    tostream = colValues[:, colNames == 'tostream'].astype(int).squeeze()
    cats = colValues[:,
                     colNames == 'cat'].astype(int).squeeze()  # = "fromstream"

    # We can loop over this list to get the shape of the full river network.
    selected_cats = []
    segment = int(options['cat'])
    selected_cats.append(segment)
    x = []
    z = []
    if options['direction'] == 'downstream':
        # Get network
        gscript.message("Network")
        while selected_cats[-1] != 0:
            selected_cats.append(int(tostream[cats == selected_cats[-1]]))
        x.append(selected_cats[-1])
        selected_cats = selected_cats[:-1]  # remove 0 at end

        # Extract x points in network
        data = vector.VectorTopo(
            options['streams'])  # Create a VectorTopo object
        data.open('r')  # Open this object for reading

        coords = []
        _i = 0
        for i in range(len(data)):
            if isinstance(data.read(i + 1), vector.geometry.Line):
                if data.read(i + 1).cat in selected_cats:
                    coords.append(data.read(i + 1).to_array())
                    gscript.core.percent(_i, len(selected_cats),
                                         100. / len(selected_cats))
                    _i += 1
        gscript.core.percent(1, 1, 1)
        coords = np.vstack(np.array(coords))

        _dx = np.diff(coords[:, 0])
        _dy = np.diff(coords[:, 1])
        x_downstream_0 = np.hstack((0, np.cumsum((_dx**2 + _dy**2)**.5)))
        x_downstream = x_downstream_0.copy()

    elif options['direction'] == 'upstream':
        #terminalCATS = list(options['cat'])
        #while terminalCATS:
        #
        print("Upstream direction not yet active!")
        return
        """
        # Add new lists for each successive upstream river
        river_is_upstream =
        while
        full_river_cats
        """

    # Network extraction
    if options['outstream'] != '':
        selected_cats_str = list(np.array(selected_cats).astype(str))
        selected_cats_csv = ','.join(selected_cats_str)
        v.extract(input=options['streams'],
                  output=options['outstream'],
                  cats=selected_cats_csv,
                  overwrite=gscript.overwrite())

    # Analysis
    gscript.message("Elevation")
    if options['elevation']:
        _include_z = True
        DEM = RasterRow(options['elevation'])
        DEM.open('r')
        z = []
        _i = 0
        _lasti = 0
        for row in coords:
            z.append(DEM.get_value(Point(row[0], row[1])))
            if float(_i) / len(coords) > float(_lasti) / len(coords):
                gscript.core.percent(_i, len(coords), np.floor(_i - _lasti))
            _lasti = _i
            _i += 1
        DEM.close()
        z = np.array(z)
        if options['window'] != '':
            x_downstream, z = moving_average(x_downstream_0, z, window)
        gscript.core.percent(1, 1, 1)
    else:
        _include_z = False
    gscript.message("Slope")
    if options['slope']:
        _include_S = True
        slope = RasterRow(options['slope'])
        slope.open('r')
        S = []
        _i = 0
        _lasti = 0
        for row in coords:
            S.append(slope.get_value(Point(row[0], row[1])))
            if float(_i) / len(coords) > float(_lasti) / len(coords):
                gscript.core.percent(_i, len(coords), np.floor(_i - _lasti))
            _lasti = _i
            _i += 1
        slope.close()
        S = np.array(S)
        S_0 = S.copy()
        if options['window'] != '':
            x_downstream, S = moving_average(x_downstream_0, S, window)
        gscript.core.percent(1, 1, 1)
    else:
        _include_S = False
    gscript.message("Accumulation")
    if options['accumulation']:
        _include_A = True
        accumulation = RasterRow(options['accumulation'])
        accumulation.open('r')
        A = []
        _i = 0
        _lasti = 0
        for row in coords:
            A.append(
                accumulation.get_value(Point(row[0], row[1])) * accum_mult)
            if float(_i) / len(coords) > float(_lasti) / len(coords):
                gscript.core.percent(_i, len(coords), np.floor(_i - _lasti))
            _lasti = _i
            _i += 1
        accumulation.close()
        A = np.array(A)
        A_0 = A.copy()
        if options['window'] != '':
            x_downstream, A = moving_average(x_downstream_0, A, window)
        gscript.core.percent(1, 1, 1)
    else:
        _include_A = False

    # Plotting
    if 'LongProfile' in plots:
        plt.figure()
        plt.plot(x_downstream / 1000., z, 'k-', linewidth=2)
        plt.xlabel('Distance downstream [km]', fontsize=16)
        plt.ylabel('Elevation [m]', fontsize=20)
        plt.tight_layout()
    if 'SlopeAccum' in plots:
        plt.figure()
        plt.loglog(A, S, 'ko', linewidth=2)
        plt.xlabel(accum_label, fontsize=20)
        plt.ylabel('Slope [$-$]', fontsize=20)
        plt.tight_layout()
    if 'SlopeDistance' in plots:
        plt.figure()
        plt.plot(x_downstream / 1000., S, 'k-', linewidth=2)
        plt.xlabel('Distance downstream [km]', fontsize=16)
        plt.ylabel('Slope [$-$]', fontsize=20)
        plt.tight_layout()
    if 'AccumDistance' in plots:
        plt.figure()
        plt.plot(x_downstream / 1000., A, 'k-', linewidth=2)
        plt.xlabel('Distance downstream [km]', fontsize=16)
        plt.ylabel(accum_label, fontsize=20)
        plt.tight_layout()
    plt.show()

    # Saving data
    if options['outfile_original'] != '':
        header = ['x_downstream', 'E', 'N']
        outfile = np.hstack((np.expand_dims(x_downstream_0, axis=1), coords))
        if _include_S:
            header.append('slope')
            outfile = np.hstack((outfile, np.expand_dims(S_0, axis=1)))
        if _include_A:
            if (options['units'] == 'm2') or (options['units'] == 'km2'):
                header.append('drainage_area_' + options['units'])
            elif (options['units'] == 'cumecs') or (options['units'] == 'cfs'):
                header.append('water_discharge_' + options['units'])
            else:
                header.append('flow_accumulation_arbitrary_units')
            outfile = np.hstack((outfile, np.expand_dims(A_0, axis=1)))
        header = np.array(header)
        outfile = np.vstack((header, outfile))
        np.savetxt(options['outfile_original'], outfile, '%s')
    if options['outfile_smoothed'] != '':
        header = ['x_downstream', 'E', 'N']
        # E, N on smoothed grid
        x_downstream, E = moving_average(x_downstream_0, coords[:, 0], window)
        x_downstream, N = moving_average(x_downstream_0, coords[:, 1], window)
        # Back to output
        outfile = np.hstack((np.expand_dims(x_downstream,
                                            axis=1), np.expand_dims(E, axis=1),
                             np.expand_dims(N, axis=1)))
        if _include_S:
            header.append('slope')
            outfile = np.hstack((outfile, np.expand_dims(S, axis=1)))
        if _include_A:
            if (options['units'] == 'm2') or (options['units'] == 'km2'):
                header.append('drainage_area_' + options['units'])
            elif (options['units'] == 'cumecs') or (options['units'] == 'cfs'):
                header.append('water_discharge_' + options['units'])
            else:
                header.append('flow_accumulation_arbitrary_units')
            outfile = np.hstack((outfile, np.expand_dims(A, axis=1)))
        header = np.array(header)
        outfile = np.vstack((header, outfile))
        np.savetxt(options['outfile_smoothed'], outfile, '%s')
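
moving_average() is called above with the signature moving_average(x, y, window) but is not shown in this snippet. A plausible minimal implementation, assuming a centered window measured in the same distance units as x (the module's actual version may differ):

# Hypothetical stand-in for the moving_average() referenced above.
import numpy as np

def moving_average(x, y, window):
    """Centered moving average of y over a distance window along x.

    For each position x[i], average all y values whose x lies within
    +/- window/2 of x[i]. Returns (x, y_smoothed).
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    y_smoothed = np.empty_like(y)
    half = window / 2.
    for i, xi in enumerate(x):
        sel = (x >= xi - half) & (x <= xi + half)
        y_smoothed[i] = np.mean(y[sel])
    return x, y_smoothed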
Code example #29
0
File: frame.py Project: felipebetancur/grass-ci
    def _getSTVDData(self, timeseries):
        """Load data and read properties
        :param list timeseries: a list of timeseries
        """

        mode = None
        unit = None
        cats = None
        attribute = self.attribute.GetValue()
        if self.cats.GetValue() != '':
            cats = self.cats.GetValue().split(',')
        if cats and self.poi:
            GMessage(message=_("Both coordinates and categories are set, "
                               "coordinates will be used. The use categories "
                               "remove text from coordinate form"))
        if not attribute or attribute == '':
            GError(parent=self, showTraceback=False,
                   message=_("With Vector temporal dataset you have to select"
                             " an attribute column"))
            return
        columns = ','.join(['name', 'start_time', 'end_time', 'id', 'layer'])
        for series in timeseries:
            name = series[0]
            fullname = name + '@' + series[1]
            etype = series[2]
            sp = tgis.dataset_factory(etype, fullname)
            if not sp.is_in_db(dbif=self.dbif):
                GError(message=_("Dataset <%s> not found in temporal "
                                 "database") % (fullname), parent=self,
                                 showTraceback=False)
                return
            sp.select(dbif=self.dbif)

            rows = sp.get_registered_maps(dbif=self.dbif, order="start_time",
                                          columns=columns, where=None)

            self.timeDataV[name] = OrderedDict()
            self.timeDataV[name]['temporalDataType'] = etype
            self.timeDataV[name]['temporalType'] = sp.get_temporal_type()
            self.timeDataV[name]['granularity'] = sp.get_granularity()

            if mode is None:
                mode = self.timeDataV[name]['temporalType']
            elif self.timeDataV[name]['temporalType'] != mode:
                GError(parent=self, showTraceback=False,
                       message=_("Datasets have different temporal type ("
                                 "absolute x relative), which is not allowed."))
                return
            self.timeDataV[name]['unit'] = None  # only with relative
            if self.timeDataV[name]['temporalType'] == 'relative':
                start, end, self.timeDataV[name]['unit'] = sp.get_relative_time()
                if unit is None:
                    unit = self.timeDataV[name]['unit']
                elif self.timeDataV[name]['unit'] != unit:
                    GError(message=_("Datasets have different time unit which"
                                     " is not allowed."), parent=self,
                           showTraceback=False)
                    return
            if self.poi:
                self.plotNameListV.append(name)
                # TODO set an appropriate distance; right now a big one is set
                # to return the closest point to the selected one
                out = grass.vector_what(map='pois_srvds',
                                        coord=self.poi.coords(),
                                        distance=10000000000000000)
                if len(out) != len(rows):
                    GError(parent=self, showTraceback=False,
                           message=_("Difference number of vector layers and "
                                     "maps in the vector temporal dataset"))
                    return
                for i in range(len(rows)):
                    row = rows[i]
                    values = out[i]
                    if str(row['layer']) == str(values['Layer']):
                        lay = "{map}_{layer}".format(map=row['name'],
                                                     layer=values['Layer'])
                        self.timeDataV[name][lay] = {}
                        self.timeDataV[name][lay]['start_datetime'] = row['start_time']
                        self.timeDataV[name][lay]['end_datetime'] = row['start_time']
                        self.timeDataV[name][lay]['value'] = values['Attributes'][attribute]
            else:
                wherequery = ''
                cats = self._getExistingCategories(rows[0]['name'], cats)
                totcat = len(cats)
                ncat = 1
                for cat in cats:
                    if ncat == 1 and totcat != 1:
                        wherequery += '{k}={c} or'.format(c=cat, k="{key}")
                    elif ncat == 1 and totcat == 1:
                        wherequery += '{k}={c}'.format(c=cat, k="{key}")
                    elif ncat == totcat:
                        wherequery += ' {k}={c}'.format(c=cat, k="{key}")
                    else:
                        wherequery += ' {k}={c} or'.format(c=cat, k="{key}")

                    catn = "cat{num}".format(num=cat)
                    self.plotNameListV.append("{na}+{cat}".format(na=name,
                                                                  cat=catn))
                    self.timeDataV[name][catn] = OrderedDict()
                    ncat += 1
                for row in rows:
                    lay = int(row['layer'])
                    catkey = self._parseVDbConn(row['name'], lay)
                    if not catkey:
                        GError(parent=self, showTraceback=False,
                               message=_("No connection between vector map "
                                         "{vmap} and layer {la}".format(
                                             vmap=row['name'], la=lay)))
                        return
                    vals = grass.vector_db_select(map=row['name'], layer=lay,
                                                  where=wherequery.format(key=catkey),
                                                  columns=attribute)
                    layn = "lay{num}".format(num=lay)
                    for cat in cats:
                        catn = "cat{num}".format(num=cat)
                        if layn not in self.timeDataV[name][catn].keys():
                            self.timeDataV[name][catn][layn] = {}
                        self.timeDataV[name][catn][layn]['start_datetime'] = row['start_time']
                        self.timeDataV[name][catn][layn]['end_datetime'] = row['end_time']
                        self.timeDataV[name][catn][layn]['value'] = vals['values'][int(cat)][0]
        self.unit = unit
        self.temporalType = mode
        return
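
The ncat/totcat bookkeeping in the where-clause builder above can be condensed with a join; a sketch producing the same kind of clause (the helper name is illustrative only):

# Compact construction of the per-category where clause built above:
# one "{key}=<cat>" term per category, joined with "or", with the key
# name left as a placeholder to be filled in per layer.
def build_wherequery(cats):
    return ' or '.join('{{key}}={c}'.format(c=cat) for cat in cats)

wherequery = build_wherequery(['1', '5', '9'])
print(wherequery)                    # {key}=1 or {key}=5 or {key}=9
print(wherequery.format(key='cat'))  # cat=1 or cat=5 or cat=9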
Code example #30
0
def main():
    """
    Superposition of analytical solutions in gFlex for flexural isostasy in
    GRASS GIS
    """

    options, flags = grass.parser()
    # if just interface description is requested, it will not get to this point
    # so gflex will not be needed

    # GFLEX
    # try to import gflex only after we know that
    # we will actually do the computation
    try:
        import gflex
    except ImportError:
        print("")
        print("MODULE IMPORT ERROR.")
        print(
            "In order to run r.flexure or g.flexure, you must download and install"
        )
        print("gFlex. The most recent development version is available from")
        print("https://github.com/awickert/gFlex")
        print("Installation instructions are available on the page.")
        grass.fatal("Software dependency must be installed.")

    ##########
    # SET-UP #
    ##########

    # This code is for 2D flexural isostasy
    flex = gflex.F2D()
    # And show that it is coming from GRASS GIS
    flex.grass = True

    # Method
    flex.Method = "SAS_NG"

    # Parameters that are often changed for the solution
    ######################################################

    # x, y, q
    flex.x, flex.y = get_points_xy(options["input"])
    # xw, yw: gridded output
    if len(
            grass.parse_command("g.list",
                                type="vect",
                                pattern=options["output"])):
        if not grass.overwrite():
            grass.fatal("Vector map '" + options["output"] +
                        "' already exists. Use '--o' to overwrite.")
    # Just check raster at the same time if it exists
    if len(
            grass.parse_command("g.list",
                                type="rast",
                                pattern=options["raster_output"])):
        if not grass.overwrite():
            grass.fatal("Raster map '" + options["raster_output"] +
                        "' already exists. Use '--o' to overwrite.")
    grass.run_command(
        "v.mkgrid",
        map=options["output"],
        type="point",
        overwrite=grass.overwrite(),
        quiet=True,
    )
    grass.run_command(
        "v.db.addcolumn",
        map=options["output"],
        columns="w double precision",
        quiet=True,
    )
    flex.xw, flex.yw = get_points_xy(
        options["output"])  # gridded output coordinates
    vect_db = grass.vector_db_select(options["input"])
    col_names = np.array(vect_db["columns"])
    q_col = col_names == options["column"]
    if np.sum(q_col):
        col_values = np.array(list(vect_db["values"].values())).astype(float)
        flex.q = col_values[:, q_col].squeeze(
        )  # Make it 1D for consistency w/ x, y
    else:
        grass.fatal("provided column name, " + options["column"] +
                    " does not match\nany column in " + options["q0"] + ".")
    # Elastic thickness
    flex.Te = float(options["te"])
    if options["te_units"] == "km":
        flex.Te *= 1000
    elif options["te_units"] == "m":
        pass
    else:
        grass.fatal(
            "Inappropriate te_units. How? Options should be limited by GRASS.")
    flex.rho_fill = float(options["rho_fill"])

    # Parameters that often stay at their default values
    ######################################################
    flex.g = float(options["g"])
    flex.E = float(options["ym"]
                   )  # Can't just use "E" because reserved for "east", I think
    flex.nu = float(options["nu"])
    flex.rho_m = float(options["rho_m"])

    # Set verbosity
    if grass.verbosity() >= 2:
        flex.Verbose = True
    if grass.verbosity() >= 3:
        flex.Debug = True
    elif grass.verbosity() == 0:
        flex.Quiet = True

    # Check if lat/lon and let user know if verbosity is True
    if grass.region_env()[6] == "3":
        flex.latlon = True
        flex.PlanetaryRadius = float(
            grass.parse_command("g.proj", flags="j")["+a"])
        if flex.Verbose:
            print("Latitude/longitude grid.")
            print("Based on r_Earth = 6371 km")
            print(
                "Computing distances between load points using great circle paths"
            )

    ##########
    # SOLVE! #
    ##########

    flex.initialize()
    flex.run()
    flex.finalize()

    # Now to use lower-level GRASS vector commands to work with the database
    # table and update its entries
    # See for help:
    # http://nbviewer.ipython.org/github/zarch/workshop-pygrass/blob/master/02_Vector.ipynb
    w = vector.VectorTopo(options["output"])
    w.open("rw")  # Get ready to read and write
    wdb = w.dblinks[0]
    wtable = wdb.table()
    col = int((np.array(
        wtable.columns.names()) == "w").nonzero()[0])  # update this column
    for i in range(1, len(w) + 1):
        # ignoring 1st column: assuming it will be category (always true here)
        wnewvalues = (list(w[i].attrs.values())[1:col] +
                      tuple([flex.w[i - 1]]) +
                      list(w[i].attrs.values())[col + 1:])
        wtable.update(key=i, values=wnewvalues)
    wtable.conn.commit()  # Save this
    w.close(build=False)  # don't build here b/c it is always verbose
    grass.run_command("v.build", map=options["output"], quiet=True)

    # And raster export
    # "w" vector defined by raster resolution, so can do direct v.to.rast
    # though if this option isn't selected, the user can do a finer-grained
    # interpolation, which shouldn't introduce much error so long as these
    # outputs are spaced at << 1 flexural wavelength.
    if options["raster_output"]:
        grass.run_command(
            "v.to.rast",
            input=options["output"],
            output=options["raster_output"],
            use="attr",
            attribute_column="w",
            type="point",
            overwrite=grass.overwrite(),
            quiet=True,
        )
        # And create a nice colormap!
        grass.run_command("r.colors",
                          map=options["raster_output"],
                          color="differences",
                          quiet=True)
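
The boolean column mask used above to pull the load column (q_col = col_names == options["column"]) is the selection pattern used throughout these modules. A minimal sketch with a hypothetical table dump shaped like the output of grass.vector_db_select():

# Column-mask selection from a vector_db_select()-shaped dump (toy data).
import numpy as np

col_names = np.array(['cat', 'x', 'y', 'q'])
col_values = np.array([
    ['1', '0.0', '0.0', '1e6'],
    ['2', '10.0', '0.0', '2e6'],
])

q_col = col_names == 'q'                          # boolean mask over columns
q = col_values[:, q_col].astype(float).squeeze()  # 1-D load vector
print(q)  # [1000000. 2000000.]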
Code example #31
0
def main():
    """
    Input for GSFLOW
    """

    reg = grass.region()

    options, flags = grass.parser()

    basin_mouth_E = options['E']
    basin_mouth_N = options['N']

    accum_thresh = options['threshold']

    # Create drainage direction, flow accumulation, and rivers

    # Manually create streams from accumulation.
    # The one funny step is the cleaning w/ snap, because r.thin allows cells
    # that are diagonal to each other to be next to each other -- creating
    # boxes along the channel that are not consistent with stream topology
    grass.mapcalc('streams_unthinned = flowAccum > '+str(accum_thresh), overwrite=True)
    grass.run_command('r.null', map='streams_unthinned', setnull=0)
    grass.run_command('r.thin', input='streams_unthinned', output='streams', overwrite=True)
    grass.run_command('r.to.vect', input='streams', output='streams_raw', type='line', overwrite=True)
    grass.run_command('v.clean', input='streams_raw', output='streams', tool='snap', threshold=1.42*(grass.region()['nsres'] + grass.region()['ewres'])/2., flags='c', overwrite=True) # threshold is one cell
    grass.run_command('v.to.rast', input='streams', output='streams_unthinned', use='val', val=1, overwrite=True)
    grass.run_command('r.thin', input='streams_unthinned', output='streams', overwrite=True)
    grass.run_command('r.to.vect', input='streams', output='streams', type='line', overwrite=True)
    grass.run_command('v.to.rast', input='streams', output='streams', use='cat', overwrite=True)
    # Create drainage basins
    grass.run_command('r.stream.basins', direction='drainageDirection', stream_rast='streams', basins='basins', overwrite=True)
    # If there is any more need to work with nodes, I should check the code I wrote for Kelly Monteleone's paper -- this has river identification and extraction, including intersection points.


    # Vectorize drainage basins
    grass.run_command('r.to.vect', input='basins', output='basins', type='area', flags='v', overwrite=True)

    # Then remove all sub-basins and segments that have negative flow accumulation
    # (i.e. have contributions from outside the map)

    ###################################################################
    # Intermediate step: Remove all basins that have offmap flow
    # i.e., those containing cells with negative flow accumulation
    ###################################################################

    # Method 3 -- even easier
    grass.mapcalc("has_offmap_flow = (flowAccum < 0)", overwrite=True)
    grass.run_command('r.null', map='has_offmap_flow', setnull=0)
    grass.run_command('r.to.vect', input='has_offmap_flow', output='has_offmap_flow', type='point', overwrite=True)
    grass.run_command('v.db.addcolumn', map='has_offmap_flow', columns='badbasin_cats integer')
    grass.run_command('v.what.vect', map='has_offmap_flow', column='badbasin_cats', query_map='basins', query_column='cat', dmax=60)
    colNames = np.array(grass.vector_db_select('has_offmap_flow', layer=1)['columns'])
    # offmap incoming flow points
    colValues = np.array(grass.vector_db_select('has_offmap_flow', layer=1)['values'].values())
    badcats = colValues[:,colNames == 'badbasin_cats'].squeeze()
    badcats = badcats[badcats != '']
    badcats = badcats.astype(int)
    badcats = list(set(list(badcats)))
    # basins for full cat list
    colNames = np.array(grass.vector_db_select('basins', layer=1)['columns'])
    colValues = np.array(grass.vector_db_select('basins', layer=1)['values'].values())
    allcats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    allcats = list(set(list(allcats)))
    # xor to goodcats
    #goodcats = set(badcats).symmetric_difference(allcats)
    # but better in case somehow there are badcats that are not allcats to do NOT
    goodcats = list(set(allcats) - set(badcats))
    goodcats_str = ','.join(str(cat) for cat in goodcats)
    grass.run_command('g.rename', vect='basins,tmp', overwrite=True)
    grass.run_command('v.extract', input='tmp', output='basins', cats=goodcats_str)
    grass.run_command('g.rename', vect='streams,tmp', overwrite=True)
    grass.run_command('v.extract', input='tmp', output='streams', cats=goodcats_str)
    #grass.run_command('g.rename', vect='stream_nodes,tmp', overwrite=True)
    #grass.run_command('v.extract', input='tmp', output='stream_nodes', cats=goodcats_str)

    # Fix pixellated pieces -- formerly here due to one-pixel-basin issue
    reg = grass.region()
    grass.run_command('g.rename', vect='basins,basins_messy', overwrite=True)
    grass.run_command('v.clean', input='basins_messy', output='basins', tool='rmarea', threshold=reg['nsres']*reg['ewres'], overwrite=True)

    # Optional, but recommended because not all basins need to connect:
    # choose a subset of the region in which to do the PRMS calculation
    grass.run_command( 'r.water.outlet', input='drainageDirection', output='studyBasin', coordinates=str(basin_mouth_E)+','+str(basin_mouth_N) , overwrite=True)
    # Vectorize
    grass.run_command( 'r.to.vect', input='studyBasin', output='studyBasin', type='area', overwrite=True)
    # If there are dangling areas (single-pixel?), just drop them. Not sure if this is the best way to do it
    # No check for two equal areas -- if we have this, there are more fundamental problems in defining 
    # a watershed in contiguous units

    #"""
    # ONLY IF MORE THAN ONE STUDY BASIN -- remove small areas
    grass.run_command( 'v.db.addcolumn', map='studyBasin', columns='area_m2 double precision' )
    grass.run_command( 'v.db.dropcolumn', map='studyBasin', columns='label' )
    grass.run_command( 'v.to.db', map='studyBasin', columns='area_m2', option='area', units='meters')
    drainageAreasRaw = sorted( grass.parse_command( 'v.db.select', map='studyBasin', flags='c').keys() ) # could update to grass.vector_db_select
    drainageAreasList = []
    for row in drainageAreasRaw:
      # cat, area
      drainageAreasList.append(row.split('|'))
    drainageAreasOnly = np.array(drainageAreasList).astype(float)
    catsOnly = drainageAreasOnly[:,0].astype(int)
    drainageAreasOnly = drainageAreasOnly[:,1]
    row_with_max_drainage_area = (drainageAreasOnly == np.max(drainageAreasOnly)).nonzero()[0][0]
    cat_with_max_drainage_area = catsOnly[row_with_max_drainage_area]
    grass.run_command('g.rename', vect='studyBasin,tmp', overwrite=True)
    grass.run_command('v.extract', input='tmp', output='studyBasin', cats=cat_with_max_drainage_area, overwrite=True)
    grass.run_command('g.remove', type='vector', name='tmp', flags='f')
    grass.run_command('v.to.rast', input='studyBasin', output='studyBasin', use='val', value=1, overwrite=True)
    #"""
    """
    # Remove small areas -- easier, though not as sure, as the method above
    grass.run_command('v.rename', vect='studyBasin,tmp', overwrite=True)
    grass.run_command('v.clean', input='tmp', output='studyBasin', tool='rmarea', threshold=1.01*(grass.region()['nsres'] * grass.region()['ewres']), flags='c', overwrite=True) # threshold is one cell
    """


    ###############
    # PLACEHOLDER #
    ###################################################################
    # To do in near future: limit to this basin
    ###################################################################

    # Next, get the order of basins the old-fashioned way: coordinates of endpoints of lines
    # Because I can't use GRASS to query multiple points
    #grass.run_command('v.extract', input='streams', output='streamSegments', type='line', overwrite=True)
    # Maybe I don't even need nodes! 9/4/16 -- nope, doesn't seem so.
    grass.run_command('g.copy', vect='streams,streamSegments')
    grass.run_command('v.db.addcolumn', map='streamSegments', columns='z double precision, flow_accum double precision, x1 double precision, y1 double precision, x2 double precision, y2 double precision')
    grass.run_command('v.to.db', map='streamSegments', option='start', columns='x1, y1')
    grass.run_command('v.to.db', map='streamSegments', option='end', columns='x2, y2')

    colNames = np.array(grass.vector_db_select('streamSegments')['columns'])
    colValues = np.array(grass.vector_db_select('streamSegments')['values'].values())
    cats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    xy1 = colValues[:,(colNames == 'x1') + (colNames == 'y1')].astype(float)
    xy2 = colValues[:,(colNames == 'x2') + (colNames == 'y2')].astype(float)
    xy  = np.vstack((xy1, xy2))

    # xy1: UPSTREAM
    # xy2: DOWNSTREAM
    # (I checked.)
    # So now can use this information to find headwaters and mouths

    # Not sure that this is necessary
    nsegs_at_point_1 = []
    nsegs_at_point_2 = []
    for row in xy1:
      nsegs_at_point_1.append(np.sum( np.prod(xy == row, axis=1)))
    for row in xy2:
      nsegs_at_point_2.append(np.sum( np.prod(xy == row, axis=1)))
    nsegs_at_point_1 = np.array(nsegs_at_point_1)
    nsegs_at_point_2 = np.array(nsegs_at_point_2)
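    # Sketch (added illustration, not in the original script): with xy1/xy2 as
    # defined above, a segment is a headwater if its upstream endpoint is not
    # the downstream endpoint of any segment, and a mouth if its downstream
    # endpoint does not feed any segment's upstream endpoint.
    is_headwater = np.array([not np.prod(xy2 == row, axis=1).any() for row in xy1])
    is_mouth = np.array([not np.prod(xy1 == row, axis=1).any() for row in xy2])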


    # HRU's have same numbers as their enclosed segments
    # NOT TRUE IN GENERAL -- JUST FOR THIS CASE WITH SUB-BASINS -- WILL NEED TO FIX IN FUTURE



    #############
    # Now, let's copy/rename the sub-basins to HRU and the streamSegments to segment and give them attributes
    ###########################################################################################################

    # Attributes (in order given in manual)

    # HRU
    hru_columns = []
    # Self ID
    hru_columns.append('id integer') # nhru
    # Basic Physical Attributes (Geometry)
    hru_columns.append('hru_area double precision') # acres (!!!!)
    hru_columns.append('hru_aspect double precision') # Mean aspect [degrees]
    hru_columns.append('hru_elev double precision') # Mean elevation
    hru_columns.append('hru_lat double precision') # Latitude of centroid
    hru_columns.append('hru_slope double precision') # Mean slope [percent]
    # Basic Physical Attributes (Other)
    #hru_columns.append('hru_type integer') # 0=inactive; 1=land; 2=lake; 3=swale; almost all will be 1
    #hru_columns.append('elev_units integer') # 0=feet; 1=meters. 0=default. I think I will set this to 1 by default.
    # Measured input
    hru_columns.append('outlet_sta integer') # Index of streamflow station at basin outlet:
                                         #   station number if it has one, 0 if not
    #    Note that the below specify projections and not lat/lon; they really seem
    #    to work for any projected coordinates, with _x, _y, in meters, and _xlong, 
    #    _ylat, in feet (i.e. they are just northing and easting). The meters and feet
    #    are not just simple conversions, but actually are required for different
    #    modules in the code, and are hence redundant but intentional.
    hru_columns.append('hru_x double precision') # Easting [m]
    hru_columns.append('hru_xlong double precision') # Easting [feet]
    hru_columns.append('hru_y double precision') # Northing [m]
    hru_columns.append('hru_ylat double precision') # Northing [feet]
    # Streamflow and lake routing
    hru_columns.append('K_coef double precision') # Travel time of flood wave to next downstream segment;
                                                  #   this is the Muskingum storage coefficient
                                                  #   1.0 for reservoirs, diversions, and segments flowing
                                                  #   out of the basin
    hru_columns.append('x_coef double precision') # Amount of attenuation of flow wave;
                                                  #   this is the Muskingum routing weighting factor
                                                  #   range: 0.0--0.5; default 0.2
                                                  #   0 for all segments flowing out of the basin
    hru_columns.append('hru_segment integer') # ID of stream segment to which flow will be routed
                                              #   this is for non-cascade routing (flow goes directly
                                              #   from HRU to stream segment)
    hru_columns.append('obsin_segment integer') # Index of measured streamflow station that replaces
                                                #   inflow to a segment
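    # Background note (added; these are the standard Muskingum routing
    # relations, not computed by this script): reach storage is
    #   S = K * (x*I + (1 - x)*O),
    # so K_coef is the reach travel time and x_coef in [0, 0.5] weights inflow
    # against outflow; x = 0 behaves as a pure linear reservoir (maximum
    # attenuation).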

    # Segments
    segment_columns = []
    # Self ID
    segment_columns.append('id integer') # nsegment
    # Streamflow and lake routing
    segment_columns.append('tosegment integer') # Index of downstream segment to which a segment
                                                #   flows (thus differentiating it from hru_segment,
                                                #   which is for HRU's, though segment and HRU ID's
                                                #   are the same when HRU's are sub-basins

    # PRODUCE THE DATA TABLES
    ##########################

    # Create strings
    hru_columns = ",".join(hru_columns)
    segment_columns = ",".join(segment_columns)

    #"""
    # Copy
    grass.run_command('g.copy', vect='basins,HRU', overwrite=True)
    grass.run_command('g.copy', vect='streamSegments,segment', overwrite=True)
    #"""

    # Rename / subset
    """
    # OR GO BACK TO HRU_messy
    grass.run_command('v.overlay', ainput='basins', binput='studyBasin', operator='and', output='HRU_messy', overwrite=True)
    grass.run_command('v.overlay', ainput='streamSegments', binput='studyBasin', operator='and', output='segment_messy', overwrite=True)
    # And clean as well
    grass.run_command('v.clean', input='HRU_messy', output='HRU', tool='rmarea', threshold=reg['nsres']*reg['ewres']*40, overwrite=True)
    grass.run_command('v.clean', input='segment_messy', output='segment', tool='rmdangle', threshold=reg['nsres']*2, overwrite=True)
    # And now that the streams and HRU's no longer have the same cat values, fix 
    # this.
    grass.run_command('v.db.droptable', map='HRU', flags='f')
    grass.run_command('v.db.droptable', map='segment', flags='f')
    #grass.run_command('v.category', input='HRU', option='del', cat='-1', out='tmp', overwrite=True)
    #grass.run_command('v.category', input='tmp', option='add', out='HRU', overwrite=True)
    grass.run_command('v.db.addtable', map='HRU')
    grass.run_command('v.db.addtable', map='segment')

    """

    #grass.run_command('v.clean', input='segment_messy', output='HRU', tool='rmarea', threshold=reg['nsres']*reg['ewres']*20, overwrite=True)


    # Add columns to tables
    grass.run_command('v.db.addcolumn', map='HRU', columns=hru_columns)
    grass.run_command('v.db.addcolumn', map='segment', columns=segment_columns)


    # Produce the data table entries
    ##################################

    """
    # ID numbers
    # There should be a way to do this all at once, but...
    for i in range(len(cats)):
      grass.run_command('v.db.update', map='HRU', column='id', value=nhru[i], where='cat='+str(cats[i]))
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    for i in range(len(cats)):
      grass.run_command('v.db.update', map='segment', column='id', value=nsegment[i], where='cat='+str(cats[i]))
    """

    nhru = np.arange(1, xy1.shape[0]+1)
    nhrut = []
    for i in range(len(nhru)):
      nhrut.append( (nhru[i], cats[i]) )
    # Access the HRU's 
    hru = VectorTopo('HRU')
    # Open the map with topology:
    hru.open('rw')
    # Create a cursor
    cur = hru.table.conn.cursor()
    # Use it to loop across the table
    cur.executemany("update HRU set id=? where cat=?", nhrut)
    # Commit changes to the table
    hru.table.conn.commit()
    # Close the table
    hru.close()

    # To append new rows instead of updating existing ones, use "insert into":
    # cur.executemany("insert into HRU (id) values (?)", ...)

    # Same for segments
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general

    # Somehow only works after I v.clean, not right after v.overlay
    segment = VectorTopo('segment')
    segment.open('rw')
    cur = segment.table.conn.cursor()
    cur.executemany("update segment set id=? where cat=?", nsegmentt)
    segment.table.conn.commit()
    segment.close()

    #hru_columns.append('hru_area double precision')
    grass.run_command('v.to.db', map='HRU', option='area', columns='hru_area', units='acres')

    # GET MEAN VALUES FOR THESE NEXT ONES, ACROSS THE BASIN

    # hru_columns.append('hru_aspect double precision') # Mean aspect [degrees]
    # hru_columns.append('hru_slope double precision') # Mean slope [percent]
    # Slope
    grass.run_command('r.slope.aspect', elevation='srtm', slope='tmp', aspect='aspect', format='percent', overwrite=True) # zscale=0.01 also works to make percent be decimal 0-1
    grass.mapcalc('slope = tmp / 100.', overwrite=True)
    grass.run_command('v.rast.stats', map='HRU', raster='slope', method='average', column_prefix='tmp', flags='c')
    grass.run_command('v.db.update', map='HRU', column='hru_slope', query_column='tmp_average')
    grass.run_command('v.db.dropcolumn', map='HRU', column='tmp_average')
    # Dealing with conversion from degrees (no good average) to something I can
    # average -- x- and y-vectors
    # Geographic coordinates, so sin=x, cos=y.... not that it matters so long 
    # as I am consistent in how I return to degrees
    grass.mapcalc('aspect_x = sin(aspect)', overwrite=True)
    grass.mapcalc('aspect_y = cos(aspect)', overwrite=True)
    #grass.run_command('v.db.addcolumn', map='HRU', columns='aspect_x_sum double precision, aspect_y_sum double precision, ncells_in_hru integer')
    grass.run_command('v.rast.stats', map='HRU', raster='aspect_x', method='sum', column_prefix='aspect_x', flags='c')
    grass.run_command('v.rast.stats', map='HRU', raster='aspect_y', method='sum', column_prefix='aspect_y', flags='c')
    # Not actually needed, but maybe good to know
    #grass.run_command('v.rast.stats', map='HRU', raster='aspect_y', method='number', column_prefix='tmp', flags='c')
    #grass.run_command('v.db.renamecolumn', map='HRU', column='tmp_number,ncells_in_hru')
    # NO TRIG FUNCTIONS IN SQLITE!
    #grass.run_command('v.db.update', map='HRU', column='hru_aspect', query_column='DEGREES(ATN2(aspect_y_sum, aspect_x_sum))') # Getting 0, why?
    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.execute("SELECT cat,aspect_x_sum,aspect_y_sum FROM %s" %hru.name)
    _arr = np.array(cur.fetchall())
    _cat = _arr[:,0]
    _aspect_x_sum = _arr[:,1]
    _aspect_y_sum = _arr[:,2]
    aspect_angle = np.arctan2(_aspect_y_sum, _aspect_x_sum) * 180./np.pi
    aspect_angle[aspect_angle < 0] += 360 # all positive
    aspect_angle_cat = np.vstack((aspect_angle, _cat)).transpose()
    cur.executemany("update HRU set hru_aspect=? where cat=?", aspect_angle_cat)
    hru.table.conn.commit()
    hru.close()
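    # Worked check (added illustration): a naive arithmetic mean of aspects
    # 350 and 10 degrees gives 180, the opposite direction. Summing unit
    # vectors instead gives sums of sines ~0.0 and cosines ~1.97, which
    # reinforce rather than cancel, so arctan2 of the sums recovers a
    # direction equivalent to ~0/360 degrees (up to the sin/cos axis
    # convention chosen above).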

    # hru_columns.append('hru_elev double precision') # Mean elevation
    grass.run_command('v.rast.stats', map='HRU', raster='srtm', method='average', column_prefix='tmp', flags='c')
    grass.run_command('v.db.update', map='HRU', column='hru_elev', query_column='tmp_average')
    grass.run_command('v.db.dropcolumn', map='HRU', column='tmp_average')

    # Get x,y of the centroid -- but there are areas that are not in the
    # database table yet still have centroids, and it is hard to find a good
    # way to get rid of them! They have duplicate category values!
    # Perhaps these are little dangles on the edges of the vectorization where
    # the raster value was the same but pinched out into one to a few cells?
    # From looking at the map, there are lots of extra centroids on area
    # boundaries, and removing small areas (though the threshold is hard to
    # guess) gets rid of these.

    """
    g.copy vect=HRU,HRUorig # HACK!!!
    v.clean in=HRUorig out=HRU tool=rmarea --o thresh=15000
    """

    #grass.run_command( 'g.rename', vect='HRU,HRU_too_many_centroids')
    #grass.run_command( 'v.clean', input='HRU_too_many_centroids', output='HRU', tool='rmdac')
    grass.run_command('v.db.addcolumn', map='HRU', columns='centroid_x double precision, centroid_y double precision')
    grass.run_command( 'v.to.db', map='HRU', type='centroid', columns='centroid_x, centroid_y', option='coor', units='meters')

    # hru_columns.append('hru_lat double precision') # Latitude of centroid
    colNames = np.array(grass.vector_db_select('HRU', layer=1)['columns'])
    colValues = np.array(grass.vector_db_select('HRU', layer=1)['values'].values())
    xy = colValues[:,(colNames=='centroid_x') + (colNames=='centroid_y')]
    np.savetxt('_xy.txt', xy, delimiter='|', fmt='%s')
    grass.run_command('m.proj', flags='od', input='_xy.txt', output='_lonlat.txt', overwrite=True)
    lonlat = np.genfromtxt('_lonlat.txt', delimiter='|',)[:,:2]
    lonlat_cat = np.concatenate((lonlat, np.expand_dims(_cat, 1)), axis=1)

    # why not just get lon too?
    grass.run_command('v.db.addcolumn', map='HRU', columns='hru_lon double precision')

    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.executemany("update HRU set hru_lon=?, hru_lat=? where cat=?", lonlat_cat)
    hru.table.conn.commit()
    hru.close()

    # Easting and Northing for other columns
    grass.run_command('v.db.update', map='HRU', column='hru_x', query_column='centroid_x')
    grass.run_command('v.db.update', map='HRU', column='hru_xlong', query_column='centroid_x*3.28084') # feet
    grass.run_command('v.db.update', map='HRU', column='hru_y', query_column='centroid_y')
    grass.run_command('v.db.update', map='HRU', column='hru_ylat', query_column='centroid_y*3.28084') # feet


    # Streamflow and lake routing
    # tosegment
    """
    # THIS IS THE NECESSARY PART
    # CHANGED (BELOW) TO RE-DEFINE NUMBERS IN SEQUENCE AS HRU'S INSTEAD OF USING
    # THE CAT VALUES
    # Get the first channels in the segment
    tosegment = np.zeros(len(cats)) # default to 0 if they do not flow to another segment
    # Loop over all segments
    #for i in range(len(cats)):
    # From outlet segment
    for i in range(len(xy2)):
      # to inlet segment
      inlets = np.prod(xy1 == xy2[i], axis=1)
      # Update inlet segments with ID of outlets
      tosegment[inlets.nonzero()] = cats[i]
    tosegment_cat = tosegment.copy()
    """

    tosegment_cats = np.zeros(len(cats)).astype(int) # default to 0 if they do not flow to another segment
    tosegment = np.zeros(len(cats)).astype(int) # default to 0 if they do not flow to another segment
    # For each segment i, find the segments whose downstream endpoints (xy2)
    # coincide with segment i's upstream endpoint (xy1): these drain into
    # segment i, so their tosegment is segment i's ID
    for i in range(len(xy2)):
      inflows = np.prod(xy2 == xy1[i], axis=1)
      tosegment[inflows.nonzero()] = nhru[i]
      tosegment_cats[inflows.nonzero()] = cats[i]

    """
      # BACKWARDS!
      # to inlet segment
      inlets = np.prod(xy1 == xy2[i], axis=1)
      # Update inlet segments with ID of outlets
      tosegment_cats[inlets.nonzero()] = cats[i]
    """

    # Now, just update tosegment (segments) and hru_segment (hru's)
    # In this case, they are the same.
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general
    # Tuple for upload to SQL
    # 0 is the default value if it doesn't go into any other segment (i.e. flows
    # off-map)
    tosegmentt = []
    tosegment_cats_t = []
    for i in range(len(nsegment)):
      tosegmentt.append( (tosegment[i], nsegment[i]) )
      tosegment_cats_t.append( (tosegment_cats[i], cats[i]) )
    # Once again, special case
    hru_segmentt = tosegmentt

    # Loop check!
    # Weak loop checker - will only detect direct ping-pong.
    loops = []
    tosegmenta = np.array(tosegmentt)
    for i in range(len(tosegmenta)):
      for j in range(len(tosegmenta)):
        if (tosegmenta[i] == tosegmenta[j][::-1]).all():
          loops.append(tosegmenta[i])
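    # Optional stronger check (a sketch, not in the original script): walk
    # downstream from every segment via the id -> tosegment mapping
    # (0 = flows off-map); revisiting an id during a walk would reveal a
    # routing loop of any length, not just a direct ping-pong.
    downstream = dict(zip(nsegment.tolist(), tosegment.tolist()))
    for start in downstream:
      seen = set()
      s = start
      while s != 0:
        if s in seen:
          print 'Routing loop detected involving segment', s
          break
        seen.add(s)
        s = downstream.get(s, 0)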

    segment = VectorTopo('segment')
    segment.open('rw')
    cur = segment.table.conn.cursor()
    cur.executemany("update segment set tosegment=? where id=?", tosegmentt)
    segment.table.conn.commit()
    segment.close()

    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.executemany("update HRU set hru_segment=? where id=?", hru_segmentt)
    hru.table.conn.commit()
    hru.close()


    #grass.run_command('g.rename', vect='HRU_all_2,HRU', overwrite=True)
    #grass.run_command('g.rename', vect='segment_all_2,segment', overwrite=True)

    # In study basin?
    grass.run_command('v.db.addcolumn', map='segment', columns='in_study_basin int')
    grass.run_command('v.db.addcolumn', map='HRU', columns='in_study_basin int')
    grass.run_command('v.what.vect', map='segment', column='in_study_basin', query_map='studyBasin', query_column='value')
    grass.run_command('v.what.vect', map='HRU', column='in_study_basin', query_map='segment', query_column='in_study_basin')

    # Save global segment+HRU
    grass.run_command('g.rename', vect='HRU,HRU_all')
    grass.run_command('g.rename', vect='segment,segment_all')

    # Output HRU -- will need to ensure that this is robust!
    grass.run_command('v.extract', input='HRU_all', output='HRU', where='in_study_basin=1', overwrite=True)
    grass.run_command('v.extract', input='segment_all', output='segment', where='in_study_basin=1', overwrite=True)


    colNames = np.array(grass.vector_db_select('segment')['columns'])
    colValues = np.array(grass.vector_db_select('segment')['values'].values())
    cats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    xy1 = colValues[:,(colNames == 'x1') + (colNames == 'y1')].astype(float)
    xy2 = colValues[:,(colNames == 'x2') + (colNames == 'y2')].astype(float)
    xy  = np.vstack((xy1, xy2))

    # Redo nhru down here
    nhru = np.arange(1, xy1.shape[0]+1)
    nhrut = []
    for i in range(len(nhru)):
      nhrut.append( (nhru[i], cats[i]) )
      """
      n = 1
      if i != 1:
        nhrut.append( (n, cats[i]) )
        n += 1
      """
      
    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.executemany("update HRU set id=? where cat=?", nhrut)
    hru.table.conn.commit()
    hru.close()

    # To append new rows instead of updating existing ones, use "insert into":
    # cur.executemany("insert into HRU (id) values (?)", ...)

    # Same for segments
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general

    # Somehow only works after I v.clean, not right after v.overlay
    segment = VectorTopo('segment')
    segment.open('rw')
    cur = segment.table.conn.cursor()
    cur.executemany("update segment set id=? where cat=?", nsegmentt)
    segment.table.conn.commit()
    segment.close()


    tosegment_cats = np.zeros(len(cats)).astype(int) # default to 0 if they do not flow to another segment
    tosegment = np.zeros(len(cats)).astype(int) # default to 0 if they do not flow to another segment
    # For each segment i, find the segments whose downstream endpoints (xy2)
    # coincide with segment i's upstream endpoint (xy1): these drain into
    # segment i, so their tosegment is segment i's ID
    for i in range(len(xy2)):
      inflows = np.prod(xy2 == xy1[i], axis=1)
      tosegment[inflows.nonzero()] = nhru[i]
      tosegment_cats[inflows.nonzero()] = cats[i]

    # Now, just update tosegment (segments) and hru_segment (hru's)
    # In this case, they are the same.
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general
    # Tuple for upload to SQL
    # 0 is the default value if it doesn't go into any other segment (i.e. flows
    # off-map)
    tosegmentt = []
    tosegment_cats_t = []
    for i in range(len(nsegment)):
      tosegmentt.append( (tosegment[i], nsegment[i]) )
      tosegment_cats_t.append( (tosegment_cats[i], cats[i]) )
    # Once again, special case
    hru_segmentt = tosegmentt

    # Loop check!
    # Weak loop checker - will only detect direct ping-pong.
    loops = []
    tosegmenta = np.array(tosegmentt)
    for i in range(len(tosegmenta)):
      for j in range(len(tosegmenta)):
        if (tosegmenta[i] == tosegmenta[j][::-1]).all():
          loops.append(tosegmenta[i])


    segment = VectorTopo('segment')
    segment.open('rw')
    cur = segment.table.conn.cursor()
    cur.executemany("update segment set tosegment=? where id=?", tosegmentt)
    segment.table.conn.commit()
    segment.close()

    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.executemany("update HRU set hru_segment=? where id=?", hru_segmentt)
    hru.table.conn.commit()
    hru.close()

    # More old-fashioned way:
    os.system('v.db.select segment sep=comma > segment.csv')
    os.system('v.db.select HRU sep=comma > HRU.csv')
    # and then sort by id, manually
    # And then manually change the last segment's "tosegment" to 0.
    # Except in this case, it was 0!
    # Maybe I managed to do this automatically above... but tired and late, 
    # so will check later
    # but hoping I did something right by re-doing all of the above before
    # saving (and doing so inside this smaller basin)

    print ""
    print "PRMS PORTION COMPLETE."
    print ""



    ###########
    # MODFLOW #
    ###########

    print ""
    print "STARTING MODFLOW PORTION."
    print ""

    # Generate coarse box for MODFLOW (ADW, 4 September, 2016)

    grass.run_command('g.region', rast='srtm')
    grass.run_command('g.region', n=7350000, s=7200000, w=170000, e=260000)
    reg = grass.region()
    MODFLOWres = 2000.
    grass.run_command('v.to.rast', input='HRU', output='allHRUs', use='val', value=1.0, overwrite=True)
    grass.run_command('r.null', map='allHRUs', null='0')
    grass.run_command('r.colors', map='allHRUs', color='grey', flags='n')
    grass.run_command('g.region', res=MODFLOWres)
    grass.run_command('r.resamp.stats', method='average', input='allHRUs', output='fraction_of_HRU_in_MODFLOW_cell', overwrite=True)
    grass.run_command('r.colors', map='fraction_of_HRU_in_MODFLOW_cell', color='grey', flags='n')
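    # Note (added explanation): r.resamp.stats averages the 0/1 HRU mask onto
    # the coarser MODFLOW resolution, so each coarse cell's value is the
    # fraction of its area covered by HRUs, as the map name reflects.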


    print ""
    print "MODFLOW PORTION COMPLETE."
    print ""
コード例 #32
def main():
    """
    Builds river segments for input to the USGS hydrologic models
    PRMS and GSFLOW.
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()

    # I/O
    streams = options["input"]
    segments = options["output"]

    # Hydraulic geometry
    ICALC = int(options["icalc"])

    # ICALC=0: Constant depth
    WIDTH1 = options["width1"]
    WIDTH2 = options["width2"]

    # ICALC=1,2: Manning (in channel and overbank): below

    # ICALC=3: Power-law relationships (following Leopold and others)
    # The at-a-station default exponents are from Rhodes (1977)
    CDPTH = str(float(options["cdpth"]) / 35.3146667)  # cfs to m^3/s
    FDPTH = options["fdpth"]
    AWDTH = str(float(options["awdth"]) / 35.3146667)  # cfs to m^3/s
    BWDTH = options["bwdth"]
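    # Background note (added; Leopold-type at-a-station hydraulic geometry is
    # assumed here, so check against the GSFLOW documentation): with ICALC=3,
    #   depth = CDPTH * Q**FDPTH and width = AWDTH * Q**BWDTH,
    # hence the cfs -> m^3/s rescaling of the coefficient inputs above once
    # discharge Q is taken in m^3/s.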

    ##################################################
    # CHECKING DEPENDENCIES WITH OPTIONAL PARAMETERS #
    ##################################################

    if ICALC == 3:
        if CDPTH and FDPTH and AWDTH and BWDTH:
            pass
        else:
            gscript.fatal("Missing CDPTH, FDPTH, AWDTH, and/or BWDTH. \
                         These are required when ICALC = 3.")

    ###########
    # RUNNING #
    ###########

    # New Columns for Segments
    segment_columns = []
    # Self ID
    segment_columns.append("id integer")  # segment number
    segment_columns.append("ISEG integer")  # segment number
    segment_columns.append("NSEG integer")  # segment number
    # for GSFLOW
    segment_columns.append(
        "ICALC integer"
    )  # 1 for channel, 2 for channel+fp, 3 for power function
    segment_columns.append(
        "OUTSEG integer")  # downstream segment -- tostream, renumbered
    segment_columns.append("ROUGHCH double precision")  # overbank roughness
    segment_columns.append("ROUGHBK double precision")  # in-channel roughness
    segment_columns.append("WIDTH1 double precision")  # overbank roughness
    segment_columns.append("WIDTH2 double precision")  # in-channel roughness
    segment_columns.append("CDPTH double precision")  # depth coeff
    segment_columns.append("FDPTH double precision")  # depth exp
    segment_columns.append("AWDTH double precision")  # width coeff
    segment_columns.append("BWDTH double precision")  # width exp
    segment_columns.append(
        "floodplain_width double precision"
    )  # floodplain width (8-pt approx channel + flat fp)
    # The below will be all 0
    segment_columns.append(
        "IUPSEG varchar")  # upstream segment ID number, for diversions
    segment_columns.append("FLOW varchar")
    segment_columns.append("RUNOFF varchar")
    segment_columns.append("ETSW varchar")
    segment_columns.append("PPTSW varchar")

    segment_columns = ",".join(segment_columns)

    # CONSIDER THE EFFECT OF OVERWRITING COLUMNS -- WARN FOR THIS
    # IF MAP EXISTS ALREADY?

    # Create a map to work with
    g.copy(vector=(streams, segments), overwrite=gscript.overwrite())
    # and add its columns
    v.db_addcolumn(map=segments, columns=segment_columns)

    # Produce the data table entries
    ##################################
    colNames = np.array(gscript.vector_db_select(segments, layer=1)["columns"])
    colValues = np.array(
        gscript.vector_db_select(segments, layer=1)["values"].values())
    number_of_segments = colValues.shape[0]
    cats = colValues[:, colNames == "cat"].astype(int).squeeze()

    nseg = np.arange(1, len(cats) + 1)
    nseg_cats = []
    for i in range(len(cats)):
        nseg_cats.append((nseg[i], cats[i]))

    segmentsTopo = VectorTopo(segments)
    segmentsTopo.open("rw")
    cur = segmentsTopo.table.conn.cursor()

    # id = cat (as does ISEG and NSEG)
    cur.executemany("update " + segments + " set id=? where cat=?", nseg_cats)
    cur.executemany("update " + segments + " set ISEG=? where cat=?",
                    nseg_cats)
    cur.executemany("update " + segments + " set NSEG=? where cat=?",
                    nseg_cats)

    # outseg = tostream: default is 0 if "tostream" is off-map
    cur.execute("update " + segments + " set OUTSEG=0")
    cur.executemany("update " + segments + " set OUTSEG=? where tostream=?",
                    nseg_cats)

    # Hydraulic geometry selection
    cur.execute("update " + segments + " set ICALC=" + str(ICALC))
    segmentsTopo.table.conn.commit()
    segmentsTopo.close()
    if ICALC == 0:
        gscript.message("")
        gscript.message("ICALC=0 (constant) not supported")
        gscript.message("Continuing nonetheless.")
        gscript.message("")
    if ICALC == 1:
        if options["width_points"] is not "":
            # Can add machinery here for separate upstream and downstream widths
            # But really should not vary all that much
            # v.to_db(map=segments, option='start', columns='xr1,yr1')
            # v.to_db(map=segments, option='end', columns='xr2,yr2')
            gscript.run_command(
                "v.distance",
                from_=segments,
                to=options["width_points"],
                upload="to_attr",
                to_column=options["width_points_col"],
                column="WIDTH1",
            )
            v.db_update(map=segments, column="WIDTH2", query_column="WIDTH1")
        else:
            segmentsTopo = VectorTopo(segments)
            segmentsTopo.open("rw")
            cur = segmentsTopo.table.conn.cursor()
            cur.execute("update " + segments + " set WIDTH1=" + str(WIDTH1))
            cur.execute("update " + segments + " set WIDTH2=" + str(WIDTH2))
            segmentsTopo.table.conn.commit()
            segmentsTopo.close()
    if ICALC == 2:
        # REMOVE THIS MESSAGE ONCE THIS IS INCLUDED IN INPUT-FILE BUILDER
        gscript.message("")
        gscript.message("ICALC=2 (8-point channel + floodplain) not supported")
        gscript.message("Continuing nonetheless.")
        gscript.message("")
        if options["fp_width_pts"] is not "":
            gscript.run_command(
                "v.distance",
                from_=segments,
                to=options["fp_width_pts"],
                upload="to_attr",
                to_column=options["fp_width_pts_col"],
                column="floodplain_width",
            )
        else:
            segmentsTopo = VectorTopo(segments)
            segmentsTopo.open("rw")
            cur = segmentsTopo.table.conn.cursor()
            cur.execute("update " + segments + " set floodplain_width=" +
                        str(options["fp_width_value"]))
            segmentsTopo.table.conn.commit()
            segmentsTopo.close()
    if ICALC == 3:
        segmentsTopo = VectorTopo(segments)
        segmentsTopo.open("rw")
        cur = segmentsTopo.table.conn.cursor()
        cur.execute("update " + segments + " set CDPTH=" + str(CDPTH))
        cur.execute("update " + segments + " set FDPTH=" + str(FDPTH))
        cur.execute("update " + segments + " set AWDTH=" + str(AWDTH))
        cur.execute("update " + segments + " set BWDTH=" + str(BWDTH))
        segmentsTopo.table.conn.commit()
        segmentsTopo.close()

    # values that are 0
    gscript.message("")
    gscript.message("NOTICE: not currently used:")
    gscript.message("IUPSEG, FLOW, RUNOFF, ETSW, and PPTSW.")
    gscript.message("All set to 0.")
    gscript.message("")

    segmentsTopo = VectorTopo(segments)
    segmentsTopo.open("rw")
    cur = segmentsTopo.table.conn.cursor()
    cur.execute("update " + segments + " set IUPSEG=" + str(0))
    cur.execute("update " + segments + " set FLOW=" + str(0))
    cur.execute("update " + segments + " set RUNOFF=" + str(0))
    cur.execute("update " + segments + " set ETSW=" + str(0))
    cur.execute("update " + segments + " set PPTSW=" + str(0))
    segmentsTopo.table.conn.commit()
    segmentsTopo.close()

    # Roughness
    # ICALC=1,2: Manning (in channel)
    if (options["roughch_raster"] is not "") and (options["roughch_points"]
                                                  is not ""):
        gscript.fatal(
            "Choose either a raster or vector or a value as Manning's n input."
        )
    if options["roughch_raster"] is not "":
        ROUGHCH = options["roughch_raster"]
        v.rast_stats(
            raster=ROUGHCH,
            method="average",
            column_prefix="tmp",
            map=segments,
            flags="c",
        )
        # v.db_renamecolumn(map=segments, column='tmp_average,ROUGHCH', quiet=True)
        v.db_update(map=segments,
                    column="ROUGHCH",
                    query_column="tmp_average",
                    quiet=True)
        v.db_dropcolumn(map=segments, columns="tmp_average", quiet=True)
    elif options["roughch_points"] is not "":
        ROUGHCH = options["roughch_points"]
        gscript.run_command(
            "v.distance",
            from_=segments,
            to=ROUGHCH,
            upload="to_attr",
            to_column=options["roughch_pt_col"],
            column="ROUGHCH",
        )
    else:
        segmentsTopo = VectorTopo(segments)
        segmentsTopo.open("rw")
        cur = segmentsTopo.table.conn.cursor()
        ROUGHCH = options["roughch_value"]
        cur.execute("update " + segments + " set ROUGHCH=" + str(ROUGHCH))
        segmentsTopo.table.conn.commit()
        segmentsTopo.close()

    # ICALC=2: Manning (overbank)
    if (options["roughbk_raster"] is not "") and (options["roughbk_points"]
                                                  is not ""):
        gscript.fatal(
            "Choose either a raster or vector or a value as Manning's n input."
        )
    if options["roughbk_raster"] is not "":
        ROUGHBK = options["roughbk_raster"]
        v.rast_stats(
            raster=ROUGHBK,
            method="average",
            column_prefix="tmp",
            map=segments,
            flags="c",
        )
        v.db_renamecolumn(map=segments,
                          column="tmp_average,ROUGHBK",
                          quiet=True)
    elif options["roughbk_points"] is not "":
        ROUGHBK = options["roughbk_points"]
        gscript.run_command(
            "v.distance",
            from_=segments,
            to=ROUGHBK,
            upload="to_attr",
            to_column=options["roughbk_pt_col"],
            column="ROUGHBK",
        )
    else:
        segmentsTopo = VectorTopo(segments)
        segmentsTopo.open("rw")
        cur = segmentsTopo.table.conn.cursor()
        ROUGHBK = options["roughbk_value"]
        cur.execute("update " + segments + " set ROUGHBK=" + str(ROUGHBK))
        segmentsTopo.table.conn.commit()
        segmentsTopo.close()
コード例 #33
def main():
    """
    Creates a hydrologically correct MODFLOW grid that includes minimum
    DEM elevations for all stream cells and mean elevations everywhere else
    """
    """
    dem = 'DEM'
    grid = 'grid_tmp'
    streams = 'streams_tmp'
    streams_MODFLOW = 'streams_tmp_MODFLOW'
    DEM_MODFLOW = 'DEM_coarse'
    resolution = 500
    """

    options, flags = gscript.parser()
    dem = options['dem']
    grid = options['grid']
    streams = options['streams']
    #resolution = float(options['resolution'])
    streams_MODFLOW = options['streams_modflow']
    DEM_MODFLOW = options['dem_modflow']

    gscript.use_temp_region()

    # Get number of rows and columns
    colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns'])
    colValues = np.array(
        gscript.vector_db_select(grid, layer=1)['values'].values())
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()
    rows = colValues[:, colNames == 'row'].astype(int).squeeze()
    cols = colValues[:, colNames == 'col'].astype(int).squeeze()
    nRows = np.max(rows)
    nCols = np.max(cols)

    # Set the region
    g.region(vector=grid, rows=nRows, cols=nCols)
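    # Note (added explanation): aligning the region to the grid vector with
    # exactly nRows x nCols means every raster cell used below coincides with
    # one MODFLOW cell, so the resampled statistics map 1:1 onto the grid.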

    #g.region(raster=dem)
    v.to_rast(input=streams,
              output=streams_MODFLOW,
              use='val',
              value=1.0,
              type='line',
              overwrite=gscript.overwrite(),
              quiet=True)
    r.mapcalc('tmp = ' + streams_MODFLOW + ' * ' + dem, overwrite=True)
    g.rename(raster=('tmp', streams_MODFLOW), overwrite=True, quiet=True)
    #g.region(res=resolution, quiet=True)
    r.resamp_stats(input=streams_MODFLOW,
                   output=streams_MODFLOW,
                   method='minimum',
                   overwrite=gscript.overwrite(),
                   quiet=True)
    r.resamp_stats(input=dem,
                   output=DEM_MODFLOW,
                   method='average',
                   overwrite=gscript.overwrite(),
                   quiet=True)
    r.patch(input=streams_MODFLOW + ',' + DEM_MODFLOW,
            output=DEM_MODFLOW,
            overwrite=True,
            quiet=True)
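    # Note (added explanation): r.patch keeps the first non-null value at each
    # cell, so listing streams_MODFLOW before DEM_MODFLOW gives stream cells
    # the minimum stream elevation while all other cells keep the mean DEM
    # elevation.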
コード例 #34
ファイル: m.swim.subbasins.py プロジェクト: mwort/m.swim
    def __init__(self, **optionsandflags):
        '''Process all arguments and prepare processing'''
        # add all options and flags as attributes (only nonempty ones)
        self.options = interpret_options(optionsandflags)
        self.__dict__.update(self.options)

        # save region for convenience
        self.region = grass.region()
        self.region['kmtocell'] = 10**6 / (self.region['ewres'] * self.region['nsres'])
        self.region['celltokm'] = self.region['ewres'] * self.region['nsres'] * 1e-6
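        # Worked example (added): with 100 m x 100 m cells, kmtocell =
        # 10**6 / (100 * 100) = 100 cells per km2 and celltokm = 0.01 km2
        # per cell.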

        # check whether the DEM must be processed or whether all inputs are set
        if not self.is_set('accumulation', 'drainage', 'streams'):
            grass.fatal('One or more of these are not set: accumulation, drainage, streams.')

        # lothresh default
        if 'lothresh' not in self.options:
            self.lothresh = self.upthresh * 0.05

        # what to do with upthresh
        if self.is_set('upthreshcolumn'):
            gm('Will look for upper thresholds in the %s column.' %
               self.upthreshcolumn)
            # get thresholds from column in station vect
            try:
                threshs = grass.vector_db_select(
                          self.stations, columns=self.upthreshcolumn)['values']
                self.upthresh = OrderedDict([(k, float(v[0]))
                                             for k, v in sorted(threshs.items())])
            except:
                grass.fatal("Can't read the upper threshold from the column %s"
                            % self.upthreshcolumn)

        # streamthresh
        if 'streamthresh' in self.options:
            # convert to cells
            self.streamthresh = self.region['kmtocell'] * self.streamthresh
            # check if reasonable
            fract = float(self.streamthresh) / self.region['cells']
            if fract > 0.5 or fract < 0.01:
                gwarn('streamthresh is %s percent of the region size!' % (fract*100))
        else:
            self.streamthresh = int(self.region['cells'] * 0.02)

        # if no r.watershed flags given
        if 'rwatershedflags' not in self.options:
            self.rwatershedflags = 's'
        if 'rwatershedmemory' in self.options:
            self.rwatershedflags += 'm'
        else:
            # default value/not used
            self.rwatershedmemory = 300

        # check input for stats print
        if self.s:
            for o in ['streams', 'stations', 'catchmentprefix']:
                if not self.is_set(o):
                    grass.fatal('%s needs to be set!' % o)
            # get all catchments
            rst = grass.list_strings('rast', self.catchmentprefix+'*')
            rolist = [(int(r.split('@')[0].replace(self.catchmentprefix, '')), r)
                      for r in sorted(rst) if '__' not in r]
            self.catchment_rasters = OrderedDict(rolist)
            gm('Found these catchments %s' % self.catchment_rasters)
            # calculate station topology
            self.snap_stations()
            self.get_stations_topology()

        # initialise subbasinsdone
        self.subbasinsdone = {}

        return
コード例 #35
  except:
    try:
      grass.run_command('g.region', rast='topo_000000')
      grass.mapcalc('discharge_to_coast_'+age+' = abs('+'ocean_plus_shore_'+age+' + accumulation_ice_'+age+')', overwrite=True)
      grass.run_command('r.null', map='discharge_to_coast_'+age, setnull=0) # speeds up
      grass.run_command('g.region', w=-180, e=180)
      grass.run_command('r.to.vect', input='discharge_to_coast_'+age, output='discharge_to_coast_'+age, type='point', column='discharge_m3_s', overwrite=True)
    except:
      print age, 'ERROR'
  try:
    # The v.distance call below is commented out because it works only if the
    # location is lat/lon; it will fail for projected grids
    #grass.run_command('v.distance', from_='discharge_to_coast_'+age, to='sea_grid_points', upload='to_x,to_y', column='sea_grid_lon,sea_grid_lat')
    # Instead, stored lon/lat values are used so that this works for projected grids
    grass.run_command('v.db.addcolumn', map='discharge_to_coast_'+age, columns='sea_grid_lon double precision, sea_grid_lat double precision')
    tmp = grass.vector_db_select('discharge_to_coast_'+age, columns='sea_grid_lat')['values'].values()
    if tmp[0] == ['']:
      if isll:
        grass.run_command('v.distance', from_='discharge_to_coast_'+age, to='sea_grid_points', upload='to_x,to_y', column='sea_grid_lon,sea_grid_lat')
      else:
        try:
          grass.run_command('v.distance', from_='discharge_to_coast_'+age, to='sea_grid_points', upload='to_attr', to_column='lon', column='sea_grid_lon')
        except:
          grass.run_command('v.distance', from_='discharge_to_coast_'+age, to='sea_grid_points', upload='to_attr', to_column='x', column='sea_grid_lon')
        try:
          grass.run_command('v.distance', from_='discharge_to_coast_'+age, to='sea_grid_points', upload='to_attr', to_column='lat', column='sea_grid_lat')
        except:
          grass.run_command('v.distance', from_='discharge_to_coast_'+age, to='sea_grid_points', upload='to_attr', to_column='y', column='sea_grid_lat')
  except:
    print "No discharge points!"
print ""
コード例 #36
import grass.script as grass
import csv
import os
os.chdir("C:/repositories/codeRepo/sol")

# get the device ids
idvals = grass.vector_db_select('nodes@PERMANENT', 
                                columns='deviceid')['values'].keys()
minelevdict = {}
for id in idvals:
  # extract a point
  grass.run_command('v.extract', input='nodes@PERMANENT', output='anode@PERMANENT', 
                    type='point', where='deviceid = ' + str(id))
  # buffer the point
  grass.run_command('v.buffer', input='anode@PERMANENT', output='abuff@PERMANENT',
                    type='point', distance=1500)
  # add an attribute table (screw you GRASS!)
  grass.run_command('v.db.addtable', map='abuff@PERMANENT')
  # get the minimum elevation in the buffer zone
  grass.run_command('v.rast.stats', vector='abuff@PERMANENT', 
                    raster='BORR_DEM5ft@PERMANENT', colprefix='elev')
  # get the neighborhood minimum elevation
  minelevdict[id] = grass.vector_db_select('abuff@PERMANENT', 
                                      columns = 'elev_min')['values'][1][0]
  # clean up
  grass.run_command("g.remove", flags="f", vect="anode@PERMANENT,abuff@PERMANENT")
# export values to csv file
with open("minelevs1500ft.csv", "w") as outfile:
  w = csv.writer(outfile)
  w.writerow(['deviceid', 'nbhdminelev'])
  for key, val in minelevdict.items():
    w.writerow([key, val])
コード例 #37
def main():
    color = options["color"]
    column = options["column"]
    layer = options["layer"]
    map = options["map"]
    range = options["range"]
    raster = options["raster"]
    rgb_column = options["rgb_column"]
    rules = options["rules"]
    flip = flags["n"]

    global tmp, tmp_colr, tmp_vcol
    pid = os.getpid()
    tmp = tmp_colr = tmp_vcol = None

    mapset = grass.gisenv()["MAPSET"]
    gisbase = os.getenv("GISBASE")

    # does map exist in CURRENT mapset?
    kv = grass.find_file(map, element="vector", mapset=mapset)
    if not kv["file"]:
        grass.fatal(_("Vector map <%s> not found in current mapset") % map)

    vector = map.split("@", 1)

    # sanity check mutually exclusive color options
    if not options["color"] and not options["raster"] and not options["rules"]:
        grass.fatal(_("Pick one of color, rules, or raster options"))

    if color:
        #### check the color rule is valid
        color_opts = os.listdir(os.path.join(gisbase, "etc", "colors"))
        color_opts += ["random", "grey.eq", "grey.log", "rules"]
        if color not in color_opts:
            grass.fatal(
                _("Invalid color rule <%s>\n") % color +
                _("Valid options are: %s") % " ".join(color_opts))
    elif raster:
        if not grass.find_file(raster)["name"]:
            grass.fatal(_("Raster raster map <%s> not found") % raster)
    elif rules:
        if not os.access(rules, os.R_OK):
            grass.fatal(_("Unable to read color rules file <%s>") % rules)

    # column checks
    # check input data column
    cols = grass.vector_columns(map, layer=layer)
    if column not in cols:
        grass.fatal(_("Column <%s> not found") % column)
    ncolumn_type = cols[column]["type"]
    if ncolumn_type not in ["INTEGER", "DOUBLE PRECISION"]:
        grass.fatal(
            _("Column <%s> is not numeric but %s") % (column, ncolumn_type))

    # check if GRASSRGB column exists, make it if it doesn't
    table = grass.vector_db(map)[int(layer)]["table"]
    if rgb_column not in cols:
        # RGB Column not found, create it
        grass.message(_("Creating column <%s>...") % rgb_column)
        try:
            grass.run_command(
                "v.db.addcolumn",
                map=map,
                layer=layer,
                column="%s varchar(11)" % rgb_column,
            )
        except CalledModuleError:
            grass.fatal(_("Creating color column"))
    else:
        column_type = cols[rgb_column]["type"]
        if column_type not in ["CHARACTER", "TEXT"]:
            grass.fatal(
                _("Column <%s> is not of compatible type (found %s)") %
                (rgb_column, column_type))
        else:
            num_chars = dict([
                (v[0], int(v[2])) for v in grass.db_describe(table)["cols"]
            ])[rgb_column]
            if num_chars < 11:
                grass.fatal(
                    _("Color column <%s> is not wide enough (needs 11 characters)")
                    % rgb_column)

    cvals = grass.vector_db_select(map, layer=int(layer),
                                   columns=column)["values"].values()

    # find data range
    if range:
        # order doesn't matter
        vals = [float(x) for x in range.split(",")]
    else:
        grass.message(_("Scanning values..."))
        vals = [float(x[0]) for x in cvals]

    minval = min(vals)
    maxval = max(vals)

    grass.verbose(_("Range: [%s, %s]") % (minval, maxval))
    if minval is None or maxval is None:
        grass.fatal(_("Scanning data range"))

    # setup internal region
    grass.use_temp_region()
    grass.run_command("g.region", rows=2, cols=2)

    tmp_colr = "tmp_colr_%d" % pid

    # create dummy raster map
    if ncolumn_type == "INTEGER":
        grass.mapcalc(
            "$tmp_colr = int(if(row() == 1, $minval, $maxval))",
            tmp_colr=tmp_colr,
            minval=minval,
            maxval=maxval,
        )
    else:
        grass.mapcalc(
            "$tmp_colr = double(if(row() == 1, $minval, $maxval))",
            tmp_colr=tmp_colr,
            minval=minval,
            maxval=maxval,
        )
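    # Note (added explanation): this 2x2 raster holds the data minimum in its
    # first row and the maximum in its second, so r.colors below can attach a
    # color table spanning the full data range, which r.what.color then
    # queries value by value.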

    if color:
        color_cmd = {"color": color}
    elif raster:
        color_cmd = {"raster": raster}
    elif rules:
        color_cmd = {"rules": rules}

    if flip:
        flip_flag = "n"
    else:
        flip_flag = ""

    grass.run_command("r.colors",
                      map=tmp_colr,
                      flags=flip_flag,
                      quiet=True,
                      **color_cmd)

    tmp = grass.tempfile()

    # calculate colors and write SQL command file
    grass.message(_("Looking up colors..."))

    f = open(tmp, "w")
    p = grass.feed_command("r.what.color", flags="i", input=tmp_colr, stdout=f)
    lastval = None
    for v in sorted(vals):
        if v == lastval:
            continue
        p.stdin.write("%f\n" % v)
        lastval = v
    p.stdin.close()
    p.wait()
    f.close()

    tmp_vcol = "%s_vcol.sql" % tmp
    fi = open(tmp, "r")
    fo = open(tmp_vcol, "w")
    t = string.Template(
        "UPDATE $table SET $rgb_column = '$colr' WHERE $column = $value;\n")
    found = 0
    for line in fi:
        [value, colr] = line.split(": ")
        colr = colr.strip()
        if len(colr.split(":")) != 3:
            continue
        fo.write(
            t.substitute(
                table=table,
                rgb_column=rgb_column,
                colr=colr,
                column=column,
                value=value,
            ))
        found += 1
    fi.close()
    fo.close()

    if not found:
        grass.fatal(_("No values found in color range"))

    # apply SQL commands to update the table with values
    grass.message(_("Writing %s colors...") % found)

    try:
        grass.run_command("db.execute", input=tmp_vcol)
    except CalledModuleError:
        grass.fatal(_("Processing SQL transaction"))

    if flags["s"]:
        vcolors = "vcolors_%d" % pid
        grass.run_command("g.rename", raster=(tmp_colr, vcolors), quiet=True)
        grass.message(
            _("Raster map containing color rules saved to <%s>") % vcolors)
        # TODO save full v.colors command line history
        grass.run_command(
            "r.support",
            map=vcolors,
            history="",
            source1="vector map = %s" % map,
            source2="column = %s" % column,
            title=_("Dummy raster to use as thematic vector legend"),
            description="generated by v.colors using r.mapcalc",
        )
        grass.run_command(
            "r.support",
            map=vcolors,
            history=_("RGB saved into <%s> using <%s%s%s>") %
            (rgb_column, color, raster, rules),
        )