Example #1
def rand_vect_points(name, npoints=10, overwrite=True):
    new = VectorTopo(name)
    new.open('w', overwrite=overwrite)
    for pnt in get_random_points(npoints):
        new.write(pnt)
    new.close()
    return new
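The helper get_random_points is not defined in this example. A minimal sketch of what it could look like, assuming points are drawn uniformly from hypothetical bounding coordinates:

import random
from grass.pygrass.vector.geometry import Point

def get_random_points(npoints, xmin=0.0, xmax=1000.0, ymin=0.0, ymax=1000.0):
    # Yield npoints random Points inside the given (hypothetical) bounds.
    for _ in range(npoints):
        yield Point(random.uniform(xmin, xmax), random.uniform(ymin, ymax))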
Example #2
def sumRasterPath(pvc, nogo):
    # Collect the categories of no-go segments to exclude.
    ng_path = VectorTopo(nogo)
    ng_path.open('r')
    invalid = set()
    for seg in ng_path:
        invalid.add(seg.attrs['a_cat'])
    ng_path.close()
    # Sum friction-weighted segment lengths per category.
    path = VectorTopo(pvc)
    path.open('r')
    actual_costs = {}
    for seg in path:
        cat = seg.attrs['a_cat']
        if cat in invalid:
            continue
        actual_costs[cat] = actual_costs.get(cat, 0.0) + seg.attrs['b_friction'] * seg.length()
    path.close()
    return actual_costs
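A hypothetical call, assuming 'paths' and 'nogo' are existing vector maps whose attribute tables carry 'a_cat' and 'b_friction' columns:

costs = sumRasterPath('paths', 'nogo')
for cat, cost in sorted(costs.items()):
    print(cat, cost)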
Example #3
def exportResultTable(cell_size):
    output = [['Cat', 'visi_cost', '8_prop', '16_prop', '8_act', '16_act']]
    raster_costs8 = sumRasterPath(pvc8, nogo8)
    raster_costs16 = sumRasterPath(pvc16, nogo16)
    trg = VectorTopo(targetmap)
    trg.open('r')
    for t in trg:
        outr = [str(t.attrs['cat'])]
        print('cat:', t.attrs['cat'])
        print('visi cost:', t.attrs['cost'])
        if t.attrs['cost'] >= 0:
            outr.append(str(t.attrs['cost']))
        else:
            outr.append('nf')
        if t.attrs[trg_c8]:
            print('8 proposed:', t.attrs[trg_c8] * cell_size)
            outr.append(str(t.attrs[trg_c8] * cell_size))
        else:
            outr.append('nf')
            print('no 8 path found')
        if t.attrs[trg_c16]:
            print('16 proposed:', t.attrs[trg_c16] * cell_size)
            outr.append(str(t.attrs[trg_c16] * cell_size))
        else:
            print('no 16 path found')
            outr.append('nf')
        if t.attrs['cat'] in raster_costs8:
            print('8 actual:', raster_costs8[t.attrs['cat']])
            outr.append(str(raster_costs8[t.attrs['cat']]))
        else:
            print('invalid 8 con raster path')
            outr.append('inv')
        if t.attrs['cat'] in raster_costs16:
            print('16 actual:', raster_costs16[t.attrs['cat']])
            outr.append(str(raster_costs16[t.attrs['cat']]))
        else:
            print('invalid 16 con raster path')
            outr.append('inv')
        print('')
        output.append(outr)
    trg.close()
    np.savetxt(path + result_file, output, delimiter=';', fmt='%s')
Example #4
"""
# ID numbers
# There should be a way to do this all at once, but...
for i in range(len(cats)):
  grass.run_command('v.db.update', map='HRU', column='id', value=nhru[i], where='cat='+str(cats[i]))
nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
for i in range(len(cats)):
  grass.run_command('v.db.update', map='segment', column='id', value=nsegment[i], where='cat='+str(cats[i]))
"""

nhru = np.arange(1, xy1.shape[0]+1)
nhrut = []
for i in range(len(nhru)):
  nhrut.append( (nhru[i], cats[i]) )
# Access the HRU's 
hru = VectorTopo('HRU')
# Open the map with topology:
hru.open('rw')
# Create a cursor
cur = hru.table.conn.cursor()
# Use it to loop across the table
cur.executemany("update HRU set id=? where cat=?", nhrut)
# Commit changes to the table
hru.table.conn.commit()
# Close the table
hru.close()

# if you want to append new rows to the table instead
# cur.executemany("insert into HRU(id, cat) values(?, ?)", nhrut) # "insert into" adds rows

# Same for segments
Example #5
#!/usr/bin/env python

from grass.pygrass.vector import VectorTopo

psc = '41115'

obce = VectorTopo('obce')
obce.open('r')

print ("Seznam obci s PSC {}:".format(psc))
obce_psc = set()
for prvek in obce.viter('areas'):
    if prvek.attrs['psc'] != psc:
        continue
    obce_psc.add(prvek.id)
    print (u"{0}: {1}".format(psc, prvek.attrs['nazev']))
    
sousede = set()
for prvek in obce.viter('areas'):
    if prvek.id not in obce_psc:
        continue

    for b in prvek.boundaries():
        for n in b.get_left_right():
            if n != -1 and n != prvek.id:
               sousede.add(n)

print ("Seznam sousednich obce:")
for prvek in obce.viter('areas'):
    if prvek.id not in sousede or \
       prvek.attrs['psc'] == psc:
Example #6
def main():
    """
    Builds river segments for input to the USGS hydrologic models
    PRMS and GSFLOW.
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()

    # I/O
    streams = options['input']
    segments = options['output']

    # Hydraulic geometry
    ICALC = int(options['icalc'])

    # ICALC=0: Constant depth
    WIDTH1 = options['width1']
    WIDTH2 = options['width2']

    # ICALC=1,2: Manning (in channel and overbank): below

    # ICALC=3: Power-law relationships (following Leopold and others)
    # The at-a-station default exponents are from Rhodes (1977)
    CDPTH = str(float(options['cdpth']) / 35.3146667) # cfs to m^3/s
    FDPTH = options['fdpth']
    AWDTH = str(float(options['awdth']) / 35.3146667) # cfs to m^3/s
    BWDTH = options['bwdth']

    ##################################################
    # CHECKING DEPENDENCIES WITH OPTIONAL PARAMETERS #
    ##################################################

    if ICALC == 3:
        if CDPTH and FDPTH and AWDTH and BWDTH:
            pass
        else:
            gscript.fatal('Missing CDPTH, FDPTH, AWDTH, and/or BWDTH. '
                          'These are required when ICALC = 3.')

    ###########
    # RUNNING #
    ###########

    # New Columns for Segments
    segment_columns = []
    # Self ID
    segment_columns.append('id integer') # segment number
    segment_columns.append('ISEG integer') # segment number
    segment_columns.append('NSEG integer') # segment number
    # for GSFLOW
    segment_columns.append('ICALC integer') # 1 for channel, 2 for channel+fp, 3 for power function
    segment_columns.append('OUTSEG integer') # downstream segment -- tostream, renumbered
    segment_columns.append('ROUGHCH double precision') # in-channel Manning's n
    segment_columns.append('ROUGHBK double precision') # overbank Manning's n
    segment_columns.append('WIDTH1 double precision') # channel width at upstream end
    segment_columns.append('WIDTH2 double precision') # channel width at downstream end
    segment_columns.append('CDPTH double precision') # depth coeff
    segment_columns.append('FDPTH double precision') # depth exp
    segment_columns.append('AWDTH double precision') # width coeff
    segment_columns.append('BWDTH double precision') # width exp
    segment_columns.append('floodplain_width double precision') # floodplain width (8-pt approx channel + flat fp)
    # The below will be all 0
    segment_columns.append('IUPSEG varchar') # upstream segment ID number, for diversions
    segment_columns.append('FLOW varchar')
    segment_columns.append('RUNOFF varchar')
    segment_columns.append('ETSW varchar')
    segment_columns.append('PPTSW varchar')

    segment_columns = ",".join(segment_columns)

    # CONSIDER THE EFFECT OF OVERWRITING COLUMNS -- WARN FOR THIS
    # IF MAP EXISTS ALREADY?

    # Create a map to work with
    g.copy(vector=(streams, segments), overwrite=gscript.overwrite())
    # and add its columns
    v.db_addcolumn(map=segments, columns=segment_columns)

    # Produce the data table entries
    ##################################
    colNames = np.array(gscript.vector_db_select(segments, layer=1)['columns'])
    colValues = np.array(list(gscript.vector_db_select(segments, layer=1)['values'].values()))  # list() for Python 3
    number_of_segments = colValues.shape[0]
    cats = colValues[:,colNames == 'cat'].astype(int).squeeze()

    nseg = np.arange(1, len(cats)+1)
    nseg_cats = []
    for i in range(len(cats)):
        nseg_cats.append((nseg[i], cats[i]))

    segmentsTopo = VectorTopo(segments)
    segmentsTopo.open('rw')
    cur = segmentsTopo.table.conn.cursor()

    # id = cat (as does ISEG and NSEG)
    cur.executemany("update "+segments+" set id=? where cat=?", nseg_cats)
    cur.executemany("update "+segments+" set ISEG=? where cat=?", nseg_cats)
    cur.executemany("update "+segments+" set NSEG=? where cat=?", nseg_cats)

    # outseg = tostream: default is 0 if "tostream" is off-map
    cur.execute("update "+segments+" set OUTSEG=0")
    cur.executemany("update "+segments+" set OUTSEG=? where tostream=?", nseg_cats)

    # Hydraulic geometry selection
    cur.execute("update "+segments+" set ICALC="+str(ICALC))
    segmentsTopo.table.conn.commit()
    segmentsTopo.close()
    if ICALC == 0:
        gscript.message('')
        gscript.message('ICALC=0 (constant) not supported')
        gscript.message('Continuing nonetheless.')
        gscript.message('')
    if ICALC == 1:
        if options['width_points'] != '':
            # Can add machinery here for separate upstream and downstream widths
            # But really should not vary all that much
            #v.to_db(map=segments, option='start', columns='xr1,yr1')
            #v.to_db(map=segments, option='end', columns='xr2,yr2')
            gscript.run_command('v.distance', from_=segments, to=options['width_points'], upload='to_attr', to_column=options['width_points_col'], column='WIDTH1')
            v.db_update(map=segments, column='WIDTH2', query_column='WIDTH1')
        else:
            segmentsTopo = VectorTopo(segments)
            segmentsTopo.open('rw')
            cur = segmentsTopo.table.conn.cursor()
            cur.execute("update "+segments+" set WIDTH1="+str(WIDTH1))
            cur.execute("update "+segments+" set WIDTH2="+str(WIDTH2))
            segmentsTopo.table.conn.commit()
            segmentsTopo.close()
    if ICALC == 2:
        # REMOVE THIS MESSAGE ONCE THIS IS INCLUDED IN INPUT-FILE BUILDER
        gscript.message('')
        gscript.message('ICALC=2 (8-point channel + floodplain) not supported')
        gscript.message('Continuing nonetheless.')
        gscript.message('')
        if options['fp_width_pts'] != '':
            gscript.run_command('v.distance', from_=segments,
                                to=options['fp_width_pts'], upload='to_attr',
                                to_column=options['fp_width_pts_col'],
                                column='floodplain_width')
        else:
            segmentsTopo = VectorTopo(segments)
            segmentsTopo.open('rw')
            cur = segmentsTopo.table.conn.cursor()
            cur.execute("update "+segments+" set floodplain_width="+str(options['fp_width_value']))
            segmentsTopo.table.conn.commit()
            segmentsTopo.close()
    if ICALC == 3:
        segmentsTopo = VectorTopo(segments)
        segmentsTopo.open('rw')
        cur = segmentsTopo.table.conn.cursor()
        cur.execute("update "+segments+" set CDPTH="+str(CDPTH))
        cur.execute("update "+segments+" set FDPTH="+str(FDPTH))
        cur.execute("update "+segments+" set AWDTH="+str(AWDTH))
        cur.execute("update "+segments+" set BWDTH="+str(BWDTH))
        segmentsTopo.table.conn.commit()
        segmentsTopo.close()

    # values that are 0
    gscript.message('')
    gscript.message('NOTICE: not currently used:')
    gscript.message('IUPSEG, FLOW, RUNOFF, ETSW, and PPTSW.')
    gscript.message('All set to 0.')
    gscript.message('')

    segmentsTopo = VectorTopo(segments)
    segmentsTopo.open('rw')
    cur = segmentsTopo.table.conn.cursor()
    cur.execute("update "+segments+" set IUPSEG="+str(0))
    cur.execute("update "+segments+" set FLOW="+str(0))
    cur.execute("update "+segments+" set RUNOFF="+str(0))
    cur.execute("update "+segments+" set ETSW="+str(0))
    cur.execute("update "+segments+" set PPTSW="+str(0))
    segmentsTopo.table.conn.commit()
    segmentsTopo.close()

    # Roughness
    # ICALC=1,2: Manning (in channel)
    if (options['roughch_raster'] != '') and (options['roughch_points'] != ''):
        gscript.fatal("Supply only one Manning's n input: raster, points, or a constant value.")
    if options['roughch_raster'] != '':
        ROUGHCH = options['roughch_raster']
        v.rast_stats(raster=ROUGHCH, method='average', column_prefix='tmp', map=segments, flags='c')
        #v.db_renamecolumn(map=segments, column='tmp_average,ROUGHCH', quiet=True)
        v.db_update(map=segments, column='ROUGHCH', query_column='tmp_average', quiet=True)
        v.db_dropcolumn(map=segments, columns='tmp_average', quiet=True)
    elif options['roughch_points'] != '':
        ROUGHCH = options['roughch_points']
        gscript.run_command('v.distance', from_=segments, to=ROUGHCH, upload='to_attr', to_column=options['roughch_pt_col'], column='ROUGHCH')
    else:
        segmentsTopo = VectorTopo(segments)
        segmentsTopo.open('rw')
        cur = segmentsTopo.table.conn.cursor()
        ROUGHCH = options['roughch_value']
        cur.execute("update "+segments+" set ROUGHCH="+str(ROUGHCH))
        segmentsTopo.table.conn.commit()
        segmentsTopo.close()

    # ICALC=2: Manning (overbank)
    if (options['roughbk_raster'] != '') and (options['roughbk_points'] != ''):
        gscript.fatal("Supply only one Manning's n input: raster, points, or a constant value.")
    if options['roughbk_raster'] != '':
        ROUGHBK = options['roughbk_raster']
        v.rast_stats(raster=ROUGHBK, method='average', column_prefix='tmp', map=segments, flags='c')
        v.db_renamecolumn(map=segments, column='tmp_average,ROUGHBK', quiet=True)
    elif options['roughbk_points'] != '':
        ROUGHBK = options['roughbk_points']
        gscript.run_command('v.distance', from_=segments, to=ROUGHBK, upload='to_attr', to_column=options['roughbk_pt_col'], column='ROUGHBK')
    else:
        segmentsTopo = VectorTopo(segments)
        segmentsTopo.open('rw')
        cur = segmentsTopo.table.conn.cursor()
        ROUGHBK = options['roughbk_value']
        cur.execute("update "+segments+" set ROUGHBK="+str(ROUGHBK))
        segmentsTopo.table.conn.commit()
        segmentsTopo.close()
Example #7
#!/usr/bin/env python3

from grass.pygrass.vector import VectorTopo
from grass.pygrass.vector.geometry import Point

# create the columns definition
cols = [(u'cat', 'INTEGER PRIMARY KEY'), (u'name', 'VARCHAR')]
# start new vector with columns definition
new = VectorTopo('pois')
new.open('w', tab_cols=cols, overwrite=True)
# add points
point = Point(681671.15, 5644545.63)
new.write(point, cat=1, attrs=('Jena', ))
# commit attributes, otherwise they will not be saved
new.table.conn.commit()
# close the vector
new.close()
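Reading the map back is symmetric; a minimal sketch that reopens the 'pois' map written above and prints each point with its attribute:

pois = VectorTopo('pois')
pois.open('r')
for pnt in pois:
    print(pnt, pnt.attrs['name'])
pois.close()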
Example #8
def main():
    """
    Links each river segment to the next downstream segment in a tributary
    network by referencing its category (cat) number in a new column. "0"
    means that the river exits the map.
    """

    options, flags = gscript.parser()
    streams = options['map']
    x1 = options['upstream_easting_column']
    y1 = options['upstream_northing_column']
    x2 = options['downstream_easting_column']
    y2 = options['downstream_northing_column']

    streamsTopo = VectorTopo(streams)
    #streamsTopo.build()

    # 1. Get vectorTopo
    streamsTopo.open(mode='rw')
    """
    points_in_streams = []
    cat_of_line_segment = []

    # 2. Get coordinates
    for row in streamsTopo:
        cat_of_line_segment.append(row.cat)
        if type(row) == vector.geometry.Line:
            points_in_streams.append(row)
    """

    # 3. Coordinates of points: 1 = start, 2 = end
    for colname, coltype in ((x1, 'double precision'),
                             (y1, 'double precision'),
                             (x2, 'double precision'),
                             (y2, 'double precision'),
                             ('tostream', 'int')):
        try:
            streamsTopo.table.columns.add(colname, coltype)
        except Exception:
            # Column already exists; keep going.
            pass
    streamsTopo.table.conn.commit()

    # Is this faster than v.to.db?
    """
    cur = streamsTopo.table.conn.cursor()
    for i in range(len(points_in_streams)):
        cur.execute("update streams set x1="+str(points_in_streams[i][0].x)+" where cat="+str(cat_of_line_segment[i]))
        cur.execute("update streams set y1="+str(points_in_streams[i][0].y)+" where cat="+str(cat_of_line_segment[i]))
        cur.execute("update streams set x2="+str(points_in_streams[i][-1].x)+" where cat="+str(cat_of_line_segment[i]))
        cur.execute("update streams set y2="+str(points_in_streams[i][-1].y)+" where cat="+str(cat_of_line_segment[i]))
    streamsTopo.table.conn.commit()
    streamsTopo.build()
    """
    # v.to.db Works more consistently, at least
    streamsTopo.close()
    v.to_db(map=streams, option='start', columns=x1 + ',' + y1)
    v.to_db(map=streams, option='end', columns=x2 + ',' + y2)

    # 4. Read in and save the start and end coordinate points
    colNames = np.array(vector_db_select(streams)['columns'])
    colValues = np.array(list(vector_db_select(streams)['values'].values()))  # list() for Python 3
    cats = colValues[:,
                     colNames == 'cat'].astype(int).squeeze()  # river number
    xy1 = colValues[:, (colNames == 'x1') + (colNames == 'y1')].astype(
        float)  # upstream
    xy2 = colValues[:, (colNames == 'x2') + (colNames == 'y2')].astype(
        float)  # downstream

    # 5. Build river network
    tocat = []
    for i in range(len(cats)):
        tosegment_mask = np.prod(xy1 == xy2[i], axis=1)
        if np.sum(tosegment_mask) == 0:
            tocat.append(0)
        else:
            tocat.append(cats[tosegment_mask.nonzero()[0][0]])
    tocat = np.asarray(tocat).astype(int)

    # This gives us a set of downstream-facing adjacencies.
    # We will update the database with it.
    streamsTopo.build()
    streamsTopo.open('rw')
    cur = streamsTopo.table.conn.cursor()
    # Default to 0 if no stream flows to it
    cur.execute("update " + streams + " set tostream=0")
    for i in range(len(tocat)):
        cur.execute("update " + streams + " set tostream=" + str(tocat[i]) +
                    " where cat=" + str(cats[i]))
    streamsTopo.table.conn.commit()
    #streamsTopo.build()
    streamsTopo.close()

    gscript.message('')
    gscript.message(
        'Drainage topology built. Check "tostream" column for the downstream cat.'
    )
    gscript.message('A cat value of 0 indicates the downstream-most segment.')
    gscript.message('')
Example #9
def write_structures(plants, output, elev, stream=None,
                     ndigits=0, resolution=None, contour='',
                     overwrite=False):
    """Write a vector map with the plant structures"""
    def write_hydrostruct(out, hydro, plant):
        pot = plant.potential_power(intakes=[hydro.intake, ])
        (plant_id, itk_id, side,
         disch, gross_head) = (plant.id, hydro.intake.id, hydro.side,
                               float(hydro.intake.discharge),
                               float(hydro.intake.elevation -
                                     plant.restitution.elevation))
        out.write(hydro.conduct,
                  (plant_id, itk_id, disch, 0., 0., 'conduct', side))
        out.write(hydro.penstock,
                  (plant_id, itk_id, disch, gross_head, pot, 'penstock', side))
        out.table.conn.commit()

    tab_cols = [(u'cat', 'INTEGER PRIMARY KEY'),
                (u'plant_id', 'VARCHAR(10)'),
                (u'intake_id', 'INTEGER'),
                (u'discharge', 'DOUBLE'),
                (u'gross_head', 'DOUBLE'),
                (u'power', 'DOUBLE'),
                (u'kind', 'VARCHAR(10)'),
                (u'side', 'VARCHAR(10)'), ]

    with VectorTopo(output, mode='w', overwrite=overwrite) as out:
        link = Link(layer=1, name=output, table=output,
                    driver='sqlite')
        out.open('w')
        out.dblinks.add(link)
        out.table = out.dblinks[0].table()
        out.table.create(tab_cols)

        print('Number of plants: %d' % len(plants))

        # check if a contour vector map is provided by the user
        compute_contour = False
        if contour:
            cname, cmset = (contour.split('@') if '@' in contour
                            else (contour, ''))
            # recompute the contours only if the map already exists
            # and may be overwritten
            if bool(utils.get_mapset_vector(cname, cmset)) and overwrite:
                compute_contour = True
            remove = False
        else:
            # create a random name
            contour = 'tmp_struct_contour_%05d_%03d' % (os.getpid(),
                                                        random.randint(0, 999))
            compute_contour = True
            remove = True

        if compute_contour:
            # compute the levels of the contour lines map
            levels = []
            for p in plants.values():
                for itk in p.intakes:
                    levels.append(closest(itk.elevation, ndigits=ndigits,
                                          resolution=resolution))
            levels = sorted(set(levels))
            # generate the contour lines that pass through the intake points
            r.contour(input='%s@%s' % (elev.name, elev.mapset),
                      output=contour, step=0, levels=levels, overwrite=True)

        # open the contour lines
        with VectorTopo(contour, mode='r') as cnt:
            for plant in plants.values():
                print(plant.id)
                for options in plant.structures(elev, stream=stream,
                                                ndigits=ndigits,
                                                resolution=resolution,
                                                contour=cnt):
                    for hydro in options:
                        print('writing: ', hydro.intake)
                        write_hydrostruct(out, hydro, plant)

        if remove:
            cnt.remove()
Example #10
def main():
    from dateutil.parser import parse

    try:
        from osgeo import ogr, osr
        from osgeo import __version__ as gdal_version
    except ImportError:
        grass.fatal(
            _(
                "Unable to load GDAL Python bindings (requires "
                "package 'python-gdal' or Python library GDAL "
                "to be installed)."
            )
        )
    try:
        from pygbif import occurrences
        from pygbif import species
    except ImportError:
        grass.fatal(
            _(
                "Cannot import pygbif (https://github.com/sckott/pygbif)"
                " library."
                " Please install it (pip install pygbif)"
                " or ensure that it is on path"
                " (use PYTHONPATH variable)."
            )
        )

    # Parse input options
    output = options["output"]
    mask = options["mask"]
    species_maps = flags["i"]
    no_region_limit = flags["r"]
    no_topo = flags["b"]
    print_species = flags["p"]
    print_species_table = flags["t"]
    print_species_shell = flags["g"]
    print_occ_number = flags["o"]
    allow_no_geom = flags["n"]
    hasGeoIssue = flags["s"]
    taxa_list = options["taxa"].split(",")
    institutionCode = options["institutioncode"]
    basisofrecord = options["basisofrecord"]
    recordedby = options["recordedby"].split(",")
    date_from = options["date_from"]
    date_to = options["date_to"]
    country = options["country"]
    continent = options["continent"]
    rank = options["rank"]

    # Define static variable
    # Initialize cat
    cat = 0
    # Number of occurrences to fetch in one request
    chunk_size = 300
    # lat/lon proj string
    latlon_crs = [
        "+proj=longlat +no_defs +a=6378137 +rf=298.257223563 +towgs84=0.000,0.000,0.000",
        "+proj=longlat +no_defs +a=6378137 +rf=298.257223563 +towgs84=0,0,0,0,0,0,0",
        "+proj=longlat +no_defs +a=6378137 +rf=298.257223563 +towgs84=0.000,0.000,0.000 +type=crs",
    ]
    # List attributes available in Darwin Core
    # not all attributes are returned in each request
    # to avoid key errors when accessing the dictionary returned by pygbif
    # presence of DWC keys in the returned dictionary is checked using this list
    # The number of keys in this list has to be equal to the number of columns
    # in the attribute table and the attributes written for each occurrence
    dwc_keys = [
        "key",
        "taxonRank",
        "taxonKey",
        "taxonID",
        "scientificName",
        "species",
        "speciesKey",
        "genericName",
        "genus",
        "genusKey",
        "family",
        "familyKey",
        "order",
        "orderKey",
        "class",
        "classKey",
        "phylum",
        "phylumKey",
        "kingdom",
        "kingdomKey",
        "eventDate",
        "verbatimEventDate",
        "startDayOfYear",
        "endDayOfYear",
        "year",
        "month",
        "day",
        "occurrenceID",
        "occurrenceStatus",
        "occurrenceRemarks",
        "Habitat",
        "basisOfRecord",
        "preparations",
        "sex",
        "type",
        "locality",
        "verbatimLocality",
        "decimalLongitude",
        "decimalLatitude",
        "coordinateUncertaintyInMeters",
        "geodeticDatum",
        "higerGeography",
        "continent",
        "country",
        "countryCode",
        "stateProvince",
        "gbifID",
        "protocol",
        "identifier",
        "recordedBy",
        "identificationID",
        "identifiers",
        "dateIdentified",
        "modified",
        "institutionCode",
        "lastInterpreted",
        "lastParsed",
        "references",
        "relations",
        "catalogNumber",
        "occurrenceDetails",
        "datasetKey",
        "datasetName",
        "collectionCode",
        "rights",
        "rightsHolder",
        "license",
        "publishingOrgKey",
        "publishingCountry",
        "lastCrawled",
        "specificEpithet",
        "facts",
        "issues",
        "extensions",
        "language",
    ]
    # Define columns for the attribute table
    cols = [
        ("cat", "INTEGER PRIMARY KEY"),
        ("g_search", "varchar(100)"),
        ("g_key", "integer"),
        ("g_taxonrank", "varchar(50)"),
        ("g_taxonkey", "integer"),
        ("g_taxonid", "varchar(50)"),
        ("g_scientificname", "varchar(255)"),
        ("g_species", "varchar(255)"),
        ("g_specieskey", "integer"),
        ("g_genericname", "varchar(255)"),
        ("g_genus", "varchar(50)"),
        ("g_genuskey", "integer"),
        ("g_family", "varchar(50)"),
        ("g_familykey", "integer"),
        ("g_order", "varchar(50)"),
        ("g_orderkey", "integer"),
        ("g_class", "varchar(50)"),
        ("g_classkey", "integer"),
        ("g_phylum", "varchar(50)"),
        ("g_phylumkey", "integer"),
        ("g_kingdom", "varchar(50)"),
        ("g_kingdomkey", "integer"),
        ("g_eventdate", "text"),
        ("g_verbatimeventdate", "varchar(50)"),
        ("g_startDayOfYear", "integer"),
        ("g_endDayOfYear", "integer"),
        ("g_year", "integer"),
        ("g_month", "integer"),
        ("g_day", "integer"),
        ("g_occurrenceid", "varchar(255)"),
        ("g_occurrenceStatus", "varchar(50)"),
        ("g_occurrenceRemarks", "varchar(50)"),
        ("g_Habitat", "varchar(50)"),
        ("g_basisofrecord", "varchar(50)"),
        ("g_preparations", "varchar(50)"),
        ("g_sex", "varchar(50)"),
        ("g_type", "varchar(50)"),
        ("g_locality", "varchar(255)"),
        ("g_verbatimlocality", "varchar(255)"),
        ("g_decimallongitude", "double precision"),
        ("g_decimallatitude", "double precision"),
        ("g_coordinateUncertaintyInMeters", "double precision"),
        ("g_geodeticdatum", "varchar(50)"),
        ("g_higerGeography", "varchar(255)"),
        ("g_continent", "varchar(50)"),
        ("g_country", "varchar(50)"),
        ("g_countryCode", "varchar(50)"),
        ("g_stateProvince", "varchar(50)"),
        ("g_gbifid", "varchar(255)"),
        ("g_protocol", "varchar(255)"),
        ("g_identifier", "varchar(50)"),
        ("g_recordedby", "varchar(255)"),
        ("g_identificationid", "varchar(255)"),
        ("g_identifiers", "text"),
        ("g_dateidentified", "text"),
        ("g_modified", "text"),
        ("g_institutioncode", "varchar(50)"),
        ("g_lastinterpreted", "text"),
        ("g_lastparsed", "text"),
        ("g_references", "varchar(255)"),
        ("g_relations", "text"),
        ("g_catalognumber", "varchar(50)"),
        ("g_occurrencedetails", "text"),
        ("g_datasetkey", "varchar(50)"),
        ("g_datasetname", "varchar(255)"),
        ("g_collectioncode", "varchar(50)"),
        ("g_rights", "varchar(255)"),
        ("g_rightsholder", "varchar(255)"),
        ("g_license", "varchar(50)"),
        ("g_publishingorgkey", "varchar(50)"),
        ("g_publishingcountry", "varchar(50)"),
        ("g_lastcrawled", "text"),
        ("g_specificepithet", "varchar(50)"),
        ("g_facts", "text"),
        ("g_issues", "text"),
        ("g_extensions", "text"),
        ("g_language", "varchar(50)"),
    ]

    # maybe no longer required in Python3
    set_output_encoding()
    # Set temporal filter if requested by user
    # Initialize eventDate filter
    eventDate = None
    # Check if date from is compatible (ISO compliant)
    if date_from:
        try:
            parse(date_from)
        except:
            grass.fatal("Invalid invalid start date provided")

        if date_from and not date_to:
            eventDate = "{}".format(date_from)
    # Check if date to is compatible (ISO compliant)
    if date_to:
        try:
            parse(date_to)
        except:
            grass.fatal("Invalid invalid end date provided")
        # Check if date to is after date_from
        if parse(date_from) < parse(date_to):
            eventDate = "{},{}".format(date_from, date_to)
        else:
            grass.fatal("Invalid date range: End date has to be after start date!")
    # Set filter on basisOfRecord if requested by user
    if basisofrecord == "ALL":
        basisOfRecord = None
    else:
        basisOfRecord = basisofrecord
    # Allow also occurrences with spatial issues if requested by user
    hasGeospatialIssue = False
    if hasGeoIssue:
        hasGeospatialIssue = True
    # Allow also occurrences without coordinates if requested by user
    hasCoordinate = True
    if allow_no_geom:
        hasCoordinate = False

    # Set reprojection parameters
    # Set target projection of current LOCATION
    proj_info = grass.parse_command("g.proj", flags="g")
    target_crs = grass.read_command("g.proj", flags="fj").rstrip()
    target = osr.SpatialReference()

    # Prefer EPSG CRS definitions
    if proj_info.get("epsg"):
        target.ImportFromEPSG(int(proj_info["epsg"]))
    else:
        target.ImportFromProj4(target_crs)

    # GDAL >= 3 swaps x and y axis, see: github.com/gdal/issues/1546
    if int(gdal_version[0]) >= 3:
        target.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)

    if target_crs == "XY location (unprojected)":
        grass.fatal("Sorry, XY locations are not supported!")

    # Set source projection from GBIF
    source = osr.SpatialReference()
    source.ImportFromEPSG(4326)
    # GDAL >= 3 swaps x and y axis, see: github.com/gdal/issues/1546
    if int(gdal_version[0]) >= 3:
        source.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)

    if target_crs not in latlon_crs:
        transform = osr.CoordinateTransformation(source, target)
        reverse_transform = osr.CoordinateTransformation(target, source)

    # Generate WKT polygon to use for spatial filtering if requested
    if mask:
        if len(mask.split("@")) == 2:
            m = VectorTopo(mask.split("@")[0], mapset=mask.split("@")[1])
        else:
            m = VectorTopo(mask)
        if not m.exist():
            grass.fatal("Could not find vector map <{}>".format(mask))
        m.open("r")
        if not m.is_open():
            grass.fatal("Could not open vector map <{}>".format(mask))

        # Use the area's WKT if the map contains exactly one area,
        # otherwise fall back to the map's bounding box
        if m.number_of("areas") == 1:
            region_pol = [area.to_wkt() for area in m.viter("areas")][0]
        else:
            bbox = (
                str(m.bbox())
                .replace("Bbox(", "")
                .replace(" ", "")
                .rstrip(")")
                .split(",")
            )
            region_pol = (
                "POLYGON (({0} {1}, {0} {3}, {2} {3}, {2} {1}, {0} {1}))".format(
                    bbox[2], bbox[0], bbox[3], bbox[1]
                )
            )
        m.close()
    else:
        # Do not limit import spatially if LOCATION is able to take global data
        if no_region_limit:
            if target_crs not in latlon_crs:
                grass.fatal(
                    "Import of data from outside the current region is"
                    "only supported in a WGS84 location!"
                )
            region_pol = None
        else:
            # Limit the import spatially to the current region if the
            # LOCATION cannot take global data, to avoid projection errors
            region = grass.parse_command("g.region", flags="g")
            region_pol = "POLYGON (({0} {1},{0} {3},{2} {3},{2} {1},{0} {1}))".format(
                region["e"], region["n"], region["w"], region["s"]
            )

    # Do not reproject in latlon LOCATIONS
    if target_crs not in latlon_crs:
        pol = ogr.CreateGeometryFromWkt(region_pol)
        pol.Transform(reverse_transform)
        pol = pol.ExportToWkt()
    else:
        pol = region_pol

    # Create a single output map unless separate maps per species are requested
    if (
        not species_maps
        and not print_species
        and not print_species_shell
        and not print_occ_number
        and not print_species_table
    ):
        mapname = output
        new = Vector(mapname)
        new.open("w", tab_name=mapname, tab_cols=cols)
        cat = 1

    # Import data for each species
    for s in taxa_list:
        # Get the taxon key if it was not provided as input
        try:
            key = int(s)
        except:
            try:
                species_match = species.name_backbone(
                    s, rank=rank, strict=False, verbose=True
                )
                key = species_match["usageKey"]
            except:
                grass.error(
                    "Data request for taxon {} failed. Are you online?".format(s)
                )
                continue

        # Return matching taxon and alternatives and exit
        if print_species:
            print("Matching taxon for {} is:".format(s))
            print(
                "{} {}".format(species_match["scientificName"], species_match["status"])
            )
            if "alternatives" in list(species_match.keys()):
                print("Alternative matches might be: {}".format(s))
                for m in species_match["alternatives"]:
                    print("{} {}".format(m["scientificName"], m["status"]))
            else:
                print("No alternatives found for the given taxon")
            continue
        if print_species_shell:
            print("match={}".format(species_match["scientificName"]))
            if "alternatives" in list(species_match.keys()):
                alternatives = []
                for m in species_match["alternatives"]:
                    alternatives.append(m["scientificName"])
                print("alternatives={}".format(",".join(alternatives)))
            continue
        if print_species_table:
            if "alternatives" in list(species_match.keys()):
                if len(species_match["alternatives"]) == 0:
                    print(
                        "{0}|{1}|{2}|".format(s, key, species_match["scientificName"])
                    )
                else:
                    alternatives = []
                    for m in species_match["alternatives"]:
                        alternatives.append(m["scientificName"])
                    print(
                        "{0}|{1}|{2}|{3}".format(
                            s,
                            key,
                            species_match["scientificName"],
                            ",".join(alternatives),
                        )
                    )
            continue
        try:
            returns_n = occurrences.search(
                taxonKey=key,
                hasGeospatialIssue=hasGeospatialIssue,
                hasCoordinate=hasCoordinate,
                institutionCode=institutionCode,
                basisOfRecord=basisOfRecord,
                recordedBy=recordedby,
                eventDate=eventDate,
                continent=continent,
                country=country,
                geometry=pol,
                limit=1,
            )["count"]
        except:
            grass.error("Data request for taxon {} faild. Are you online?".format(s))
            returns_n = 0

        # Exit if search does not give a return
        # Print only number of returns for the given search and exit
        if print_occ_number:
            print("Found {0} occurrences for taxon {1}...".format(returns_n, s))
            continue
        elif returns_n <= 0:
            grass.warning(
                "No occurrences for current search for taxon {0}...".format(s)
            )
            continue
        elif returns_n >= 200000:
            grass.warning(
                "Your search for {1} returns {0} records.\n"
                "Unfortunately, the GBIF search API is limited to 200,000 records per request.\n"
                "The download will be incomplete. Please consider to split up your search.".format(
                    returns_n, s
                )
            )

        # Get the number of chunks to download
        chunks = int(math.ceil(returns_n / float(chunk_size)))
        grass.verbose(
            "Downloading {0} occurrences for taxon {1}...".format(returns_n, s)
        )

        # Create a map per species if requested, prepending the species name to the output name
        if species_maps:
            mapname = "{}_{}".format(s.replace(" ", "_"), output)

            new = Vector(mapname)
            new.open("w", tab_name=mapname, tab_cols=cols)
            cat = 0

        # Download the data from GBIF
        for c in range(chunks):
            # Define offset
            offset = c * chunk_size
            # Adjust chunk_size to the hard limit of 200,000 records in GBIF API
            # if necessary
            if offset + chunk_size >= 200000:
                chunk_size = 200000 - offset
            # Get the returns for the next chunk
            returns = occurrences.search(
                taxonKey=key,
                hasGeospatialIssue=hasGeospatialIssue,
                hasCoordinate=hasCoordinate,
                institutionCode=institutionCode,
                basisOfRecord=basisOfRecord,
                recordedBy=recordedby,
                eventDate=eventDate,
                continent=continent,
                country=country,
                geometry=pol,
                limit=chunk_size,
                offset=offset,
            )

            # Write the returned data to map and attribute table
            for res in returns["results"]:
                if target_crs not in latlon_crs:
                    point = ogr.CreateGeometryFromWkt(
                        "POINT ({} {})".format(
                            res["decimalLongitude"], res["decimalLatitude"]
                        )
                    )
                    point.Transform(transform)
                    x = point.GetX()
                    y = point.GetY()
                else:
                    x = res["decimalLongitude"]
                    y = res["decimalLatitude"]

                point = Point(x, y)

                for k in dwc_keys:
                    if k not in list(res.keys()):
                        res.update({k: None})

                cat = cat + 1
                new.write(
                    point,
                    cat=cat,
                    attrs=(
                        "{}".format(s),
                        res["key"],
                        res["taxonRank"],
                        res["taxonKey"],
                        res["taxonID"],
                        res["scientificName"],
                        res["species"],
                        res["speciesKey"],
                        res["genericName"],
                        res["genus"],
                        res["genusKey"],
                        res["family"],
                        res["familyKey"],
                        res["order"],
                        res["orderKey"],
                        res["class"],
                        res["classKey"],
                        res["phylum"],
                        res["phylumKey"],
                        res["kingdom"],
                        res["kingdomKey"],
                        "{}".format(res["eventDate"]) if res["eventDate"] else None,
                        "{}".format(res["verbatimEventDate"])
                        if res["verbatimEventDate"]
                        else None,
                        res["startDayOfYear"],
                        res["endDayOfYear"],
                        res["year"],
                        res["month"],
                        res["day"],
                        res["occurrenceID"],
                        res["occurrenceStatus"],
                        res["occurrenceRemarks"],
                        res["Habitat"],
                        res["basisOfRecord"],
                        res["preparations"],
                        res["sex"],
                        res["type"],
                        res["locality"],
                        res["verbatimLocality"],
                        res["decimalLongitude"],
                        res["decimalLatitude"],
                        res["coordinateUncertaintyInMeters"],
                        res["geodeticDatum"],
                        res["higerGeography"],
                        res["continent"],
                        res["country"],
                        res["countryCode"],
                        res["stateProvince"],
                        res["gbifID"],
                        res["protocol"],
                        res["identifier"],
                        res["recordedBy"],
                        res["identificationID"],
                        ",".join(res["identifiers"]),
                        "{}".format(res["dateIdentified"])
                        if res["dateIdentified"]
                        else None,
                        "{}".format(res["modified"]) if res["modified"] else None,
                        res["institutionCode"],
                        "{}".format(res["lastInterpreted"])
                        if res["lastInterpreted"]
                        else None,
                        "{}".format(res["lastParsed"]) if res["lastParsed"] else None,
                        res["references"],
                        ",".join(res["relations"]),
                        res["catalogNumber"],
                        "{}".format(res["occurrenceDetails"])
                        if res["occurrenceDetails"]
                        else None,
                        res["datasetKey"],
                        res["datasetName"],
                        res["collectionCode"],
                        res["rights"],
                        res["rightsHolder"],
                        res["license"],
                        res["publishingOrgKey"],
                        res["publishingCountry"],
                        "{}".format(res["lastCrawled"]) if res["lastCrawled"] else None,
                        res["specificEpithet"],
                        ",".join(res["facts"]),
                        ",".join(res["issues"]),
                        ",".join(res["extensions"]),
                        res["language"],
                    ),
                )


        # Close the current map if a map for each species is requested
        if species_maps:
            new.table.conn.commit()
            new.close()
            if not no_topo:
                grass.run_command("v.build", map=mapname, option="build")

            # Write history to map
            grass.vector_history(mapname)

    # Close the output map if not a map for each species is requested
    if (
        not species_maps
        and not print_species
        and not print_species_shell
        and not print_occ_number
        and not print_species_table
    ):
        new.table.conn.commit()
        new.close()
        if not no_topo:
            grass.run_command("v.build", map=mapname, option="build")

        # Write history to map
        grass.vector_history(mapname)
Example #11
def run_test_C(val_pts, dsm, source, memory, cores, density, radius, method,
               elev, val_file):
    """Test a combination of parametrisation method and exposure radius
    :param val_pts: Validation points
    :param dsm: Input DSM
    :param source: Exposure source
    :param memory: Allocated memory [MB]
    :param cores: Number of cores
    :param density: Sampling density
    :param radius: Exposure range
    :param method: Parametrisation method
    :param elev: Observer elevation
    :param val_file: File to store the validation results
    """
    nsres = grass.raster_info(dsm)["nsres"]
    ewres = grass.raster_info(dsm)["ewres"]

    # new VectorTopo object
    val_pts_topo = VectorTopo(val_pts)
    val_pts_topo.open("rw")

    no_points = val_pts_topo.number_of("points")
    counter = 0

    # open file where validation outputs will be written
    with open(val_file, "a") as outfile:
        fieldnames = [
            "pt_id",
            "method",
            "radius",
            "density",
            "gvi",
            "ela_t",
            "usr_t",
            "sys_t",
            "cpu",
            "mem",
            "exit",
        ]
        writer = csv.DictWriter(outfile, fieldnames=fieldnames)
        writer.writeheader()

        # iterate over points
        for pt in val_pts_topo.viter("points"):
            grass.percent(counter, no_points, 1)
            counter += 1

            pt_id = pt.attrs["img_no"]
            pt_x = pt.x
            pt_y = pt.y

            # set region around processed point
            grass.run_command(
                "g.region",
                align=dsm,
                n=pt_y + (radius + nsres / 2.0),
                s=pt_y - (radius + nsres / 2.0),
                e=pt_x + (radius + ewres / 2.0),
                w=pt_x - (radius + ewres / 2.0),
            )

            # Calculate GVI
            r_GVI = "tmp_GVI_{p}_{r}_{m}_{s}_m".format(p=pt_id,
                                                       s=density,
                                                       r=radius,
                                                       m=method)

            r_viewshed_profile = Popen(
                [
                    "/usr/bin/time",
                    "-v",
                    "r.viewshed.exposure",
                    "--q",
                    "--o",
                    "input={}".format(dsm),
                    "output={}".format(r_GVI),
                    "source={}".format(source),
                    "observer_elevation={}".format(elev),
                    "range={}".format(radius),
                    "function={}".format(method),
                    "sample_density={}".format(density),
                    "memory={}".format(memory),
                    "nprocs={}".format(cores),
                ],
                stdout=PIPE,
                stderr=PIPE,
            ).communicate()

            r_viewshed_profile = (r_viewshed_profile[1].decode("utf8").strip()
                                  [r_viewshed_profile[1].decode("utf8").
                                   find("Command being timed") - 1:])
            r_viewshed_profile = dict(
                item.split(": ")
                for item in r_viewshed_profile.replace("\t", "").split("\n"))

            # check if r.viewshed.exposure finished successfully
            if r_viewshed_profile["Exit status"] == "0":
                # extract GVI value at point
                gvi = grass.read_command("r.what",
                                         map=r_GVI,
                                         coordinates="{},{}".format(
                                             pt_x, pt_y)).split("|")[3]
            else:
                gvi = 0.0

            # write profiling information
            cpu = r_viewshed_profile["Percent of CPU this job got"].replace(
                "%", "")
            el_time_list = r_viewshed_profile[
                "Elapsed (wall clock) time (h:mm:ss or m:ss)"].split(":")
            el_time_s = (float(el_time_list[-1]) +
                         60 * float(el_time_list[-2]) +
                         (3600 * float(el_time_list[-3])
                          if len(el_time_list) > 2 else 0))

            writer.writerow({
                "pt_id": pt_id,
                "method": method,
                "radius": radius,
                "density": density,
                "gvi": gvi,
                "ela_t": el_time_s,
                "usr_t": r_viewshed_profile["User time (seconds)"],
                "sys_t": r_viewshed_profile["System time (seconds)"],
                "cpu": cpu,
                "mem": r_viewshed_profile["Maximum resident set size (kbytes)"],
                "exit": r_viewshed_profile["Exit status"],
            })
            grass.message("{},{},{},{},{},{},{},{},{},{},{}".format(
                pt_id,
                method,
                radius,
                density,
                gvi,
                el_time_s,
                r_viewshed_profile["User time (seconds)"],
                r_viewshed_profile["System time (seconds)"],
                cpu,
                r_viewshed_profile["Maximum resident set size (kbytes)"],
                r_viewshed_profile["Exit status"],
            ))

    # Close vector access
    val_pts_topo.close()

    return
Example #12
#!/usr/bin/env python

from grass.pygrass.vector import VectorTopo

psc = '41115'

obce = VectorTopo('obce')
obce.open('r')

vystup = VectorTopo('obce_psc_{}'.format(psc))
vystup.open('w', tab_cols=[('cat',       'INTEGER PRIMARY KEY'),
                           ('nazev',     'TEXT'),
                           ('psc',       'INTEGER')])

obec_id = None
obce_psc = set()
for prvek in obce.viter('areas'):
    if prvek.attrs['psc'] == psc:
        if obec_id is None:
            obec_id = prvek.id
            
        for b in prvek.boundaries():
            for n in b.get_left_right():
                if n != -1 and n != obec_id:
                    obce_psc.add(n)
obce_psc.add(obec_id)

hranice = list()
for prvek in obce.viter('areas'):
    if prvek.id not in obce_psc:
        continue
Example #13
def main(options, flags):
    import grass.pygrass.modules as pymod
    import grass.temporal as tgis
    from grass.pygrass.vector import VectorTopo

    invect = options["input"]
    if invect.find("@") != -1:
        invect = invect.split("@")[0]
    incol = options["date_column"]
    indate = options["date"]
    endcol = options["final_date_column"]
    enddate = options["final_date"]
    strds = options["strds"]
    nprocs = options["nprocs"]
    if strds.find("@") != -1:
        strds_name = strds.split("@")[0]
    else:
        strds_name = strds
    output = options["output"]
    if options["columns"]:
        cols = options["columns"].split(",")
    else:
        cols = []
    mets = options["method"].split(",")
    gran = options["granularity"]
    dateformat = options["date_format"]
    separator = gscript.separator(options["separator"])
    update = flags["u"]
    create = flags["c"]

    stdout = False
    if output != "-" and update:
        gscript.fatal(_("Cannot combine 'output' option and 'u' flag"))
    elif output != "-" and create:
        gscript.fatal(_("Cannot combine 'output' option and 'c' flag"))
    elif output == "-" and (update or create):
        if update and not cols:
            gscript.fatal(_("Please set 'columns' option"))
        output = invect
    else:
        stdout = True

    if create:
        cols = []
        for m in mets:
            colname = "{st}_{me}".format(st=strds_name, me=m)
            cols.append(colname)
            try:
                pymod.Module(
                    "v.db.addcolumn",
                    map=invect,
                    columns="{col} "
                    "double precision".format(col=colname),
                )
            except CalledModuleError:
                gscript.fatal(
                    _("Not possible to create column "
                      "{col}".format(col=colname)))
        gscript.warning(
            _("Attribute table of vector {name} will be updated"
              "...").format(name=invect))
    elif update:
        colexist = pymod.Module("db.columns", table=invect,
                                stdout_=PI).outputs.stdout.splitlines()
        for col in cols:
            if col not in colexist:
                gscript.fatal(
                    _("Column '{}' does not exist, please create it first".
                      format(col)))
        gscript.warning(
            _("Attribute table of vector {name} will be updated"
              "...").format(name=invect))

    if output != "-" and len(cols) != len(mets):
        gscript.fatal(
            _("'columns' and 'method' options must have the same "
              "number of elements"))
    tgis.init()
    dbif = tgis.SQLDatabaseInterfaceConnection()
    dbif.connect()
    sp = tgis.open_old_stds(strds, "strds", dbif)

    if sp.get_temporal_type() == "absolute":
        if gran:
            delta = int(tgis.gran_to_gran(gran, sp.get_granularity(), True))
            if tgis.gran_singular_unit(gran) in ["year", "month"]:
                delta = int(tgis.gran_to_gran(gran, "1 day", True))
                td = timedelta(delta)
            elif tgis.gran_singular_unit(gran) == "day":
                delta = tgis.gran_to_gran(gran, sp.get_granularity(), True)
                td = timedelta(delta)
            elif tgis.gran_singular_unit(gran) == "hour":
                td = timedelta(hours=delta)
            elif tgis.gran_singular_unit(gran) == "minute":
                td = timedelta(minutes=delta)
            elif tgis.gran_singular_unit(gran) == "second":
                td = timedelta(seconds=delta)
        else:
            td = None
    else:
        if sp.get_granularity() >= int(gran):
            gscript.fatal(
                _("Input granularity is smaller or equal to the {iv}"
                  " STRDS granularity".format(iv=strds)))
        td = int(gran)
    if incol and indate:
        gscript.fatal(_("Cannot combine 'date_column' and 'date' options"))
    elif not incol and not indate:
        gscript.fatal(_("You have to fill 'date_column' or 'date' option"))
    if incol:
        if endcol:
            mysql = "SELECT DISTINCT {dc},{ec} from {vmap} order by " "{dc}".format(
                vmap=invect, dc=incol, ec=endcol)
        else:
            mysql = "SELECT DISTINCT {dc} from {vmap} order by " "{dc}".format(
                vmap=invect, dc=incol)
        try:
            dates = pymod.Module("db.select",
                                 flags="c",
                                 stdout_=PI,
                                 stderr_=PI,
                                 sql=mysql)
            mydates = dates.outputs["stdout"].value.splitlines()
        except CalledModuleError:
            gscript.fatal(_("db.select return an error"))
    elif indate:
        if enddate:
            mydates = ["{ida}|{eda}".format(ida=indate, eda=enddate)]
        else:
            mydates = [indate]
        pymap = VectorTopo(invect)
        pymap.open("r")
        if len(pymap.dblinks) == 0:
            try:
                pymap.close()
                pymod.Module("v.db.addtable", map=invect)
            except CalledModuleError:
                dbif.close()
                gscript.fatal(
                    _("Unable to add table to vector map "
                      "<%s>" % invect))
        if pymap.is_open():
            pymap.close()
        qfeat = pymod.Module("v.category",
                             stdout_=PI,
                             stderr_=PI,
                             input=invect,
                             option="print")
        myfeats = qfeat.outputs["stdout"].value.splitlines()

    if stdout:
        outtxt = ""
    for data in mydates:
        try:
            start, final = data.split("|")
        except ValueError:
            start = data
            final = None
        if sp.get_temporal_type() == "absolute":
            fdata = datetime.strptime(start, dateformat)
        else:
            fdata = int(start)
        if final:
            sdata = datetime.strptime(final, dateformat)
        elif flags["a"]:
            sdata = fdata + td
        else:
            sdata = fdata
            fdata = sdata - td
        mwhere = "start_time >= '{inn}' and start_time < " "'{out}'".format(
            inn=fdata, out=sdata)
        lines = None
        try:
            r_what = pymod.Module(
                "t.rast.what",
                points=invect,
                strds=strds,
                layout="timerow",
                separator=separator,
                flags="v",
                where=mwhere,
                quiet=True,
                stdout_=PI,
                stderr_=PI,
                nprocs=nprocs,
            )
            lines = r_what.outputs["stdout"].value.splitlines()
        except CalledModuleError:
            gscript.warning("t.rast.what failed with where='{}'".format(mwhere))
        if incol:
            if endcol:
                mysql = ("SELECT DISTINCT cat from {vmap} where {dc}='{da}' "
                         "AND {ec}='{ed}' order by cat".format(vmap=invect,
                                                               da=start,
                                                               dc=incol,
                                                               ed=final,
                                                               ec=endcol))
            else:
                mysql = ("SELECT DISTINCT cat from {vmap} where {dc}='{da}' "
                         "order by cat".format(vmap=invect, da=start,
                                               dc=incol))
            try:
                qfeat = pymod.Module("db.select",
                                     flags="c",
                                     stdout_=PI,
                                     stderr_=PI,
                                     sql=mysql)
                myfeats = qfeat.outputs["stdout"].value.splitlines()
            except CalledModuleError:
                gscript.fatal(
                    _("db.select returned an error for date "
                      "{da}".format(da=start)))
        if not lines and stdout:
            for feat in myfeats:
                outtxt += "{di}{sep}{da}".format(di=feat,
                                                 da=start,
                                                 sep=separator)
                for n in range(len(mets)):
                    outtxt += "{sep}{val}".format(val="*", sep=separator)
                outtxt += "\n"
        if not lines:
            continue
        x = 0
        for line in lines:
            vals = line.split(separator)
            if vals[0] in myfeats:
                try:
                    nvals = np.array(vals[3:]).astype(float)
                except ValueError:
                    if stdout:
                        outtxt += "{di}{sep}{da}".format(di=vals[0],
                                                         da=start,
                                                         sep=separator)
                        for n in range(len(mets)):
                            outtxt += "{sep}{val}".format(val="*",
                                                          sep=separator)
                        outtxt += "\n"
                    continue
                if stdout:
                    outtxt += "{di}{sep}{da}".format(di=vals[0],
                                                     da=start,
                                                     sep=separator)
                for n in range(len(mets)):
                    result = None
                    if len(nvals) == 1:
                        result = nvals[0]
                    elif len(nvals) > 1:
                        result = return_value(nvals, mets[n])
                    if stdout:
                        if result is None:
                            result = "*"
                        outtxt += "{sep}{val}".format(val=result,
                                                      sep=separator)
                    else:
                        try:
                            if incol:
                                mywhe = "{dc}='{da}' AND ".format(da=start,
                                                                  dc=incol)
                                if endcol:
                                    mywhe += "{dc}='{da}' AND ".format(
                                        da=final, dc=endcol)

                                mywhe += "cat={ca}".format(ca=vals[0])

                                pymod.Module(
                                    "v.db.update",
                                    map=output,
                                    column=cols[n],
                                    value=str(result),
                                    where=mywhe,
                                )
                            else:
                                pymod.Module(
                                    "v.db.update",
                                    map=output,
                                    column=cols[n],
                                    value=str(result),
                                    where="cat={ca}".format(ca=vals[0]),
                                )
                        except CalledModuleError:
                            gscript.fatal(_("v.db.update return an error"))
                if stdout:
                    outtxt += "\n"
                if x == len(myfeats):
                    break
                else:
                    x += 1
    if stdout:
        print(outtxt)
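The loop above reduces each t.rast.what row with return_value(nvals, method), a helper that is not part of this snippet. A minimal sketch of such an aggregator, assuming the method names map onto NumPy reductions (this mapping is a guess, not the original implementation):

import numpy as np

def return_value(vals, method):
    """Hypothetical aggregator: reduce an array of raster values with the
    requested statistic; returns None for an unknown method name."""
    reducers = {
        "average": np.mean,
        "median": np.median,
        "minimum": np.min,
        "maximum": np.max,
        "stddev": np.std,
        "sum": np.sum,
    }
    if method not in reducers:
        return None
    return float(reducers[method](vals))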
Example #14
0
def write_results2newvec(stream, E, basins_tot, inputs):
    """
    Create the stream vector and
    write the basins object in a vector with the same cat value
    of the ID basin
    """
    pid = os.getpid()
    tmp_thin = "tmprgreen_%i_thin" % pid
    tmp_clean = "tmprgreen_%i_clean" % pid
    gcore.run_command("r.thin", input=stream, output=tmp_thin)
    gcore.run_command("r.to.vect",
                      input=tmp_thin,
                      flags='v',
                      output=tmp_clean,
                      type="line")
    gcore.run_command("v.edit", map=tmp_clean, tool='delete', cats='0')
    gcore.run_command('v.build', map=tmp_clean)
    dissolve_lines(tmp_clean, E)
    # TODO: dissolve the areas with the same cat
    # adding columns
    gcore.run_command("v.db.addcolumn",
                      map=E,
                      columns="Qown double precision,"
                      "Qtot double precision, Hmean double precision,"
                      "H0 double precision, Eown_kW double precision,"
                      "IDup1 int, Eup1_kW double precision,"
                      "IDup2 int, Eup2_kW double precision,"
                      "IDup3 int, Eup3_kW double precision,"
                      "Etot_kW double precision")
    gcore.run_command("db.dropcolumn", flags="f", table=E, column="label")
    # Open database connection
    vec = VectorTopo(E)
    vec.open("rw")
    link = vec.dblinks[0]
    conn = link.connection()
    # prepare a cursor object using cursor() method
    cursor = conn.cursor()
    # I modify with specific power (kW/km) see
    # 4._Julio_Alterach_-_Evaluation_of_the_residual_potential_
    # hydropower_production_in_Italy
    # compute the length of the river in a basin
    for ID in inputs:
        length = 0
        for l in vec.cat(ID, 'lines'):
            length += l.length()
        basins_tot[ID].length = length
        db = [
            basins_tot[ID].discharge_own, basins_tot[ID].discharge_tot,
            basins_tot[ID].h_mean, basins_tot[ID].h_closure,
            basins_tot[ID].E_own
        ]
        if len(basins_tot[ID].E_up) == 0:
            db = db + [0, 0.0, 0, 0.0, 0, 0.0, basins_tot[ID].E_own]
        elif len(basins_tot[ID].E_up) == 1:
            db = (db + [
                list(basins_tot[ID].E_up.keys())[0],
                list(basins_tot[ID].E_up.values())[0], 0, 0.0, 0, 0.0,
                basins_tot[ID].E_own + sum(basins_tot[ID].E_up.values())
            ])
        elif len(basins_tot[ID].E_up) == 2:
            db = (db + [
                list(basins_tot[ID].E_up.keys())[0],
                list(basins_tot[ID].E_up.values())[0],
                list(basins_tot[ID].E_up.keys())[1],
                list(basins_tot[ID].E_up.values())[1], 0, 0.0,
                basins_tot[ID].E_own + sum(basins_tot[ID].E_up.values())
            ])
        elif len(basins_tot[ID].E_up) == 3:
            db = (db + [
                list(basins_tot[ID].E_up.keys())[0],
                list(basins_tot[ID].E_up.values())[0],
                list(basins_tot[ID].E_up.keys())[1],
                list(basins_tot[ID].E_up.values())[1],
                list(basins_tot[ID].E_up.keys())[2],
                list(basins_tot[ID].E_up.values())[2],
                basins_tot[ID].E_own + sum(basins_tot[ID].E_up.values())
            ])
        else:
            db = db + [0, 0.0, 0, 0.0, 0, 0.0, basins_tot[ID].E_own]
        db = [float(d) for d in db]
        # FIXME: numpy.float is not accepted
        # TODO: change values, give only key and vals without key
        vec.table.update(basins_tot[ID].ID, db, cursor)

    # disconnect from server
    conn.commit()
    conn.close()
    vec.close()
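The function assumes each basins_tot entry carries discharge, head and energy attributes plus an E_up dict of up to three upstream basins. A minimal stand-in with made-up values, useful for exercising the code (the real Basin class from r.green is not shown here):

from types import SimpleNamespace

basin = SimpleNamespace(
    ID=1,
    discharge_own=2.5, discharge_tot=4.0,   # m^3/s
    h_mean=350.0, h_closure=210.0,          # m
    E_own=120.0,                            # kW
    E_up={2: 80.0},                         # upstream basin ID -> energy
    length=0.0,
)
basins_tot = {basin.ID: basin}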
Example #15
0
def writing_points(self):
    """Write the generated random points to a vector map"""
    with VectorTopo(self.tmpname, mode="w", with_z=True) as vect:
        for x, y, z in zip(self.x, self.y, self.z):
            vect.write(Point(x, y, z))
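Reading the points back works the same way; a short sketch, assuming the map was written under the hypothetical name 'random_points3d':

from grass.pygrass.vector import VectorTopo

with VectorTopo('random_points3d', mode='r') as vect:
    for point in vect:
        print(point.x, point.y, point.z)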
def main():
    """Do the main processing
    """

    # Parse input options:
    patch_map = options['input']
    patches = patch_map.split('@')[0]
    patches_mapset = patch_map.split('@')[1] if len(
        patch_map.split('@')) > 1 else None
    pop_proxy = options['pop_proxy']
    layer = options['layer']
    costs = options['costs']
    cutoff = float(options['cutoff'])
    border_dist = int(options['border_dist'])
    conefor_dir = options['conefor_dir']
    memory = int(options['memory'])

    # Parse output options:
    prefix = options['prefix']
    edge_map = '{}_edges'.format(prefix)
    vertex_map = '{}_vertices'.format(prefix)
    shortest_paths = '{}_shortest_paths'.format(prefix)

    # Parse flags:
    p_flag = flags['p']
    t_flag = flags['t']
    r_flag = flags['r']

    dist_flags = 'kn' if flags['k'] else 'n'

    lin_cat = 1
    zero_dist = None

    folder = grass.tempdir()
    if not os.path.exists(folder):
        os.makedirs(folder)

    # Setup counter for progress message
    counter = 0

    # Check if location is lat/lon (geodesic distance measure is only
    # supported in lat/lon locations)
    if grass.locn_is_latlong():
        grass.verbose("Location is lat/lon: Geodesic distance \
                      measure is used")

    # Check if prefix is legal GRASS name
    if not grass.legal_name(prefix):
        grass.fatal('{} is not a legal name for GRASS \
                    maps.'.format(prefix))

    if prefix[0].isdigit():
        grass.fatal('Table names starting with a digit are not SQL \
                    compliant ({}).'.format(prefix))

    # Check if output maps not already exists or could be overwritten
    for output in [edge_map, vertex_map, shortest_paths]:
        if grass.db.db_table_exist(output) and not grass.overwrite():
            grass.fatal('Vector map <{}> already exists'.format(output))

    # Check if input has required attributes
    in_db_connection = grass.vector.vector_db(patch_map)
    if not int(layer) in in_db_connection.keys():
        grass.fatal('No attribute table connected to vector map {} at \
                    layer {}.'.format(patches, layer))

    #Check if cat column exists
    pcols = grass.vector.vector_columns(patch_map, layer=layer)

    if 'cat' not in pcols.keys():
        grass.fatal('Cannot find the required column cat in vector map \
                    {}.'.format(patches))

    #Check if pop_proxy column exists
    if pop_proxy not in pcols.keys():
        grass.fatal('Cannot find column {} in vector map \
                    {}'.format(pop_proxy, patches))

    #Check if pop_proxy column is numeric type
    if not pcols[pop_proxy]['type'] in ['INTEGER', 'REAL', 'DOUBLE PRECISION']:
        grass.fatal('Column {} is of type {}. Only numeric types \
                    (integer or double precision) \
                    allowed!'.format(pop_proxy, pcols[pop_proxy]['type']))

    #Check if pop_proxy column does not contain values <= 0
    pop_vals = np.fromstring(grass.read_command('v.db.select',
                                                flags='c',
                                                map=patches,
                                                columns=pop_proxy,
                                                nv=-9999).rstrip('\n'),
                             dtype=float,
                             sep='\n')

    if np.min(pop_vals) <= 0:
        grass.fatal('Column {} contains values <= 0 or NULL. Neither \
                    values <= 0 nor NULL allowed!'.format(pop_proxy))

    ##############################################
    # Use pygrass region instead of grass.parse_command !?!
    start_reg = grass.parse_command('g.region', flags='ugp')

    max_n = start_reg['n']
    min_s = start_reg['s']
    max_e = start_reg['e']
    min_w = start_reg['w']
    # cost_nsres = reg['nsres']
    # cost_ewres = reg['ewres']

    # Rasterize patches
    # http://www.gdal.org/gdal_tutorial.html
    # http://geoinformaticstutorial.blogspot.no/2012/11/convert-
    # shapefile-to-raster-with-gdal.html
    if t_flag:
        # Rasterize patches with "all-touched" mode using GDAL
        # Read region settings (not needed; could use max_n, min_s, max_e,
        # min_w, nsres, ewres instead)
        prast = os.path.join(folder, 'patches_rast.tif')

        # Check if GDAL-GRASS plugin is installed
        if ogr.GetDriverByName('GRASS'):
            #With GDAL-GRASS plugin
            #Locate file for patch vector map
            pfile = grass.parse_command('g.findfile',
                                        element='vector',
                                        file=patches,
                                        mapset=patches_mapset)['file']
            pfile = os.path.join(pfile, 'head')

        else:
            # Without GDAL-GRASS-plugin
            grass.warning("Cannot find GDAL-GRASS plugin. Consider \
                          installing it in order to save time for \
                          all-touched rasterisation")
            pfile = os.path.join(folder, 'patches_vect.gpkg')
            # Export patch vector map to temp-file in a GDAL-readable
            # format (GeoPackage)
            grass.run_command('v.out.ogr',
                              flags='m',
                              quiet=True,
                              input=patch_map,
                              type='area',
                              layer=layer,
                              output=pfile,
                              lco='GEOMETRY_NAME=geom')

        # Rasterize vector map with all-touched option
        os.system('gdal_rasterize -l {} -at -tr {} {} \
                  -te {} {} {} {} -ot UInt32 -a cat \
                  {} {} -q'.format(patches, start_reg['ewres'],
                                   start_reg['nsres'], start_reg['w'],
                                   start_reg['s'], start_reg['e'],
                                   start_reg['n'], pfile, prast))

        if not ogr.GetDriverByName('GRASS'):
            # Remove vector temp-file
            os.remove(os.path.join(folder, 'patches_vect.gpkg'))

        # Import rasterized patches
        grass.run_command('r.external',
                          flags='o',
                          quiet=True,
                          input=prast,
                          output='{}_patches_pol'.format(TMP_PREFIX))

    else:
        # Simple rasterisation (only area)
        # in G 7.6 also with support for 'centroid'
        if float(grass.version()['version'][:3]) >= 7.6:
            conv_types = ['area', 'centroid']
        else:
            conv_types = ['area']
        grass.run_command('v.to.rast',
                          quiet=True,
                          input=patches,
                          use='cat',
                          type=conv_types,
                          output='{}_patches_pol'.format(TMP_PREFIX))

    # Extract boundaries from patch raster map
    grass.run_command('r.mapcalc',
                      expression='{p}_patches_boundary=if(\
    {p}_patches_pol,\
    if((\
    (isnull({p}_patches_pol[-1,0])||| \
    {p}_patches_pol[-1,0]!={p}_patches_pol)||| \
    (isnull({p}_patches_pol[0,1])||| \
    {p}_patches_pol[0,1]!={p}_patches_pol)||| \
    (isnull({p}_patches_pol[1,0])||| \
    {p}_patches_pol[1,0]!={p}_patches_pol)||| \
    (isnull({p}_patches_pol[0,-1])||| \
    {p}_patches_pol[0,-1]!={p}_patches_pol)), \
    {p}_patches_pol,null()), null())'.format(p=TMP_PREFIX),
                      quiet=True)
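    # The map algebra above keeps a patch cell only if at least one of its
    # four orthogonal neighbours is null or belongs to a different patch,
    # i.e. it extracts the boundary cells of each patch.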

    rasterized_cats = grass.read_command(
        'r.category',
        separator='newline',
        map='{p}_patches_boundary'.format(p=TMP_PREFIX)).replace(
            '\t', '').strip('\n')
    rasterized_cats = list(
        map(int, set([x for x in rasterized_cats.split('\n') if x != ''])))

    #Init output vector maps if they are requested by user
    network = VectorTopo(edge_map)
    network_columns = [(u'cat', 'INTEGER PRIMARY KEY'), (u'from_p', 'INTEGER'),
                       (u'to_p', 'INTEGER'), (u'min_dist', 'DOUBLE PRECISION'),
                       (u'dist', 'DOUBLE PRECISION'),
                       (u'max_dist', 'DOUBLE PRECISION')]
    network.open('w', tab_name=edge_map, tab_cols=network_columns)

    vertex = VectorTopo(vertex_map)
    vertex_columns = [
        (u'cat', 'INTEGER PRIMARY KEY'),
        (pop_proxy, 'DOUBLE PRECISION'),
    ]
    vertex.open('w', tab_name=vertex_map, tab_cols=vertex_columns)

    if p_flag:
        # Init cost paths file for start-patch
        grass.run_command('v.edit',
                          quiet=True,
                          map=shortest_paths,
                          tool='create')
        grass.run_command('v.db.addtable',
                          quiet=True,
                          map=shortest_paths,
                          columns="cat integer,\
                                   from_p integer,\
                                   to_p integer,\
                                   dist_min double precision,\
                                   dist double precision,\
                                   dist_max double precision")

    start_region_bbox = Bbox(north=float(max_n),
                             south=float(min_s),
                             east=float(max_e),
                             west=float(min_w))
    vpatches = VectorTopo(patches, mapset=patches_mapset)
    vpatches.open('r', layer=int(layer))

    ###Loop through patches
    vpatch_ids = np.array(vpatches.features_to_wkb_list(
        feature_type="centroid", bbox=start_region_bbox),
                          dtype=[('vid', 'uint32'), ('cat', 'uint32'),
                                 ('geom', '|S10')])
    cats = set(vpatch_ids['cat'])
    n_cats = len(cats)
    if n_cats < len(vpatch_ids['cat']):
        grass.verbose('At least one MultiPolygon found in patch map.\n \
                      Using average coordinates of the centroids for \
                      visual representation of the patch.')

    for cat in cats:
        if cat not in rasterized_cats:
            grass.warning('Patch {} has not been rasterized and will \
                          therefore not be treated as part of the \
                          network. Consider using the t-flag or changing \
                          the resolution.'.format(cat))

            continue
        grass.verbose("Calculating connectivity-distances for patch \
                      number {}".format(cat))

        # Filter
        from_vpatch = vpatch_ids[vpatch_ids['cat'] == cat]

        # Get patch ID
        if from_vpatch['vid'].size == 1:
            from_centroid = Centroid(v_id=int(from_vpatch['vid']),
                                     c_mapinfo=vpatches.c_mapinfo)
            # Skip patches without a valid centroid
            if not from_centroid:
                continue
            from_x = from_centroid.x
            from_y = from_centroid.y
        else:
            xcoords = []
            ycoords = []
            for f_p in from_vpatch['vid']:
                from_centroid = Centroid(v_id=int(f_p),
                                         c_mapinfo=vpatches.c_mapinfo)
                # Skip features without a valid centroid
                if not from_centroid:
                    continue
                xcoords.append(from_centroid.x)
                ycoords.append(from_centroid.y)
            from_x = np.average(xcoords)
            from_y = np.average(ycoords)

        # Get BoundingBox
        from_bbox = grass.parse_command('v.db.select',
                                        map=patch_map,
                                        flags='r',
                                        where='cat={}'.format(cat))

        attr_filter = vpatches.table.filters.select(pop_proxy)
        attr_filter = attr_filter.where("cat={}".format(cat))
        proxy_val = vpatches.table.execute().fetchone()

        # Prepare start patch
        start_patch = '{}_patch_{}'.format(TMP_PREFIX, cat)
        reclass_rule = u'{} = 1\n* = NULL'.format(cat)
        recl = grass.feed_command(
            'r.reclass',
            quiet=True,
            input='{}_patches_boundary'.format(TMP_PREFIX),
            output=start_patch,
            rules='-')
        recl.stdin.write(reclass_rule)
        recl.stdin.close()
        recl.wait()

        # Check if patch was rasterised (patches smaller than the raster
        # resolution and close to larger patches may not be rasterised)
        #start_check = grass.parse_command('r.info', flags='r', map=start_patch)
        #start_check = grass.parse_command('r.univar', flags='g', map=start_patch)
        #print(start_check)
        """if start_check['min'] != '1':
            grass.warning('Patch {} has not been rasterized and will \
                          therefore not be treated as part of the \
                          network. Consider using t-flag or change \
                          resolution.'.format(cat))

            grass.run_command('g.remove', flags='f', vector=start_patch,
                              raster=start_patch, quiet=True)
            grass.del_temp_region()
            continue"""

        # Prepare stop patches
        ############################################
        reg = grass.parse_command('g.region',
                                  flags='ug',
                                  quiet=True,
                                  raster=start_patch,
                                  n=float(from_bbox['n']) + float(cutoff),
                                  s=float(from_bbox['s']) - float(cutoff),
                                  e=float(from_bbox['e']) + float(cutoff),
                                  w=float(from_bbox['w']) - float(cutoff),
                                  align='{}_patches_pol'.format(TMP_PREFIX))

        north = reg['n'] if max_n > reg['n'] else max_n
        south = reg['s'] if min_s < reg['s'] else min_s
        east = reg['e'] if max_e < reg['e'] else max_e
        west = reg['w'] if min_w > reg['w'] else min_w

        # Set region to patch search radius
        grass.use_temp_region()
        grass.run_command('g.region',
                          quiet=True,
                          n=north,
                          s=south,
                          e=east,
                          w=west,
                          align='{}_patches_pol'.format(TMP_PREFIX))

        # Create buffer around start-patch as a mask
        # for cost distance analysis
        grass.run_command('r.buffer',
                          quiet=True,
                          input=start_patch,
                          output='MASK',
                          distances=cutoff)
        grass.run_command('r.mapcalc',
                          quiet=True,
                          expression='{pf}_patch_{p}_neighbours_contur=\
                                     if({pf}_patches_boundary=={p},\
                                     null(),\
                                     {pf}_patches_boundary)'.format(
                              pf=TMP_PREFIX, p=cat))
        grass.run_command('r.mask', flags='r', quiet=True)

        # Calculate cost distance
        cost_distance_map = '{}_patch_{}_cost_dist'.format(prefix, cat)
        grass.run_command('r.cost',
                          flags=dist_flags,
                          quiet=True,
                          overwrite=True,
                          input=costs,
                          output=cost_distance_map,
                          start_rast=start_patch,
                          memory=memory)

        #grass.run_command('g.region', flags='up')
        # grass.raster.raster_history(cost_distance_map)
        cdhist = History(cost_distance_map)
        cdhist.clear()
        cdhist.creator = os.environ['USER']
        cdhist.write()
        # History object cannot modify description
        grass.run_command('r.support',
                          map=cost_distance_map,
                          description='Generated by r.connectivity.distance',
                          history=os.environ['CMDLINE'])

        # Export distance at boundaries
        maps = '{0}_patch_{1}_neighbours_contur,{2}_patch_{1}_cost_dist'
        maps = maps.format(TMP_PREFIX, cat, prefix)

        connections = StringIO(
            unicode(
                grass.read_command('r.stats',
                                   flags='1ng',
                                   quiet=True,
                                   input=maps,
                                   separator=';').rstrip('\n')))
        try:
            con_array = np.genfromtxt(connections,
                                      delimiter=';',
                                      dtype=None,
                                      names=['x', 'y', 'cat', 'dist'])
        except Exception:
            grass.warning('No connections for patch {}'.format(cat))

            # Write centroid to vertex map
            vertex.write(Point(from_x, from_y), cat=int(cat), attrs=proxy_val)
            vertex.table.conn.commit()

            # Remove temporary map data
            grass.run_command('g.remove',
                              quiet=True,
                              flags='f',
                              type=['raster', 'vector'],
                              pattern="{}*{}*".format(TMP_PREFIX, cat))
            grass.del_temp_region()
            continue

        #Find closest points on neighbour patches
        to_cats = set(np.atleast_1d(con_array['cat']))
        to_coords = []
        for to_cat in to_cats:
            connection = con_array[con_array['cat'] == to_cat]
            connection.sort(order=['dist'])
            pixel = border_dist if len(
                connection) > border_dist else len(connection) - 1
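            # Use the distance at the border_dist-th closest boundary cell
            # (or the farthest cell when fewer cells are available)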
            # closest_points_x = connection['x'][pixel]
            # closest_points_y = connection['y'][pixel]
            closest_points_to_cat = to_cat
            closest_points_min_dist = connection['dist'][0]
            closest_points_dist = connection['dist'][pixel]
            closest_points_max_dist = connection['dist'][-1]
            to_patch_ids = vpatch_ids[vpatch_ids['cat'] == int(to_cat)]['vid']

            if len(to_patch_ids) == 1:
                to_centroid = Centroid(v_id=int(to_patch_ids[0]),
                                       c_mapinfo=vpatches.c_mapinfo)
                to_x = to_centroid.x
                to_y = to_centroid.y
            elif len(to_patch_ids) > 1:
                xcoords = []
                ycoords = []
                for t_p in to_patch_ids:
                    to_centroid = Centroid(v_id=int(t_p),
                                           c_mapinfo=vpatches.c_mapinfo)
                    # Skip features without a valid centroid
                    if not to_centroid:
                        continue
                    xcoords.append(to_centroid.x)
                    ycoords.append(to_centroid.y)
                to_x = np.average(xcoords)
                to_y = np.average(ycoords)

            to_coords.append('{},{},{},{},{},{}'.format(
                connection['x'][0], connection['y'][0], to_cat,
                closest_points_min_dist, closest_points_dist,
                closest_points_max_dist))

            #Save edges to network dataset
            if closest_points_dist <= 0:
                zero_dist = 1

            # Write data to network
            network.write(Line([(from_x, from_y), (to_x, to_y)]),
                          cat=lin_cat,
                          attrs=(
                              cat,
                              int(closest_points_to_cat),
                              closest_points_min_dist,
                              closest_points_dist,
                              closest_points_max_dist,
                          ))
            network.table.conn.commit()

            lin_cat = lin_cat + 1

        # Save closest points and shortest paths through cost raster as
        # vector map (r.drain limited to 1024 points) if requested
        if p_flag:
            grass.verbose('Extracting shortest paths for patch number \
                          {}...'.format(cat))

            points_n = len(to_cats)

            tiles = int(points_n / 1024.0)
            rest = points_n % 1024
            if not rest == 0:
                tiles = tiles + 1

            tile_n = 0
            while tile_n < tiles:
                tile_n = tile_n + 1
                #Import closest points for start-patch in 1024-point blocks
                sp = grass.feed_command('v.in.ascii',
                                        flags='nr',
                                        overwrite=True,
                                        quiet=True,
                                        input='-',
                                        stderr=subprocess.PIPE,
                                        output="{}_{}_cp".format(
                                            TMP_PREFIX, cat),
                                        separator=",",
                                        columns="x double precision,\
                                           y double precision,\
                                           to_p integer,\
                                           dist_min double precision,\
                                           dist double precision,\
                                           dist_max double precision")
                sp.stdin.write("\n".join(to_coords))
                sp.stdin.close()
                sp.wait()

                # Extract shortest paths for start-patch in chunks of
                # 1024 points
                cost_paths = "{}_{}_cost_paths".format(TMP_PREFIX, cat)
                start_points = "{}_{}_cp".format(TMP_PREFIX, cat)
                grass.run_command('r.drain',
                                  overwrite=True,
                                  quiet=True,
                                  input=cost_distance_map,
                                  output=cost_paths,
                                  drain=cost_paths,
                                  start_points=start_points)

                grass.run_command('v.db.addtable',
                                  map=cost_paths,
                                  quiet=True,
                                  columns="cat integer,\
                                   from_p integer,\
                                   to_p integer,\
                                   dist_min double precision,\
                                   dist double precision,\
                                   dist_max double precision")
                grass.run_command('v.db.update',
                                  map=cost_paths,
                                  column='from_p',
                                  value=cat,
                                  quiet=True)
                grass.run_command('v.distance',
                                  quiet=True,
                                  from_=cost_paths,
                                  to=start_points,
                                  upload='to_attr',
                                  column='to_p',
                                  to_column='to_p')
                grass.run_command('v.db.join',
                                  quiet=True,
                                  map=cost_paths,
                                  column='to_p',
                                  other_column='to_p',
                                  other_table=start_points,
                                  subset_columns='dist_min,dist,dist_max')

                #grass.run_command('v.info', flags='c',
                #                  map=cost_paths)
                grass.run_command('v.patch',
                                  flags='ae',
                                  overwrite=True,
                                  quiet=True,
                                  input=cost_paths,
                                  output=shortest_paths)

                # Remove temporary map data
                grass.run_command('g.remove',
                                  quiet=True,
                                  flags='f',
                                  type=['raster', 'vector'],
                                  pattern="{}*{}*".format(TMP_PREFIX, cat))

        # Remove temporary map data for patch
        if r_flag:
            grass.run_command('g.remove',
                              flags='f',
                              type='raster',
                              name=cost_distance_map,
                              quiet=True)

        vertex.write(Point(from_x, from_y), cat=int(cat), attrs=proxy_val)

        vertex.table.conn.commit()

        # Print progress message
        grass.percent(i=int((float(counter) / n_cats) * 100), n=100, s=3)

        # Update counter for progress message
        counter = counter + 1

    if zero_dist:
        grass.warning('Some patches are directly adjacent to others. \
                       Minimum distance set to 0.0000000001')

    # Close vector maps and build topology
    network.close()
    vertex.close()

    # Add vertex attributes
    # grass.run_command('v.db.addtable', map=vertex_map)
    # grass.run_command('v.db.join', map=vertex_map, column='cat',
    #                   other_table=in_db_connection[int(layer)]['table'],
    #                   other_column='cat', subset_columns=pop_proxy,
    #                   quiet=True)

    # Add history and meta data to produced maps
    grass.run_command('v.support',
                      flags='h',
                      map=edge_map,
                      person=os.environ['USER'],
                      cmdhist=os.environ['CMDLINE'])

    grass.run_command('v.support',
                      flags='h',
                      map=vertex_map,
                      person=os.environ['USER'],
                      cmdhist=os.environ['CMDLINE'])

    if p_flag:
        grass.run_command('v.support',
                          flags='h',
                          map=shortest_paths,
                          person=os.environ['USER'],
                          cmdhist=os.environ['CMDLINE'])

    # Output also Conefor files if requested
    if conefor_dir:
        query = """SELECT p_from, p_to, avg(dist) FROM
                 (SELECT
                 CASE
                 WHEN from_p > to_p THEN to_p
                 ELSE from_p END AS p_from,
                    CASE
                 WHEN from_p > to_p THEN from_p
                 ELSE to_p END AS p_to,
                 dist
                 FROM {}) AS x
                 GROUP BY p_from, p_to""".format(edge_map)
        with open(os.path.join(conefor_dir, 'undirected_connection_file'),
                  'w') as edges:
            edges.write(
                grass.read_command('db.select', sql=query, separator=' '))
        with open(os.path.join(conefor_dir, 'directed_connection_file'),
                  'w') as edges:
            edges.write(
                grass.read_command('v.db.select',
                                   map=edge_map,
                                   separator=' ',
                                   flags='c'))
        with open(os.path.join(conefor_dir, 'node_file'), 'w') as nodes:
            nodes.write(
                grass.read_command('v.db.select',
                                   map=vertex_map,
                                   separator=' ',
                                   flags='c'))
Example #17
0
def create_test_stream_network_map(map_name="streams"):
    """
       This function creates a vector map layer with lines that represent
       a stream network with two different graphs. The first graph
       contains a loop, the second can be used as a directed graph.

       This should be used in doc and unit tests to create location/mapset
       independent vector map layers.

       :param map_name: The vector map name that should be used

       1(0,2)  3(2,2)
        \     /
       1 \   / 2
          \ /
           2(1,1)
    6(0,1) ||  5(2,1)
       5 \ || / 4
          \||/
           4(1,0)
           |
           | 6
           |7(1,-1)

       7(0,-1) 8(2,-1)
        \     /
       8 \   / 9
          \ /
           9(1, -2)
           |
           | 10
           |
          10(1,-3)
    """

    from grass.pygrass.vector import VectorTopo
    from grass.pygrass.vector.geometry import Line

    cols = [(u'cat', 'INTEGER PRIMARY KEY'), (u'id', 'INTEGER')]
    with VectorTopo(map_name, mode='w', tab_name=map_name,
                    tab_cols=cols) as streams:

        # First flow graph
        l = Line([(0, 2), (0.22, 1.75), (0.55, 1.5), (1, 1)])
        streams.write(l, cat=1, attrs=(1, ))
        l = Line([(2, 2), (1, 1)])
        streams.write(l, cat=2, attrs=(2, ))
        l = Line([(1, 1), (0.85, 0.5), (1, 0)])
        streams.write(l, cat=3, attrs=(3, ))
        l = Line([(2, 1), (1, 0)])
        streams.write(l, cat=4, attrs=(4, ))
        l = Line([(0, 1), (1, 0)])
        streams.write(l, cat=5, attrs=(5, ))
        l = Line([(1, 0), (1, -1)])
        streams.write(l, cat=6, attrs=(6, ))
        # Reverse line 3
        l = Line([(1, 0), (1.15, 0.5), (1, 1)])
        streams.write(l, cat=7, attrs=(7, ))

        # second flow graph
        l = Line([(0, -1), (1, -2)])
        streams.write(l, cat=8, attrs=(8, ))
        l = Line([(2, -1), (1, -2)])
        streams.write(l, cat=9, attrs=(9, ))
        l = Line([(1, -2), (1, -3)])
        streams.write(l, cat=10, attrs=(10, ))

        streams.organization = 'Thuenen Institut'
        streams.person = 'Soeren Gebbert'
        streams.title = 'Test dataset for stream networks'
        streams.comment = 'This is a comment'

        streams.table.conn.commit()
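A quick usage sketch for the test map above (requires a running GRASS session):

from grass.pygrass.vector import VectorTopo

create_test_stream_network_map("streams")
with VectorTopo("streams", mode="r") as streams:
    for line in streams:
        print(line.cat, round(line.length(), 2))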
Example #18
0
def main():
    """
    Builds a grid for the MODFLOW component of the USGS hydrologic model,
    GSFLOW.
    """

    options, flags = gscript.parser()
    basin = options['basin']
    pp = options['pour_point']
    raster_input = options['raster_input']
    dx = options['dx']
    dy = options['dy']
    grid = options['output']
    mask = options['mask_output']
    bc_cell = options['bc_cell']
    # basin='basins_tmp_onebasin'; pp='pp_tmp'; raster_input='DEM'; raster_output='DEM_coarse'; dx=dy='500'; grid='grid_tmp'; mask='mask_tmp'
    """
    # Fatal if raster input and output are not both set
    _lena0 = (len(raster_input) == 0)
    _lenb0 = (len(raster_output) == 0)
    if _lena0 + _lenb0 == 1:
        gscript.fatal("You must set both raster input and output, or neither.")
    """

    # Fatal if bc_cell set but mask and grid are false
    if bc_cell != '':
        if (mask == '') or (pp == ''):
            gscript.fatal(
                'Mask and pour point must be set to define b.c. cell')

    # Create grid -- overlaps DEM, three cells of padding
    gscript.use_temp_region()
    reg = gscript.region()
    reg_grid_edges_sn = np.linspace(reg['s'], reg['n'], reg['rows'])
    reg_grid_edges_we = np.linspace(reg['w'], reg['e'], reg['cols'])
    g.region(vector=basin, ewres=dx, nsres=dy)
    regnew = gscript.region()
    # Use a grid ratio -- don't match exactly the desired MODFLOW resolution
    grid_ratio_ns = np.round(regnew['nsres'] / reg['nsres'])
    grid_ratio_ew = np.round(regnew['ewres'] / reg['ewres'])
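    # Round to a whole number of fine cells per coarse MODFLOW cell so the
    # coarse grid aligns exactly with the input raster grid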
    # Get S, W, and then move the unit number of grid cells over to get N and E
    # and include 3 cells of padding around the whole watershed
    _s_dist = np.abs(reg_grid_edges_sn - (regnew['s'] - 3. * regnew['nsres']))
    _s_idx = np.where(_s_dist == np.min(_s_dist))[0][0]
    _s = float(reg_grid_edges_sn[_s_idx])
    _n_grid = np.arange(_s, reg['n'] + 3 * grid_ratio_ns * reg['nsres'],
                        grid_ratio_ns * reg['nsres'])
    _n_dist = np.abs(_n_grid - (regnew['n'] + 3. * regnew['nsres']))
    _n_idx = np.where(_n_dist == np.min(_n_dist))[0][0]
    _n = float(_n_grid[_n_idx])
    _w_dist = np.abs(reg_grid_edges_we - (regnew['w'] - 3. * regnew['ewres']))
    _w_idx = np.where(_w_dist == np.min(_w_dist))[0][0]
    _w = float(reg_grid_edges_we[_w_idx])
    _e_grid = np.arange(_w, reg['e'] + 3 * grid_ratio_ew * reg['ewres'],
                        grid_ratio_ew * reg['ewres'])
    _e_dist = np.abs(_e_grid - (regnew['e'] + 3. * regnew['ewres']))
    _e_idx = np.where(_e_dist == np.min(_e_dist))[0][0]
    _e = float(_e_grid[_e_idx])
    # Finally make the region
    g.region(w=str(_w),
             e=str(_e),
             s=str(_s),
             n=str(_n),
             nsres=str(grid_ratio_ns * reg['nsres']),
             ewres=str(grid_ratio_ew * reg['ewres']))
    # And then make the grid
    v.mkgrid(map=grid, overwrite=gscript.overwrite())

    # Cell numbers (row, column, continuous ID)
    v.db_addcolumn(map=grid, columns='id int', quiet=True)
    colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns'])
    colValues = np.array(
        gscript.vector_db_select(grid, layer=1)['values'].values())
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()
    rows = colValues[:, colNames == 'row'].astype(int).squeeze()
    cols = colValues[:, colNames == 'col'].astype(int).squeeze()
    nrows = np.max(rows)
    ncols = np.max(cols)
    cats = np.ravel([cats])
    _id = np.ravel([ncols * (rows - 1) + cols])
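    # Continuous cell ID is row-major: id = ncols*(row-1) + col, e.g. in a
    # hypothetical 3-column grid, (row=2, col=1) -> 4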
    _id_cat = []
    for i in range(len(_id)):
        _id_cat.append((_id[i], cats[i]))
    gridTopo = VectorTopo(grid)
    gridTopo.open('rw')
    cur = gridTopo.table.conn.cursor()
    cur.executemany("update " + grid + " set id=? where cat=?", _id_cat)
    gridTopo.table.conn.commit()
    gridTopo.close()

    # Cell area
    v.db_addcolumn(map=grid, columns='area_m2 double precision', quiet=True)
    v.to_db(map=grid,
            option='area',
            units='meters',
            columns='area_m2',
            quiet=True)

    # Basin mask
    if len(mask) > 0:
        # Fine resolution region:
        g.region(n=reg['n'],
                 s=reg['s'],
                 w=reg['w'],
                 e=reg['e'],
                 nsres=reg['nsres'],
                 ewres=reg['ewres'])
        # Rasterize basin
        v.to_rast(input=basin,
                  output=mask,
                  use='val',
                  value=1,
                  overwrite=gscript.overwrite(),
                  quiet=True)
        # Coarse resolution region:
        g.region(w=str(_w),
                 e=str(_e),
                 s=str(_s),
                 n=str(_n),
                 nsres=str(grid_ratio_ns * reg['nsres']),
                 ewres=str(grid_ratio_ew * reg['ewres']))
        r.resamp_stats(input=mask,
                       output=mask,
                       method='sum',
                       overwrite=True,
                       quiet=True)
        r.mapcalc('tmp' + ' = ' + mask + ' > 0', overwrite=True, quiet=True)
        g.rename(raster=('tmp', mask), overwrite=True, quiet=True)
        r.null(map=mask, null=0, quiet=True)
        # Add mask location (1 vs 0) in the MODFLOW grid
        v.db_addcolumn(map=grid,
                       columns='basinmask double precision',
                       quiet=True)
        v.what_rast(map=grid, type='centroid', raster=mask, column='basinmask')
    """
    # Resampled raster
    if len(raster_output) > 0:
        r.resamp_stats(input=raster_input, output=raster_output, method='average', overwrite=gscript.overwrite(), quiet=True)
    """

    # Pour point
    if len(pp) > 0:
        v.db_addcolumn(map=pp,
                       columns=('row integer', 'col integer'),
                       quiet=True)
        v.build(map=pp, quiet=True)
        v.what_vect(map=pp,
                    query_map=grid,
                    column='row',
                    query_column='row',
                    quiet=True)
        v.what_vect(map=pp,
                    query_map=grid,
                    column='col',
                    query_column='col',
                    quiet=True)

    # Next point downstream of the pour point
    # Requires pp (always) and mask (sometimes)
    # Dependency set above w/ gscript.fatal
    if len(bc_cell) > 0:
        ########## NEED TO USE TRUE TEMPORARY FILE ##########
        # May not work with dx != dy!
        v.to_rast(input=pp, output='tmp', use='val', value=1, overwrite=True)
        r.buffer(input='tmp',
                 output='tmp',
                 distances=float(dx) * 1.5,
                 overwrite=True)
        r.mapcalc('tmp2 = if(tmp==2,1,null()) * ' + raster_input,
                  overwrite=True)
        g.rename(raster=('tmp2', 'tmp'), overwrite=True, quiet=True)
        r.drain(input=raster_input,
                start_points=pp,
                output='tmp2',
                overwrite=True)
        r.mapcalc('tmp3 = tmp2 * tmp', overwrite=True, quiet=True)
        g.rename(raster=('tmp3', 'tmp'), overwrite=True, quiet=True)
        #r.null(map='tmp', setnull=0) # Not necessary: center point removed above
        r.to_vect(input='tmp',
                  output=bc_cell,
                  type='point',
                  column='z',
                  overwrite=gscript.overwrite(),
                  quiet=True)
        v.db_addcolumn(map=bc_cell,
                       columns=('row integer', 'col integer',
                                'x double precision', 'y double precision'),
                       quiet=True)
        v.build(map=bc_cell, quiet=True)
        v.what_vect(map=bc_cell, query_map=grid, column='row', \
                    query_column='row', quiet=True)
        v.what_vect(map=bc_cell, query_map=grid, column='col', \
                    query_column='col', quiet=True)
        v.to_db(map=bc_cell, option='coor', columns=('x,y'))

        # Find out if this is diagonal: finite difference works only N-S, W-E
        colNames = np.array(gscript.vector_db_select(pp, layer=1)['columns'])
        colValues = np.array(
            gscript.vector_db_select(pp, layer=1)['values'].values())
        pp_row = int(colValues[:, colNames == 'row'].astype(int).squeeze())
        pp_col = int(colValues[:, colNames == 'col'].astype(int).squeeze())
        colNames = np.array(
            gscript.vector_db_select(bc_cell, layer=1)['columns'])
        colValues = np.array(
            gscript.vector_db_select(bc_cell, layer=1)['values'].values())
        bc_row = int(colValues[:, colNames == 'row'].astype(int).squeeze())
        bc_col = int(colValues[:, colNames == 'col'].astype(int).squeeze())
        # Also get x and y while we are at it: may be needed later
        bc_x = float(colValues[:, colNames == 'x'].astype(float).squeeze())
        bc_y = float(colValues[:, colNames == 'y'].astype(float).squeeze())
        if (bc_row != pp_row) and (bc_col != pp_col):
            # If not diagonal, two possible locations that are adjacent
            # to the pour point
            _col1, _row1 = str(bc_col), str(pp_row)
            _col2, _row2 = str(pp_col), str(bc_row)
            # Check if either of these is covered by the basin mask
            _ismask_1 = gscript.vector_db_select(grid,
                                                 layer=1,
                                                 where='(row == ' + _row1 +
                                                 ') AND (col ==' + _col1 + ')',
                                                 columns='basinmask')
            _ismask_1 = int(_ismask_1['values'].values()[0][0])
            _ismask_2 = gscript.vector_db_select(grid,
                                                 layer=1,
                                                 where='(row == ' + _row2 +
                                                 ') AND (col ==' + _col2 + ')',
                                                 columns='basinmask')
            _ismask_2 = int(_ismask_2['values'].values()[0][0])
            # If both covered by mask, error
            if _ismask_1 and _ismask_2:
                gscript.fatal(
                    'All possible b.c. cells covered by basin mask.\n\
                             Contact the developer: awickert (at) umn(.)edu')
            # Otherwise, keep the candidate cell(s) not covered by the basin
            # mask. (Open question: prefer the point touching the fewest
            # interior cells? For now, set every uncovered candidate and see
            # what happens.)
            else:
                # Get dx and dy
                dx = gscript.region()['ewres']
                dy = gscript.region()['nsres']
                # Build tool to handle multiple b.c. cells?
                bcvect = vector.Vector(bc_cell)
                bcvect.open('rw')
                _cat_i = 2
                if not _ismask_1:
                    # _x should always be bc_x, but writing generalized code
                    _x = bc_x + dx * (int(_col1) - bc_col)  # col 1 at w edge
                    _y = bc_y - dy * (int(_row1) - bc_row)  # row 1 at n edge
                    point0 = Point(_x, _y)
                    bcvect.write(
                        point0,
                        cat=_cat_i,
                        attrs=(None, None, _row1, _col1, _x, _y),
                    )
                    bcvect.table.conn.commit()
                    _cat_i += 1
                if not _ismask_2:
                    # _y should always be bc_y, but writing generalized code
                    _x = bc_x + dx * (int(_col2) - bc_col)  # col 2 at w edge
                    _y = bc_y - dy * (int(_row2) - bc_row)  # row 2 at n edge
                    point0 = Point(_x, _y)
                    bcvect.write(
                        point0,
                        cat=_cat_i,
                        attrs=(None, None, _row2, _col2, _x, _y),
                    )
                    bcvect.table.conn.commit()
                # Build database table and vector geometry
                bcvect.build()
                bcvect.close()

    g.region(n=reg['n'],
             s=reg['s'],
             w=reg['w'],
             e=reg['e'],
             nsres=reg['nsres'],
             ewres=reg['ewres'])
#!/usr/bin/env python

from grass.pygrass.vector import VectorTopo

zachranka = VectorTopo('adresnimista_zachranka', mapset='ruian_praha')
zachranka.open('r')
ulice = VectorTopo('ulice', mapset='ruian_praha')
ulice.open('r')

zu = VectorTopo('zachranka_ulice')
cols = [('cat',       'INTEGER PRIMARY KEY'),
        ('kod',       'INTEGER'),
        ('ulice',     'TEXT'),
        ('nespravny', 'INTEGER')]
zu.open('w', tab_cols=cols)

seznam = []
for z in zachranka:
    u = ulice.find['by_point'].geo(z, maxdist=1000.)
    if u is None:
        continue
    nespravny = z.attrs['ulicekod'] != u.attrs['kod']
    print (u'{:10} {:1} {}'.format(z.attrs['kod'], nespravny, u.attrs['nazev']))
    zu.write(z, (z.attrs['kod'], u.attrs['nazev'], nespravny))
    if u.cat not in seznam:
        zu.write(u, (None, u.attrs['nazev'], None))
        seznam.append(u.cat)

zu.table.conn.commit() # required for the attributes to be written!

zu.close()
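Note: the find['by_point'] accessor used above is the older pygrass spelling; newer GRASS releases expose the same nearest-feature lookup as find_by_point.geo(point, maxdist=...), so check which one your pygrass version provides.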
def main(opts, flgs):
    TMPRAST, TMPVECT, DEBUG = [], [], flgs['d']
    atexit.register(cleanup, raster=TMPRAST, vector=TMPVECT, debug=DEBUG)
    OVW = gcore.overwrite()

    dtm = options['elevation']
    river = options['river']  # raster
    discharge_current = options['discharge_current']  # vec
    discharge_natural = options['discharge_natural']  # vec
    mfd = options['mfd']
    len_plant = options['len_plant']
    len_min = options['len_min']
    distance = options['distance']
    output_plant = options['output_plant']
    area = options['area']
    buff = options['buff']
    efficiency = options['efficiency']
    points_view = options['points_view']
    new_region = options['visibility_resolution']
    final_vis = options['output_vis']
    n_points = options['n_points']
    p_min = options['p_min']
    percentage = options['percentage']
    msgr = get_msgr()

    # set the region
    info = gcore.parse_command('g.region', flags='m')
    if (float(info['nsres']) == 0) or (float(info['ewres']) == 0):
        msgr.warning("set region to elevation raster")
        gcore.run_command('g.region', raster=dtm)

    pid = os.getpid()

    if area:
        if float(buff):
            area_tmp = 'tmp_buff_area_%05d' % pid
            gcore.run_command('v.buffer',
                              input=area,
                              output=area_tmp,
                              distance=buff,
                              overwrite=OVW)
            area = area_tmp
            TMPVECT.append(area)
        oriver = 'tmp_river_%05d' % pid
        gcore.run_command('v.overlay',
                          flags='t',
                          ainput=river,
                          binput=area,
                          operator='not',
                          output=oriver,
                          overwrite=OVW)
        river = oriver
        TMPVECT.append(oriver)

    if points_view:
        info_old = gcore.parse_command('g.region', flags='pg')
        set_new_region(new_region)
        pl, mset = points_view.split('@') if '@' in points_view else (
            points_view, '')
        vec = VectorTopo(pl, mapset=mset, mode='r')
        vec.open("r")
        string = '0'
        for i, point in enumerate(vec):
            out = 'tmp_visual_%05d_%03d' % (pid, i)
            gcore.run_command(
                'r.viewshed',
                input=dtm,
                output=out,
                coordinates=point.coords(),
                overwrite=OVW,
                memory=1000,
                flags='b',
                max_distance=4000,
            )
            TMPRAST.append(out)
            # we use 4 km since it is roughly the limit of human sight
            string = string + ('+%s' % out)

        tmp_final_vis = 'tmp_final_vis_%05d' % pid
        formula = '%s = %s' % (tmp_final_vis, string)
        TMPRAST.append(tmp_final_vis)
        mapcalc(formula, overwrite=OVW)
        # change to old region
        set_old_region(info_old)
        TMPVECT.append(tmp_final_vis)
        gcore.run_command('r.to.vect',
                          flags='v',
                          overwrite=OVW,
                          input=tmp_final_vis,
                          output=tmp_final_vis,
                          type='area')
        if int(n_points) > 0:
            where = 'cat<%s' % (n_points)
        else:
            where = 'cat=0'
        gcore.run_command('v.db.droprow',
                          input=tmp_final_vis,
                          where=where,
                          output=final_vis,
                          overwrite=OVW)
        tmp_river = 'tmp_river2_%05d' % pid
        gcore.run_command('v.overlay',
                          flags='t',
                          ainput=river,
                          binput=final_vis,
                          operator='not',
                          output=tmp_river,
                          overwrite=OVW)
        river = tmp_river
        TMPVECT.append(tmp_river)


    tmp_disch = 'tmp_discharge_%05d' % pid
    if mfd:
        formula = '%s=%s-%s' % (tmp_disch, discharge_current, mfd)
        mapcalc(formula, overwrite=OVW)
        TMPRAST.append(tmp_disch)
        discharge_current = tmp_disch

    elif discharge_natural:
        formula = '%s=%s-%s*%s/100.0' % (tmp_disch, discharge_current,
                                         discharge_natural, percentage)
        mapcalc(formula, overwrite=OVW)
        formula = '%s=if(%s>0, %s, 0)' % (tmp_disch, tmp_disch, tmp_disch)
        mapcalc(formula, overwrite=True)
        TMPRAST.append(tmp_disch)
        discharge_current = tmp_disch
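        # Worked example: with percentage=80 the natural discharge is scaled
        # by 80/100 before being subtracted, and the second expression clamps
        # any negative result of the subtraction to zero.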

    gcore.run_command('r.green.hydro.optimal',
                      flags='c',
                      discharge=discharge_current,
                      river=river,
                      elevation=dtm,
                      len_plant=len_plant,
                      output_plant=output_plant,
                      distance=distance,
                      len_min=len_min,
                      efficiency=efficiency,
                      p_min=p_min)
    power2energy(output_plant, 'pot_power', float(options['n']))
    print('r.green.hydro.recommended completed!')
Example #21
    def extract_points(self, vect_name, fields, na_rm=True, as_df=False):
        """Samples a list of GRASS rasters using a point dataset

        Parameters
        ----------
        vect_name : str
            Name of GRASS GIS vector containing point features.
            
        fields : str or list of str
            Name of the attribute column(s) containing the response variable(s).
            
        na_rm : bool (opt). Default is True
            Whether to remove samples containing NaNs.
        
        as_df : bool (opt). Default is False.
            Whether to return the extracted RasterStack values as a Pandas
            DataFrame.

        Returns
        -------
        X : ndarray
            2d array containing the extracted raster values with the dimensions
            ordered by (n_samples, n_features).
            
        y : ndarray
            1d or 2d array of labels with the dimensions ordered by 
            (n_samples, n_fields).
                
        df : pandas.DataFrame
            Extracted raster values as Pandas DataFrame if as_df = True.
        """

        if isinstance(fields, str):
            fields = [fields]

        vname = vect_name.split("@")[0]

        try:
            mapset = vect_name.split("@")[1]
        except IndexError:
            mapset = g.mapset(flags="p", stdout_=PIPE).outputs.stdout.split(os.linesep)[
                0
            ]

        # open grass vector
        with VectorTopo(name=vname, mapset=mapset, mode="r") as points:

            # retrieve key column
            key_col = points.table.key

            # read attribute table (ignores region)
            df = pd.read_sql_query(
                sql="select * from {name}".format(name=points.table.name), con=points.table.conn
            )

            for i in fields:
                if i not in df.columns.tolist():
                    gs.fatal(i + " not present in the attribute table")

            df = df.loc[:, fields + [points.table.key]]

            # extract raster data
            Xs = []

            for name, layer in self.loc.items():
                rast_data = v.what_rast(
                    map=vect_name,
                    raster=layer.fullname(),
                    flags="p",
                    quiet=True,
                    stdout_=PIPE,
                ).outputs.stdout.strip().split(os.linesep)

                with RasterRow(layer.fullname()) as src:
                    if src.mtype == "CELL":
                        nodata = self._cell_nodata
                        dtype = pd.Int64Dtype()
                    else:
                        nodata = np.nan
                        dtype = np.float32

                    X = [k.split("|")[1] if k.split("|")[1] != "*" else nodata for k in rast_data]
                    X = np.asarray(X)
                    cat = np.asarray([int(k.split("|")[0]) for k in rast_data])

                    if src.mtype == "CELL":
                        X = [int(i) for i in X]
                    else:
                        X = [float(i) for i in X]

                X = pd.DataFrame(data=np.column_stack((X, cat)), columns=[name, key_col])
                X[name] = X[name].astype(dtype)
                Xs.append(X)

        for X in Xs:
            df = df.merge(X, on=key_col)

        # set any grass integer nodata values to NaN
        df = df.replace(self._cell_nodata, np.nan)

        # remove rows with missing response data
        df = df.dropna(subset=fields)

        # remove samples containing NaNs
        if na_rm is True:
            gs.message("Removing samples with NaN values in the raster feature variables...")
            df = df.dropna()

        if as_df is False:
            if len(fields) == 1:
                fields = fields[0]

            X = df.loc[:, df.columns.isin(self.loc.keys())].values
            y = np.asarray(df.loc[:, fields].values)
            cat = np.asarray(df.loc[:, key_col].values)

            return X, y, cat

        return df
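        # Hypothetical usage sketch (object construction and map names are
        # illustrative, not taken from the source):
        #   stack = RasterStack(["elevation", "slope"])
        #   X, y, cat = stack.extract_points("sites@PERMANENT", fields="class")
        #   df = stack.extract_points("sites", ["class"], as_df=True)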
Example #22
def main():
    inputraster = options["input"]
    number_lines = int(options["number_lines"])
    edge_detection_algorithm = options["edge_detection"]
    no_edge_friction = int(options["no_edge_friction"])
    lane_border_multiplier = int(options["lane_border_multiplier"])
    min_tile_size = None
    if options["min_tile_size"]:
        min_tile_size = float(options["min_tile_size"])
    existing_cutlines = None
    if options["existing_cutlines"]:
        existing_cutlines = options["existing_cutlines"].split(",")
    tiles = options["output"]
    memory = int(options["memory"])
    tiled = False

    if options["tile_width"]:
        tiled = True
        gscript.message(_("Using tiles processing for edge detection"))
        width = int(options["tile_width"])
        height = int(options["tile_height"])
        overlap = int(options["overlap"])

    processes = int(options["processes"])

    global temp_maps
    temp_maps = []
    r = "raster"
    v = "vector"

    if existing_cutlines:
        existingcutlinesmap = "temp_icutlines_existingcutlinesmap_%i" % os.getpid(
        )
        if len(existing_cutlines) > 1:
            gscript.run_command(
                "v.patch",
                input_=existing_cutlines,
                output=existingcutlinesmap,
                quiet=True,
                overwrite=True,
            )
            existing_cutlines = existingcutlinesmap

        gscript.run_command(
            "v.to.rast",
            input_=existing_cutlines,
            output=existingcutlinesmap,
            use="val",
            type_="line,boundary",
            overwrite=True,
            quiet=True,
        )

        temp_maps.append([existingcutlinesmap, r])

    temp_edge_map = "temp_icutlines_edgemap_%d" % os.getpid()
    temp_maps.append([temp_edge_map, r])

    gscript.message(
        _("Creating edge map using <%s> edge detection algorithm") %
        edge_detection_algorithm)
    if edge_detection_algorithm == "zc":
        kwargs = {
            "input": inputraster,
            "output": temp_edge_map,
            "width_": int(options["zc_width"]),
            "threshold": float(options["zc_threshold"]),
            "quiet": True,
        }

        if tiled:
            grd = GridModule("i.zc",
                             width=width,
                             height=height,
                             overlap=overlap,
                             processes=processes,
                             split=False,
                             **kwargs)
            grd.run()
        else:
            gscript.run_command("i.zc", **kwargs)

    elif edge_detection_algorithm == "canny":
        if not gscript.find_program("i.edge", "--help"):
            message = _("You need to install the addon i.edge to use ")
            message += _("the Canny edge detector.\n")
            message += _(
                " You can install the addon with 'g.extension i.edge'")
            gscript.fatal(message)

        kwargs = {
            "input": inputraster,
            "output": temp_edge_map,
            "low_threshold": float(options["canny_low_threshold"]),
            "high_threshold": float(options["canny_high_threshold"]),
            "sigma": float(options["canny_sigma"]),
            "quiet": True,
        }

        if tiled:
            grd = GridModule("i.edge",
                             width=width,
                             height=height,
                             overlap=overlap,
                             processes=processes,
                             split=False,
                             flags="n",
                             **kwargs)
            grd.run()
        else:
            gscript.run_command("i.edge", flags="n", **kwargs)

    else:
        gscript.fatal(
            "Only zero-crossing and Canny available as edge detection algorithms."
        )

    region = gscript.region()
    gscript.message(_("Finding cutlines in both directions"))

    nsrange = float(region.n - region.s - region.nsres)
    ewrange = float(region.e - region.w - region.ewres)

    if nsrange > ewrange:
        hnumber_lines = number_lines
        vnumber_lines = max(int(number_lines * (ewrange / nsrange)), 1)
    else:
        vnumber_lines = number_lines
        hnumber_lines = max(int(number_lines * (nsrange / ewrange)), 1)
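    # Example: with number_lines=8 and a region twice as tall (NS) as it is
    # wide (EW), hnumber_lines stays 8 while vnumber_lines becomes
    # max(int(8 * 0.5), 1) = 4, keeping the line spacing roughly isotropic.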

    # Create the lines in horizontal direction
    nsstep = float(region.n - region.s - region.nsres) / hnumber_lines
    hpointsy = [((region.n - i * nsstep) - region.nsres / 2.0)
                for i in range(0, hnumber_lines + 1)]
    hlanepointsy = [y - nsstep / 2.0 for y in hpointsy]
    hstartpoints = listzip([region.w + 0.2 * region.ewres] * len(hpointsy),
                           hpointsy)
    hstoppoints = listzip([region.e - 0.2 * region.ewres] * len(hpointsy),
                          hpointsy)
    hlanestartpoints = listzip([region.w + 0.2 * region.ewres] *
                               len(hlanepointsy), hlanepointsy)
    hlanestoppoints = listzip([region.e - 0.2 * region.ewres] *
                              len(hlanepointsy), hlanepointsy)

    hlanemap = "temp_icutlines_hlanemap_%i" % os.getpid()
    temp_maps.append([hlanemap, v])
    temp_maps.append([hlanemap, r])

    os.environ["GRASS_VERBOSE"] = "0"
    new = VectorTopo(hlanemap)
    new.open("w")
    for line in listzip(hlanestartpoints, hlanestoppoints):
        new.write(geom.Line(line), cat=1)
    new.close()
    del os.environ["GRASS_VERBOSE"]

    gscript.run_command(
        "v.to.rast",
        input_=hlanemap,
        output=hlanemap,
        use="val",
        type_="line",
        overwrite=True,
        quiet=True,
    )

    hbasemap = "temp_icutlines_hbasemap_%i" % os.getpid()
    temp_maps.append([hbasemap, r])

    # Building the cost maps using the following logic
    # - Any pixel not on an edge, nor on an existing cutline gets a
    # no_edge_friction cost, or no_edge_friction_cost x 10  if there are
    # existing cutlines
    # - Any pixel on an edge gets a cost of 1 if there are no existing cutlines,
    # and a cost of no_edge_friction if there are
    # - A lane line gets a very high cost (lane_border_multiplier x cost of no
    # edge pixel - the latter depending on the existence of cutlines).
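    # Worked example (no existing cutlines, no_edge_friction=5,
    # lane_border_multiplier=10000): an edge pixel costs 1, a non-edge pixel
    # costs 5, and a lane pixel costs 10000 * 5 = 50000, so least-cost paths
    # follow edges and only cross the lane lines where unavoidable.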

    mapcalc_expression = "%s = " % hbasemap
    mapcalc_expression += "if(isnull(%s), " % hlanemap
    if existing_cutlines:
        mapcalc_expression += "if(%s == 0 && isnull(%s), " % (
            temp_edge_map,
            existingcutlinesmap,
        )
        mapcalc_expression += "%i, " % (no_edge_friction * 10)
        mapcalc_expression += "if(isnull(%s), %s, 1))," % (
            existingcutlinesmap,
            no_edge_friction,
        )
        mapcalc_expression += "%i)" % (lane_border_multiplier *
                                       no_edge_friction * 10)
    else:
        mapcalc_expression += "if(%s == 0, " % temp_edge_map
        mapcalc_expression += "%i, " % no_edge_friction
        mapcalc_expression += "1), "
        mapcalc_expression += "%i)" % (lane_border_multiplier *
                                       no_edge_friction)
    gscript.run_command("r.mapcalc",
                        expression=mapcalc_expression,
                        quiet=True,
                        overwrite=True)

    hcumcost = "temp_icutlines_hcumcost_%i" % os.getpid()
    temp_maps.append([hcumcost, r])
    hdir = "temp_icutlines_hdir_%i" % os.getpid()
    temp_maps.append([hdir, r])

    # Create the lines in vertical direction
    ewstep = float(region.e - region.w - region.ewres) / vnumber_lines
    vpointsx = [((region.e - i * ewstep) - region.ewres / 2.0)
                for i in range(0, vnumber_lines + 1)]
    vlanepointsx = [x + ewstep / 2.0 for x in vpointsx]
    vstartpoints = listzip(vpointsx,
                           [region.n - 0.2 * region.nsres] * len(vpointsx))
    vstoppoints = listzip(vpointsx,
                          [region.s + 0.2 * region.nsres] * len(vpointsx))
    vlanestartpoints = listzip(vlanepointsx, [region.n - 0.2 * region.nsres] *
                               len(vlanepointsx))
    vlanestoppoints = listzip(vlanepointsx, [region.s + 0.2 * region.nsres] *
                              len(vlanepointsx))

    vlanemap = "temp_icutlines_vlanemap_%i" % os.getpid()
    temp_maps.append([vlanemap, v])
    temp_maps.append([vlanemap, r])

    os.environ["GRASS_VERBOSE"] = "0"
    new = VectorTopo(vlanemap)
    new.open("w")
    for line in listzip(vlanestartpoints, vlanestoppoints):
        new.write(geom.Line(line), cat=1)
    new.close()
    del os.environ["GRASS_VERBOSE"]

    gscript.run_command(
        "v.to.rast",
        input_=vlanemap,
        output=vlanemap,
        use="val",
        type_="line",
        overwrite=True,
        quiet=True,
    )

    vbasemap = "temp_icutlines_vbasemap_%i" % os.getpid()
    temp_maps.append([vbasemap, r])
    mapcalc_expression = "%s = " % vbasemap
    mapcalc_expression += "if(isnull(%s), " % vlanemap
    if existing_cutlines:
        mapcalc_expression += "if(%s == 0 && isnull(%s), " % (
            temp_edge_map,
            existingcutlinesmap,
        )
        mapcalc_expression += "%i, " % (no_edge_friction * 10)
        mapcalc_expression += "if(isnull(%s), %s, 1))," % (
            existingcutlinesmap,
            no_edge_friction,
        )
        mapcalc_expression += "%i)" % (lane_border_multiplier *
                                       no_edge_friction * 10)
    else:
        mapcalc_expression += "if(%s == 0, " % temp_edge_map
        mapcalc_expression += "%i, " % no_edge_friction
        mapcalc_expression += "1), "
        mapcalc_expression += "%i)" % (lane_border_multiplier *
                                       no_edge_friction)
    gscript.run_command("r.mapcalc",
                        expression=mapcalc_expression,
                        quiet=True,
                        overwrite=True)

    vcumcost = "temp_icutlines_vcumcost_%i" % os.getpid()
    temp_maps.append([vcumcost, r])
    vdir = "temp_icutlines_vdir_%i" % os.getpid()
    temp_maps.append([vdir, r])

    if processes > 1:
        pmemory = int(memory / 2)
        rcv = gscript.start_command(
            "r.cost",
            input_=vbasemap,
            startcoordinates=vstartpoints,
            stopcoordinates=vstoppoints,
            output=vcumcost,
            outdir=vdir,
            memory=pmemory,
            quiet=True,
            overwrite=True,
        )

        rch = gscript.start_command(
            "r.cost",
            input_=hbasemap,
            startcoordinates=hstartpoints,
            stopcoordinates=hstoppoints,
            output=hcumcost,
            outdir=hdir,
            memory=pmemory,
            quiet=True,
            overwrite=True,
        )
        rcv.wait()
        rch.wait()

    else:
        gscript.run_command(
            "r.cost",
            input_=vbasemap,
            startcoordinates=vstartpoints,
            stopcoordinates=vstoppoints,
            output=vcumcost,
            outdir=vdir,
            memory=memory,
            quiet=True,
            overwrite=True,
        )

        gscript.run_command(
            "r.cost",
            input_=hbasemap,
            startcoordinates=hstartpoints,
            stopcoordinates=hstoppoints,
            output=hcumcost,
            outdir=hdir,
            memory=memory,
            quiet=True,
            overwrite=True,
        )

    hlines = "temp_icutlines_hlines_%i" % os.getpid()
    temp_maps.append([hlines, r])
    vlines = "temp_icutlines_vlines_%i" % os.getpid()
    temp_maps.append([vlines, r])

    if processes > 1:
        rdh = gscript.start_command(
            "r.drain",
            input_=hcumcost,
            direction=hdir,
            startcoordinates=hstoppoints,
            output=hlines,
            flags="d",
            quiet=True,
            overwrite=True,
        )

        rdv = gscript.start_command(
            "r.drain",
            input_=vcumcost,
            direction=vdir,
            startcoordinates=vstoppoints,
            output=vlines,
            flags="d",
            quiet=True,
            overwrite=True,
        )

        rdh.wait()
        rdv.wait()

    else:
        gscript.run_command(
            "r.drain",
            input_=hcumcost,
            direction=hdir,
            startcoordinates=hstoppoints,
            output=hlines,
            flags="d",
            quiet=True,
            overwrite=True,
        )

        gscript.run_command(
            "r.drain",
            input_=vcumcost,
            direction=vdir,
            startcoordinates=vstoppoints,
            output=vlines,
            flags="d",
            quiet=True,
            overwrite=True,
        )

    # Combine horizontal and vertical lines
    temp_raster_tile_borders = "temp_icutlines_raster_tile_borders_%i" % os.getpid()
    temp_maps.append([temp_raster_tile_borders, r])
    gscript.run_command(
        "r.patch",
        input_=[hlines, vlines],
        output=temp_raster_tile_borders,
        quiet=True,
        overwrite=True,
    )

    gscript.message(_("Creating vector polygons"))

    # Create vector polygons

    # First we need to shrink the region a bit to make sure that all vector
    # points / lines fall within the raster
    gscript.use_temp_region()
    gscript.run_command("g.region",
                        s=region.s + region.nsres,
                        e=region.e - region.ewres,
                        quiet=True)

    region_map = "temp_icutlines_region_map_%i" % os.getpid()
    temp_maps.append([region_map, v])
    temp_maps.append([region_map, r])
    gscript.run_command("v.in.region",
                        output=region_map,
                        type_="line",
                        quiet=True,
                        overwrite=True)

    gscript.del_temp_region()

    gscript.run_command(
        "v.to.rast",
        input_=region_map,
        output=region_map,
        use="val",
        type_="line",
        quiet=True,
        overwrite=True,
    )

    temp_raster_polygons = "temp_icutlines_raster_polygons_%i" % os.getpid()
    temp_maps.append([temp_raster_polygons, r])
    gscript.run_command(
        "r.patch",
        input_=[temp_raster_tile_borders, region_map],
        output=temp_raster_polygons,
        quiet=True,
        overwrite=True,
    )

    temp_raster_polygons_thin = "temp_icutlines_raster_polygons_thin_%i" % os.getpid()
    temp_maps.append([temp_raster_polygons_thin, r])
    gscript.run_command(
        "r.thin",
        input_=temp_raster_polygons,
        output=temp_raster_polygons_thin,
        quiet=True,
        overwrite=True,
    )

    # Create a series of temporary map names as we have to go
    # through several steps until we reach the final map.
    temp_vector_polygons1 = "temp_icutlines_vector_polygons1_%i" % os.getpid()
    temp_maps.append([temp_vector_polygons1, v])
    temp_vector_polygons2 = "temp_icutlines_vector_polygons2_%i" % os.getpid()
    temp_maps.append([temp_vector_polygons2, v])
    temp_vector_polygons3 = "temp_icutlines_vector_polygons3_%i" % os.getpid()
    temp_maps.append([temp_vector_polygons3, v])
    temp_vector_polygons4 = "temp_icutlines_vector_polygons4_%i" % os.getpid()
    temp_maps.append([temp_vector_polygons4, v])

    gscript.run_command(
        "r.to.vect",
        input_=temp_raster_polygons_thin,
        output=temp_vector_polygons1,
        type_="line",
        flags="t",
        quiet=True,
        overwrite=True,
    )

    # Erase all category values from the lines
    gscript.run_command(
        "v.category",
        input_=temp_vector_polygons1,
        op="del",
        cat="-1",
        output=temp_vector_polygons2,
        quiet=True,
        overwrite=True,
    )

    # Transform lines to boundaries
    gscript.run_command(
        "v.type",
        input_=temp_vector_polygons2,
        from_type="line",
        to_type="boundary",
        output=temp_vector_polygons3,
        quiet=True,
        overwrite=True,
    )

    # Add centroids
    gscript.run_command(
        "v.centroids",
        input_=temp_vector_polygons3,
        output=temp_vector_polygons4,
        quiet=True,
        overwrite=True,
    )

    # If a threshold is given, erase polygons that are too small
    if min_tile_size:
        gscript.run_command(
            "v.clean",
            input_=temp_vector_polygons4,
            tool=["rmdangle", "rmarea"],
            threshold=[-1, min_tile_size],
            output=tiles,
            quiet=True,
            overwrite=True,
        )
    else:
        gscript.run_command("g.copy",
                            vect=[temp_vector_polygons4, tiles],
                            quiet=True,
                            overwrite=True)

    gscript.vector_history(tiles)
Example #23
    def new_map(self, mapa, layer, tab_sufix, objs, values, tab_subname=''):
        """Return
        """
        if not objs:
            return None
        map_out = VectorTopo(mapa)

        tab_sufix_out = OUT_TABLES_NAMES[tab_sufix]
        tab_name = self.road_name + tab_sufix_out + tab_subname

        columns = OUT_TABLES[tab_sufix]
        if layer == 1:
            map_out.open('w', layer=layer, with_z=True, tab_name=tab_name,
                         tab_cols=columns)
        else:
            map_out.open('rw')
            link = Link(layer, tab_name, tab_name, 'cat' + str(layer))
            map_out.dblinks.add(link)
            table = link.table()
            if not table.exist():
                table.create(columns)
            table.conn.commit()
            map_out.close()

            map_out.open('rw', layer=layer, with_z=True)
        for i, obj in enumerate(objs):
            map_out.write(obj, i + 1, values[i])
        map_out.table.conn.commit()
        map_out.close()
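        # Hypothetical call (names and arguments are illustrative, not from
        # the source):
        #   self.new_map('plan_marks', layer=1, tab_sufix='marks',
        #                objs=[pnt_a, pnt_b], values=[('start',), ('end',)])
        # would open 'plan_marks' with a table named
        # self.road_name + OUT_TABLES_NAMES['marks'] and write both points
        # with cats 1 and 2.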
Example #24
def main(options, flags):
    config_name = options['configuration']
    params = owsConnections[config_name]
    
    output = options['output']
    betriebid = options['betriebid']
    basename = 'B' + betriebid + '_'
    task = options['task']
    maxsoilloss = options['maxsoilloss']

    params['username'] = options['username']
    params['password'] = options['password']
    params['dsn'] = params['dsn'] + ' user=%s password=%s' \
        %(params['username'],params['password'])
        
    flag_b = flags['b'] #use newly defined barriers
    flag_g = flags['g'] #don't set region according to parcel data
    flag_i = flags['i'] #force reimport of base data
    flag_n = flags['n'] #don't import anything ('offline')
    flag_c = flags['c'] #don't copy results to output raster map
    flag_s = flags['s'] #calculate statistics for results

    ## define own methods for Vect and Rast classes
    from grass.pygrass.vector import VectorTopo as Vect
    # some monkey patching with own methods
    #autoimport vector data from PostGIS
        
    def autoimport(self, layer, *args, **kwargs):
        if layer not in params['layers']:
            print('Layer <%s> not available/not configured on server.' % layer)
        vinogr(dsn = params['dsn'], snap = 0.01,
            layer=params['layers'][layer],
            output=self.name, **kwargs)
    Vect.autoimport = autoimport
    
    from grass.pygrass.raster import RasterAbstractBase as Rast
    #autoimport raster from WCS
    def autoimport(self, coverage, *args, **kwargs):
        if coverage not in params['coverages']:
            print('Coverage <%s> not available/not configured on server.' % coverage)
        r.in_wcs(url = params['url'], 
                username = params['username'], 
                password = params['password'],
                coverage=params['coverages'][coverage],
                output=self.name, **kwargs)
    Rast.autoimport = autoimport
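    # With both classes monkey-patched, base data can be pulled on demand in
    # a uniform way, e.g. (illustrative):
    #   Vect(basename + 'parcels').autoimport('parcels', where="betrieb_id = 42")
    #   Rast(basename + 'elevation').autoimport('elevation')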

    def setRegion(parcelmap,betriebid):
        ## set region to parcel layer extent + buffer
        reg = Region()
        reg.vect(parcelmap.name)
        regbuffer = 100
        reg.north += regbuffer
        reg.east += regbuffer
        reg.south -= regbuffer
        reg.west -= regbuffer
        reg.set_current()
        # set_current() not working right now
        # so using g.region() :
        g.region(n=str(reg.north), s=str(reg.south), w=str(reg.west), e=str(reg.east), res='2', flags='a',quiet=quiet)
        g.region(save='B'+betriebid,overwrite=True,quiet=quiet)
    
    def slopestats():
        slopemap = Rast(maps['elevation'].name + '.slope')
        r.slope_aspect(elevation=maps['elevation'].name, slope=slopemap.name, format='percent') 
        print('\n \n Statistics for slope <%s> (slope in %%): '%(slopemap.name))
        rsoillossstats(soilloss=slopemap.name, map=parcelmap.name, parcelnumcol='id')
        
    
    def sbare():
        rsoillossreclass.flags.u = True
        rsoillossreclass(maps['soillossbare'].name, 'soillossbare',flags='')
        
        if flag_s:
            print('\n \n Statistics for soilloss <%s> : '%(soillossbaremap.name))
            rsoillossstats(soilloss=soillossbaremap.name, 
                           map=parcelmap.name, parcelnumcol='id')
        if not flag_c:
            g.copy(rast=(soillossbaremap.name,output))
            gscript.message('Copy made to <%s> for automatic output' %(output))
    
    def sbareupdate():
        rsoillossupdate.inputs.map = parcelmap.name
        rsoillossupdate.inputs.factorold = maps['kfactor'].name
        rsoillossupdate.inputs.factorcol = 'kfactor'
        rsoillossupdate.flags.k = True
        rsoillossupdate.flags.p = True
        rsoillossupdate(soillossin=maps['soillossbare'].name, 
                        soillossout=soillossbarecorrmap.name)
        gscript.message('Soilloss for bare soil successfully updated to <%s> using parcelwise kfactor.' %(soillossbarecorrmap.name))
        if not flag_c:
            g.copy(rast=(soillossbarecorrmap.name,output))
            gscript.message('Copy made to <%s> for automatic output' %(output))
            
        rsoillossreclass(soillossbarecorrmap.name, 'soillossbare',flags='')
        gscript.message('Reclassified and colored maps found in <%s.3> and <%s.9> .'%(soillossbarecorrmap.name, soillossbarecorrmap.name))
        
        if flag_s:
            print('\n \n Statistics for soilloss on bare soil <%s> : ' % (soillossbarecorrmap.name))
            rsoillossstats(soilloss=soillossbarecorrmap.name, map=parcelmap.name, parcelnumcol='id')
      
    def sgrow():
        if soillossbarecorrmap.exist():
            rsoillossgrow.inputs.soillossbare = soillossbarecorrmap.name
        else: rsoillossgrow.inputs.soillossbare = soillossbaremap.name
        rsoillossgrow.inputs.map = parcelmap.name
        rsoillossgrow.inputs.factorcols = (params['colnames'][('cfactor')],)
        rsoillossgrow.inputs.factorcols += (params['colnames'][('pfactor')],)
        rsoillossgrow(soillossgrow=soillossgrowmap.name)
        gscript.message('Soilloss for grown soil successfully calculated to <%s> using parcelwise C and P factor.' % (soillossgrowmap.name))
                
        if not flag_c:
            g.copy(rast=(soillossgrowmap.name,output))
            gscript.message('Copy made to <%s> for automatic output' %(output))

        rsoillossreclass(soillossgrowmap.name, 'soillossgrow',flags='')
        gscript.message('Reclassified and colored maps found in <%s.3> and <%s.9> .'%(soillossgrowmap.name, soillossgrowmap.name))

        if flag_s:
            print('\n \n Statistics for soilloss on grown soil <%s> : ' % (soillossgrowmap.name))
            rsoillossstats(soilloss=soillossgrowmap.name, map=parcelmap.name, parcelnumcol='id')
                    
    def scpmax():
        if soillossbarecorrmap.exist():
            rsoillosscpmax.inputs.soillossbare = soillossbarecorrmap.name
        else: rsoillosscpmax.inputs.soillossbare = soillossbaremap.name
        
        rsoillosscpmax.inputs.maxsoilloss=maxsoilloss
        rsoillosscpmax(cpmax=soillosscpmaxmap.name)
        
        if not flag_c:
            g.copy(rast=(soillosscpmaxmap.name,output))
            gscript.message('Copy made to <%s> for automatic output' %(output))
        
        if flag_s:
            print('\n \n Statistics for <%s> : ' % (soillosscpmaxmap.name))
            rsoillossstats(soilloss=soillosscpmaxmap.name, map=parcelmap.name, parcelnumcol='id')
             
    def smeasure():
        gscript.message('Import <%s>' % measuremap.name)
        measuremap.autoimport('measures', overwrite=True, quiet=quiet,
                              where="betrieb_id = %s" % betriebid)
        
        soillossbaremap = maps['soillossbare']
        kfactormap = maps['kfactor']

        if soillossbarecorrmap.exist():
            gscript.message('Using updated soillossbare map.')
            soillossbaremap = soillossbarecorrmap
            kfactormap = Rast(parcelmap.name + '.kfactor')
        
        if flag_b:
            measurebarriermap = Vect(measuremap.name + '_barrier')
            v.extract(input=measuremap.name, where="barrier = 1",
                      output=measurebarriermap.name)
            
            measurefieldblockmap = Vect(measuremap.name + '_fieldblocks')
            v.overlay(ainput=maps['fieldblocks'].name,
                      binput=measurebarriermap.name,\
                      operator='not', 
                      output=measurefieldblockmap.name)
            
            rsoillossbare.inputs.elevation = maps['elevation'].name
            rsoillossbare.inputs.rfactor = maps['rfactor'].name
            rsoillossbare.inputs.kfactor = kfactormap.name
            rsoillossbare.inputs.map = measurefieldblockmap.name
            rsoillossbare.inputs.constant_m = '0.6'
            rsoillossbare.inputs.constant_n = '1.4'


            rsoillossbare.flags.r = True
            rsoillossbare(soillossbare=soillossbarebarriermap.name)
            soillossbaremap = soillossbarebarriermap

        parcelpfactor = parcelmap.name + '.pfactor'
        parcelcfactor = parcelmap.name + '.cfactor'
        v.to_rast(input=parcelmap.name, use='attr', attrcolumn='pfactor',
                  output=parcelpfactor)
        v.to_rast(input=parcelmap.name, use='attr', attrcolumn='cfactor',
                  output=parcelcfactor)
                  
        measurepfactor = measuremap.name + '.pfactor'
        measurecfactor = measuremap.name + '.cfactor'
        v.to_rast(input=measuremap.name, use='attr', attrcolumn='pfactor',
                  output=measurepfactor)
        v.to_rast(input=measuremap.name, use='attr', attrcolumn='cfactor',
                  output=measurecfactor)

        pfactor = parcelmap.name + '.pfactor.measure'
        cfactor = parcelmap.name + '.cfactor.measure'

        r.patch(input=(measurepfactor,parcelpfactor), output=pfactor)
        r.patch(input=(measurecfactor,parcelcfactor), output=cfactor)
        rsoillossgrow.inputs.soillossbare = soillossbaremap.name
        rsoillossgrow.inputs.cfactor = cfactor
        rsoillossgrow.inputs.pfactor = pfactor
        rsoillossgrow(soillossgrow=soillossmeasuremap.name)
        
        rsoillossreclass(soillossmeasuremap.name, 'soillossgrow',flags='')
        gscript.message('Reclassified and colored maps found in <%s.3> and <%s.9> .'%(soillossmeasuremap.name, soillossmeasuremap.name))

        if flag_s:
            gscript.message('\n \n Statistics for soilloss with measures <%s> : ' % (soillossmeasuremap.name))
            rsoillossstats(soilloss=soillossmeasuremap.name, map=parcelmap.name, parcelnumcol='id')
        
        if not flag_c:
            g.copy(rast=(soillossmeasuremap.name,output))
            gscript.message('Copy made to <%s> for automatic output' %(output))
    
#######################################################################
## BEGIN main controls
    curregion = Mapset()
    permanent = Mapset('PERMANENT')
    if curregion.name == permanent.name:
        gscript.fatal("Please change mapset. It can be dangerous to use this prealpha-module in PERMANENT")
            
    parcelmap = Vect(basename+'parcels')  
    if not flag_n:
        parcelmap.autoimport('parcels', overwrite=True, quiet=quiet,
                             where="betrieb_id = %s" % betriebid)
        #if parcelmap.popen.returncode != 0:
        #   gscript.fatal('Import of parcel data failed.')
        
    if not flag_g: 
        setRegion(parcelmap,betriebid)
        gscript.verbose('Region set to parcels extent + 100 raster cells. \
            \n Resolution: raster cell = 2 x 2 meter.')
            
    basedata_rast = ('elevation','soillossbare','kfactor','rfactor')
    basedata_vect = ('fieldblocks',)
    
    maps = {}
    for map in (basedata_rast):
        mapname = basename + map
        maps[map] = Rast(mapname)
        
    for map in (basedata_vect):
        mapname = basename + map
        maps[map] = Vect(mapname)
      
    if not flag_n:
        vinogr.flags.r = True
        vinogr.inputs.where = ""

        for mapname in maps.keys():
            map = maps[mapname]
            if map.exist() and flag_i:
                map.remove()
            if not map.exist():
                map.autoimport(mapname)
                
    
    soillossbaremap = maps['soillossbare']
    
    soillossbarecorrmap = Rast(maps['soillossbare'].name +'.update')
    soillossgrowmap = Rast(basename+'soillossgrow')
    soillosscpmaxmap = Rast(basename+'cpmax')
    measuremap = Vect(basename+'measures')
    soillossmeasuremap = Rast(basename+'soillossgrow.measure')
    soillossbarebarriermap = Rast(basename+'soillossbare.barrier')
    
    
    gscript.message('Import ok. Beginning task %s ...' % task)

    tasks = {'soilloss.bare' : sbare,
        'soilloss.bare.update': sbareupdate,
        'soilloss.grow' : sgrow,
        'soilloss.grow.measure' : smeasure,
        'soilloss.cpmax' : scpmax,
        'slope.stats' : slopestats
        }
    
    if task in tasks:
        tasks[task]()
    else:
        gscript.fatal('Please choose a valid task')
Example #25
    def structures(self, elev, stream=None,
                   ndigits=0, resolution=None, contour=None):
        """Return a tuple with lines structres options of a hypotetical plant.

        ::

              river
               \ \
                \i\-------_______
                |\ \              \
                | \ \              \
                )  \ \              )  cond0
               /    \ \           /
              /      \ \         /
             ( cond1  \ \       /
              \        \ \      |
               \        \ \     |
                \        \ \    |
                 o--------\r\---o
               pstk1       \ \   pstk0
                            \ \

        Parameters
        ----------

        elev: raster
            Raster instance already opened with the elevation.
        stream: vector, optional
            Stream network; currently used only by commented-out code.
        ndigits: int, optional
            Number of digits used when rounding the contour elevation levels.
        resolution: float, optional
            Resolution used when snapping the intake elevations to levels.
        contour: vector, optional
            Already opened contour map; if None, the contours are generated
            with r.contour and removed afterwards.

        Returns
        -------

        a list of tuple: [(HydroStruct(intake, conduct=cond0, penstock=pstk0),
                           HydroStruct(intake, conduct=cond1, penstock=pstk1))]
           Return a list of tuples, each containing two HydroStruct: the
           first with the shortest penstock and the second with the other
           option.
        """
        def get_struct(contur, respoint):
            """Return the lines of the conduct and the penstock.

            Parameters
            ----------

            contur: line segment
                A segment of the contour line, split with the splitline
                function, so that its first point is the intake.
            respoint: point
                It is the point of the plant restitution.

            Returns
            -------

            tuple: (conduct, penstock)
               Return two lines, the first with the conduct and the second with
               the penstock. Note: both conduct and penstock lines are coherent
               with water flow direction.
            """
            dist = contur.distance(respoint)
            conduct = contur.segment(0, dist.sldist)
            penstock = Line([dist.point, respoint])
            return conduct, penstock

        def get_all_structs(contur, itk, res):
            l0, l1 = splitline(contur, itk.point,
                               3*itk.point.distance(res.point))
            # get structs
            c0, p0 = get_struct(l0, res.point)
            c1, p1 = get_struct(l1, res.point)
            s0, s1 = 'option0', 'option1'
            # TODO: uncomment this to have left and right distinction...
            # but so far it is not working properly, therefore it is
            # commented out.
            #if stream is not None:
            #    sitk = stream.find['by_point'].geo(itk.point, maxdist=100000)
            #    s0, s1 = (('right', 'left') if isinverted(sitk, elev, reg)
            #              else ('left', 'right'))
            return (HydroStruct(itk, c0, p0, s0), HydroStruct(itk, c1, p1, s1))

        result = []
        if contour is None:
            levels = sorted(set([closest(itk.elevation,
                                         ndigits=ndigits, resolution=resolution)
                                 for itk in self.intakes]))

            # generate the contour lines that pass through the intake points
            contour_tmp = 'tmpvect%04d' % random.randint(1000, 9999)
            r.contour(input='%s@%s' % (elev.name, elev.mapset),
                      output=contour_tmp, step=0, levels=levels,
                      overwrite=True)
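            # step=0 with an explicit 'levels' list is meant to make
            # r.contour generate only the contour lines at the intake
            # elevations computed above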

            cnt = VectorTopo(contour_tmp)
            cnt.open()
        else:
            cnt = contour

        for itk in self.intakes:
            # find the closest contour line
            contur_res = cnt.find['by_point'].geo(self.restitution.point,
                                                  maxdist=100000.0)

            # TODO: finding the contour line for both the intake and the
            # restitution is probably unnecessary, and we could also remove
            # the check below: contur_itk.id != contur_res.id
            contur_itk = cnt.find['by_point'].geo(itk.point,
                                                  maxdist=100000.0)
            if contur_itk is None or contur_res is None:
                msg = ('Not able to find the contour line closest to the '
                       'intake point %r of the plant %r '
                       'from the contour line map: %s')
                raise TypeError(msg % (itk, self, cnt.name))
            if contur_itk.id != contur_res.id:
                print('=' * 30)
                print(itk)
                msg = ("Contur lines are different! %d != %d, in %s."
                       "Therefore %d will be used.")
                print(msg % (contur_itk.id, contur_res.id, cnt.name,
                             contur_itk.id))

            # check contour
            contur = not_overlaped(contur_itk)
            contur = sort_by_west2east(contur)
            result.append(get_all_structs(contur, itk, self.restitution))

        # remove temporary vector map
        if contour is None:
            cnt.close()
            cnt.remove()
        return result
Example #26
def main(opts, flgs):
    TMPVECT = []
    DEBUG = True if flgs['d'] else False
    atexit.register(cleanup, vect=TMPVECT, debug=DEBUG)
    # check input maps
    rhydro = ['kind_label', 'discharge', 'id_point', 'id_plant']
    rother = ['kind_label', 'discharge', 'id_point', 'id_plant']
    ovwr = overwrite()

    try:
        hydro = check_required_columns(opts['hydro'], int(opts['hydro_layer']),
                                       rhydro, 'hydro')
        if opts['other']:
            other = check_required_columns(opts['other'], opts['other_layer'],
                                           rother, 'other')
        else:
            other = None
        #minflow = check_float_or_raster(opts['minflow'])
    except ParameterError as exc:
        exception2error(exc)

    # start working
    hydro.open('r')
    el, mset = (opts['elevation'].split('@') if '@' in opts['elevation'] else
                (opts['elevation'], ''))
    elev = RasterRow(name=el, mapset=mset)
    elev.open('r')
    plants, skipped = read_plants(hydro,
                                  elev=elev,
                                  restitution=opts['hydro_kind_turbine'],
                                  intake=opts['hydro_kind_intake'])
    hydro.close()
    rvname, rvmset = (opts['river'].split('@') if '@' in opts['river'] else
                      (opts['river'], ''))

    vplants = opts['output_plants'] if opts['output_plants'] else 'tmpplants'
    # FIXME: tried with tmpplants in my mapset and it doesn't work
    if opts['output_plants'] == '':
        TMPVECT.append(vplants)
    with VectorTopo(rvname, rvmset, mode='r') as river:
        write_plants(plants, vplants, river, elev, overwrite=ovwr)

    if skipped:
        for skip in skipped:
            print("Plant: %r, Point: %r, kind: %r" % skip)
    elev.close()

    # compute a buffer around the plants
    buff = vplants + 'buff'
    v.buffer(input=vplants,
             type='line',
             output=buff,
             distance=0.1,
             overwrite=ovwr)
    TMPVECT.append(buff)
    # return all the river segments that are not already with plants
    v.overlay(flags='t',
              ainput=opts['river'],
              atype='line',
              binput=buff,
              operator='not',
              output=opts['output_streams'],
              overwrite=ovwr)
Example #27
def main():
    """
    Adds GSFLOW parameters to a set of HRU sub-basins
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()
    basins = options['input']
    HRU = options['output']
    slope = options['slope']
    aspect = options['aspect']
    elevation = options['elevation']
    land_cover = options['cov_type']
    soil = options['soil_type']

    ################################
    # CREATE HRUs FROM SUB-BASINS  #
    ################################

    g.copy(vector=(basins, HRU), overwrite=gscript.overwrite())

    ############################################
    # ATTRIBUTE COLUMNS (IN ORDER FROM MANUAL) #
    ############################################

    # HRU
    hru_columns = []
    # Self ID
    hru_columns.append('id integer')  # nhru
    # Basic Physical Attributes (Geometry)
    hru_columns.append('hru_area double precision')  # acres (!!!!)
    hru_columns.append(
        'hru_area_m2 double precision')  # [not for GSFLOW: for me!]
    hru_columns.append('hru_aspect double precision')  # Mean aspect [degrees]
    hru_columns.append('hru_elev double precision')  # Mean elevation
    hru_columns.append('hru_lat double precision')  # Latitude of centroid
    hru_columns.append('hru_lon double precision')  # Longitude of centroid
    # unnecessary but why not?
    hru_columns.append('hru_slope double precision')  # Mean slope [percent]
    # Basic Physical Attributes (Other)
    #hru_columns.append('hru_type integer') # 0=inactive; 1=land; 2=lake; 3=swale; almost all will be 1
    #hru_columns.append('elev_units integer') # 0=feet; 1=meters. 0=default. I think I will set this to 1 by default.
    # Measured input
    hru_columns.append(
        'outlet_sta integer')  # Index of streamflow station at basin outlet:
    # station number if it has one, 0 if not
    # Note that the fields below expect projected coordinates, not lat/lon;
    # they really seem to work for any projected coordinates, with _x, _y in
    # meters, and _xlong, _ylat in feet (i.e. they are just northing and
    # easting). The meters and feet are not simple unit conversions of each
    # other, but are actually required by different modules in the code, and
    # are hence redundant but intentional.
    hru_columns.append('hru_x double precision')  # Easting [m]
    hru_columns.append('hru_xlong double precision')  # Easting [feet]
    hru_columns.append('hru_y double precision')  # Northing [m]
    hru_columns.append('hru_ylat double precision')  # Northing [feet]
    # Streamflow and lake routing
    hru_columns.append(
        'K_coef double precision'
    )  # Travel time of flood wave to next downstream segment;
    # this is the Muskingum storage coefficient
    # 1.0 for reservoirs, diversions, and segments flowing
    # out of the basin
    hru_columns.append(
        'x_coef double precision')  # Amount of attenuation of flow wave;
    # this is the Muskingum routing weighting factor
    # range: 0.0--0.5; default 0.2
    # 0 for all segments flowing out of the basin
    hru_columns.append('hru_segment integer'
                       )  # ID of stream segment to which flow will be routed
    # this is for non-cascade routing (flow goes directly
    # from HRU to stream segment)
    hru_columns.append('obsin_segment integer'
                       )  # Index of measured streamflow station that replaces
    # inflow to a segment
    hru_columns.append(
        'cov_type integer'
    )  # 0=bare soil;1=grasses; 2=shrubs; 3=trees; 4=coniferous
    hru_columns.append('soil_type integer')  # 1=sand; 2=loam; 3=clay

    # Create strings
    hru_columns = ",".join(hru_columns)

    # Add columns to tables
    v.db_addcolumn(map=HRU, columns=hru_columns, quiet=True)

    ###########################
    # UPDATE DATABASE ENTRIES #
    ###########################

    colNames = np.array(gscript.vector_db_select(HRU, layer=1)['columns'])
    colValues = np.array(
        list(gscript.vector_db_select(HRU, layer=1)['values'].values()))
    number_of_hrus = colValues.shape[0]
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()
    rnums = colValues[:, colNames == 'rnum'].astype(int).squeeze()

    nhru = np.arange(1, number_of_hrus + 1)
    nhrut = []
    for i in range(len(nhru)):
        nhrut.append((nhru[i], cats[i]))
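    # e.g. nhrut = [(1, 12), (2, 17), ...]: each tuple pairs a new sequential
    # HRU id with the existing category value it belongs to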
    # Access the HRUs
    hru = VectorTopo(HRU)
    # Open the map with topology:
    hru.open('rw')
    # Create a cursor
    cur = hru.table.conn.cursor()
    # Use it to loop across the table
    cur.executemany("update " + HRU + " set id=? where cat=?", nhrut)
    # Commit changes to the table
    hru.table.conn.commit()
    # Close the table
    hru.close()
    """
    # Do the same for basins <-------------- DO THIS OR SIMPLY HAVE HRUs OVERLAIN WITH GRID CELLS? IN THIS CASE, RMV AREA ADDITION TO GRAVRES
    v.db_addcolumn(map=basins, columns='id int', quiet=True)
    basins = VectorTopo(basins)
    basins.open('rw')
    cur = basins.table.conn.cursor()
    cur.executemany("update basins set id=? where cat=?", nhrut)
    basins.table.conn.commit()
    basins.close()
    """

    # if you want to append to table
    # cur.executemany("update HRU(id) values(?)", nhrut) # "insert into" will add rows

    #hru_columns.append('hru_area double precision')
    # Acres b/c USGS
    v.to_db(map=HRU,
            option='area',
            columns='hru_area',
            units='acres',
            quiet=True)
    v.to_db(map=HRU,
            option='area',
            columns='hru_area_m2',
            units='meters',
            quiet=True)

    # GET MEAN VALUES FOR THESE NEXT ONES, ACROSS THE BASIN

    # SLOPE (and aspect)
    #####################
    v.rast_stats(map=HRU,
                 raster=slope,
                 method='average',
                 column_prefix='tmp',
                 flags='c',
                 quiet=True)
    v.db_update(map=HRU,
                column='hru_slope',
                query_column='tmp_average',
                quiet=True)

    # ASPECT
    #########
    v.db_dropcolumn(map=HRU, columns='tmp_average', quiet=True)
    # Dealing with conversion from degrees (no good average) to something I can
    # average -- x- and y-vectors
    # Geographic coordinates, so sin=x, cos=y.... not that it matters so long
    # as I am consistent in how I return to degrees
    r.mapcalc('aspect_x = sin(' + aspect + ')',
              overwrite=gscript.overwrite(),
              quiet=True)
    r.mapcalc('aspect_y = cos(' + aspect + ')',
              overwrite=gscript.overwrite(),
              quiet=True)
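    # Aspect is averaged via its sine/cosine components and recombined with
    # arctan2 below; this circular mean avoids the artifact of a plain
    # arithmetic mean (e.g. averaging directions of 350 and 10 degrees
    # to 180).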
    #grass.run_command('v.db.addcolumn', map=HRU, columns='aspect_x_sum double precision, aspect_y_sum double precision, ncells_in_hru integer')
    v.rast_stats(map=HRU,
                 raster='aspect_x',
                 method='sum',
                 column_prefix='aspect_x',
                 flags='c',
                 quiet=True)
    v.rast_stats(map=HRU,
                 raster='aspect_y',
                 method='sum',
                 column_prefix='aspect_y',
                 flags='c',
                 quiet=True)
    hru = VectorTopo(HRU)
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.execute("SELECT cat,aspect_x_sum,aspect_y_sum FROM %s" % hru.name)
    _arr = np.array(cur.fetchall()).astype(float)
    _cat = _arr[:, 0]
    _aspect_x_sum = _arr[:, 1]
    _aspect_y_sum = _arr[:, 2]
    aspect_angle = np.arctan2(_aspect_y_sum, _aspect_x_sum) * 180. / np.pi
    aspect_angle[aspect_angle < 0] += 360  # all positive
    aspect_angle_cat = np.vstack((aspect_angle, _cat)).transpose()
    cur.executemany("update " + HRU + " set hru_aspect=? where cat=?",
                    aspect_angle_cat)
    hru.table.conn.commit()
    hru.close()

    # ELEVATION
    ############
    v.rast_stats(map=HRU,
                 raster=elevation,
                 method='average',
                 column_prefix='tmp',
                 flags='c',
                 quiet=True)
    v.db_update(map=HRU,
                column='hru_elev',
                query_column='tmp_average',
                quiet=True)
    v.db_dropcolumn(map=HRU, columns='tmp_average', quiet=True)

    # CENTROIDS
    ############

    # Get the x,y of each centroid. Some areas are not in the database table
    # but do have centroids, with duplicate category values, and it is hard
    # to find a good way to get rid of them!
    # Perhaps these are little dangles on the edges of the vectorization,
    # where the raster value was the same but pinched out into one or a few
    # cells? The map shows lots of extra centroids on area boundaries, and
    # removing small areas (though the threshold is hard to guess) gets rid
    # of these.

    hru = VectorTopo(HRU)
    hru.open('rw')
    hru_cats = []
    hru_coords = []
    for hru_i in hru:
        if type(hru_i) is vector.geometry.Centroid:
            hru_cats.append(hru_i.cat)
            hru_coords.append(hru_i.coords())
    hru_cats = np.array(hru_cats)
    hru_coords = np.array(hru_coords)
    hru.rewind()

    hru_area_ids = []
    for coor in hru_coords:
        _area = hru.find_by_point.area(Point(coor[0], coor[1]))
        hru_area_ids.append(_area)
    hru_area_ids = np.array(hru_area_ids)
    hru.rewind()

    hru_areas = []
    for _area_id in hru_area_ids:
        hru_areas.append(_area_id.area())
    hru_areas = np.array(hru_areas)
    hru.rewind()

    allcats = sorted(list(set(list(hru_cats))))

    # Now create weighted mean
    hru_centroid_locations = []
    for cat in allcats:
        hrus_with_cat = hru_cats[hru_cats == cat]
        if len(hrus_with_cat) == 1:
            hru_centroid_locations.append(
                (hru_coords[hru_cats == cat]).squeeze())
        else:
            _centroids = hru_coords[hru_cats == cat]
            _areas = hru_areas[hru_cats == cat]
            _x = np.average(_centroids[:, 0], weights=_areas)
            _y = np.average(_centroids[:, 1], weights=_areas)
            hru_centroid_locations.append(np.array([_x, _y]))
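    # e.g. a cat with centroids at (100, 200) and (300, 400) weighted by
    # areas 1 and 3 gets the location
    # ((100*1 + 300*3)/4, (200*1 + 400*3)/4) = (250, 350)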

    # Now upload weighted mean to database table
    # allcats and hru_centroid_locations are co-indexed
    index__cats = create_iterator(HRU)
    cur = hru.table.conn.cursor()
    for i in range(len(allcats)):
        # meters
        cur.execute('update ' + HRU + ' set hru_x=' +
                    str(hru_centroid_locations[i][0]) + ' where cat=' +
                    str(allcats[i]))
        cur.execute('update ' + HRU + ' set hru_y=' +
                    str(hru_centroid_locations[i][1]) + ' where cat=' +
                    str(allcats[i]))
        # feet
        cur.execute('update ' + HRU + ' set hru_xlong=' +
                    str(hru_centroid_locations[i][0] * 3.28084) +
                    ' where cat=' + str(allcats[i]))
        cur.execute('update ' + HRU + ' set hru_ylat=' +
                    str(hru_centroid_locations[i][1] * 3.28084) +
                    ' where cat=' + str(allcats[i]))
        # (un)Project to lat/lon
        _centroid_ll = list(gscript.parse_command('m.proj',
                                                  coordinates=list(
                                                      hru_centroid_locations[i]),
                                                  flags='od').keys())[0]
        _lon, _lat, _z = _centroid_ll.split('|')
        cur.execute('update ' + HRU + ' set hru_lon=' + _lon + ' where cat=' +
                    str(allcats[i]))
        cur.execute('update ' + HRU + ' set hru_lat=' + _lat + ' where cat=' +
                    str(allcats[i]))

    # feet -- not working.
    # Probably an issue with index__cats -- maybe fix later, if needed
    # But currently not a major speed issue
    """
    cur.executemany("update "+HRU+" set hru_xlong=?*3.28084 where hru_x=?", 
                    index__cats)
    cur.executemany("update "+HRU+" set hru_ylat=?*3.28084 where hru_y=?", 
                    index__cats)
    """

    cur.close()
    hru.table.conn.commit()
    hru.close()

    # ID NUMBER
    ############
    #cur.executemany("update "+HRU+" set hru_segment=? where id=?",
    #                index__cats)
    # Segment number = HRU ID number
    v.db_update(map=HRU, column='hru_segment', query_column='id', quiet=True)

    # LAND USE/COVER
    ############
    try:
        land_cover = int(land_cover)
    except ValueError:
        pass
    if isinstance(land_cover, int):
        if land_cover <= 3:
            v.db_update(map=HRU,
                        column='cov_type',
                        value=land_cover,
                        quiet=True)
        else:
            sys.exit(
                "WARNING: INVALID LAND COVER TYPE. CHECK INTEGER VALUES.\n"
                "EXITING TO ALLOW USER TO CHANGE BEFORE RUNNING GSFLOW")
    else:
        # NEED TO UPDATE THIS TO MODAL VALUE!!!!
        print "Warning: values taken from HRU centroids. Code should be updated to"
        print "acquire modal values"
        v.what_rast(map=HRU,
                    type='centroid',
                    raster=land_cover,
                    column='cov_type',
                    quiet=True)
        #v.rast_stats(map=HRU, raster=land_cover, method='average', column_prefix='tmp', flags='c', quiet=True)
        #v.db_update(map=HRU, column='cov_type', query_column='tmp_average', quiet=True)
        #v.db_dropcolumn(map=HRU, columns='tmp_average', quiet=True)

    # SOIL
    ############
    try:
        soil = int(soil)
    except ValueError:
        pass
    if isinstance(soil, int):
        if (soil > 0) and (soil <= 3):
            v.db_update(map=HRU, column='soil_type', value=soil, quiet=True)
        else:
            sys.exit("WARNING: INVALID SOIL TYPE. CHECK INTEGER VALUES.\n"
                     "EXITING TO ALLOW USER TO CHANGE BEFORE RUNNING GSFLOW")
    else:
        # NEED TO UPDATE THIS TO MODAL VALUE (see the sketch above)
        print "Warning: values taken from HRU centroids. Code should be updated to"
        print "acquire modal values"
        v.what_rast(map=HRU,
                    type='centroid',
                    raster=soil,
                    column='soil_type',
                    quiet=True)
Example #28
    def test_strahler(self):
        self.assertModule("v.stream.order",
                          input="stream_network",
                          points="stream_network_outlets",
                          output="stream_network_order_test_strahler",
                          threshold=25,
                          order=["strahler"],
                          overwrite=True,
                          verbose=True)

        # Check the strahler value
        v = VectorTopo(name="stream_network_order_test_strahler", mapset="")
        v.open(mode="r")

        self.assertTrue(v.exist())
        self.assertEqual(v.num_primitive_of("line"), 101)
        # feature 4
        self.assertEqual(v.read(4).attrs.cat, 41)
        self.assertEqual(v.read(4).attrs["outlet_cat"], 1)
        self.assertEqual(v.read(4).attrs["network"], 1)
        self.assertEqual(v.read(4).attrs["reversed"], 0)
        self.assertEqual(v.read(4).attrs["strahler"], 4)

        v.close()
Example #29
#!/usr/bin/env python

from grass.pygrass.vector import VectorTopo

okresy = VectorTopo('okresy_polygon', mapset='ruian')  # 'okresy' = districts
okresy.open('r')

# For each district area, count the neighbouring districts by collecting
# the area ids on either side of its boundaries.
for o in okresy.viter('areas'):
    sousede = set()  # neighbours
    for b in o.boundaries():
        for n in b.get_left_right():
            if n != -1 and n != o.id:
                sousede.add(n)

    print(u'{:20}: {}'.format(o.attrs['nazev'], len(sousede)))  # 'nazev' = name

okresy.close()
Example #30
import numpy as np

from grass.pygrass.gis.region import Region
from grass.pygrass.raster import RasterRow
from grass.pygrass.utils import coor2pixel
from grass.pygrass.vector import VectorTopo


def sample(vect_in_name, rast_in_name):
    """sample('point00', 'field')"""
    # instantiate the object maps
    vect_in = VectorTopo(vect_in_name)
    rast_in = RasterRow(rast_in_name)
    vect_out = VectorTopo('test_' + vect_in_name)
    # define the columns of the attribute table of the new vector map
    columns = [(u'cat',       'INTEGER PRIMARY KEY'),
               (rast_in_name,  'DOUBLE')]
    # open the maps
    vect_in.open('r')
    rast_in.open('r')
    vect_out.open('w', tab_cols=columns, link_driver='sqlite')
    # get the current region
    region = Region()
    # initialize the counter
    counter = 0
    data = []
    for pnt in vect_in.viter('points'):
        counter += 1
        # transform the spatial coordinates into (row, col) values
        row, col = coor2pixel(pnt.coords(), region)
        value = rast_in[int(row)][int(col)]
        data.append((counter, None if np.isnan(value) else float(value)))
        # write the geometry features
        vect_out.write(pnt)
    # write the attributes
    vect_out.table.insert(data, many=True)
    vect_out.table.conn.commit()
    # close the maps
    vect_in.close()
    rast_in.close()
    vect_out.close()
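
# Hypothetical usage, assuming an active GRASS session in which the vector
# map 'point00' and the raster 'field' from the docstring actually exist:
if __name__ == '__main__':
    sample('point00', 'field')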
Example #31
def main():
    soillossin = options['soillossin']
    soillossout = options['soillossout']
    factorold = options['factorold']
    
    factornew = options['factornew']
    map = options['map']
    factorcol = options['factorcol']
    
    flag_p = flags['p'] # patch factornew with factorold
    flag_k = flags['k'] # calculate k-factor components from % clay p_T, silt p_U, stones p_st, humus p_H 

     
    if not factornew:
        factors = {}
        if flag_k:
            gscript.message('Using factor derived from \
                soil components.')
            parcelmap = Vect(map)
            parcelmap.open(mode='rw', layer=1)
            parcelmap.table.filters.select()
            cur = parcelmap.table.execute()
            col_names = [cn[0] for cn in cur.description]
            rows = cur.fetchall()
           
            for col in (u'Kb',u'Ks',u'Kh', u'K'):
                if col not in parcelmap.table.columns:
                    parcelmap.table.columns.add(col,u'DOUBLE')
           
            for row in rows:
                rowid = row[1]
                p_T = row[7]
                p_U = row[8]
                p_st = row[9]
                p_H = row[10]
    
                print("Parzelle mit id %d :" %rowid)
                for sublist in bodenarten:
                    # p_T and p_U
                    if p_T in range(sublist[2],sublist[3]) \
                        and p_U in range(sublist[4],sublist[5]) :
                        print('Bodenart "' + sublist[1] 
                            + '", Kb = ' + str(sublist[6]))
                        Kb = sublist[6]
                        break
                
                for sublist in skelettgehalte:
                    if p_st < sublist[0]:
                        print('Skelettgehaltsklasse bis ' + str(sublist[0]) 
                            + ' , Ks = ' + str(sublist[1]))
                        Ks = sublist[1]
                        break
            
                   
                for sublist in humusgehalte:
                    if p_H < sublist[0]:
                        print('Humusgehaltsklasse bis ' + str(sublist[0])
                            + ' , Kh = ' + str(sublist[1]))
                        Kh = sublist[1]
                        break
                
                
                K = Kb * Ks * Kh
                print('K = ' + str(K))
        
                if K > 0:
                    parcelmap.table.execute("UPDATE " +  parcelmap.name 
                        + " SET"
                        + " Kb=" + str(Kb)
                        + ", Ks=" + str(Ks)
                        + ", Kh=" + str(Kh)
                        + ", K=" + str(K)
                        + " WHERE id=" + str(rowid) )
                    parcelmap.table.conn.commit()
                
            parcelmap.close()
            factorcol2 = 'K'
            
            factors['k'] = map.split('@')[0]+'.tmp.'+factorcol2
            v.to_rast(input=map, use='attr',
                   attrcolumn=factorcol2,
                   output=factors['k'])
            r.null(map=factors['k'], setnull='0')

        
        if factorcol:
            gscript.message('Using factor from column %s of \
                    vector map <%s>.' % (factorcol, map) )
                    
            factors['factorcol'] = map.split('@')[0]+'.tmp.' + factorcol
            v.to_rast(input=map, use='attr',
                   attrcolumn=factorcol,
                   output=factors['factorcol'])
            r.null(map=factors['factorcol'], setnull='0')
        
        print(factors.keys())
        if 'k' not in factors and 'factorcol' not in factors:
            gscript.fatal('Please provide either factor \
                raster map or valid vector map with factor column \
                (kfactor) or factor components columns (Kb, Ks, Kh)' )
        
        #if 'k' in factors and 'factorcol' in factors: 
    
        factornew = map.split('@')[0] + '.kfactor'
        if 'k' in factors and 'factorcol' in factors:
            r.patch(input=(factors['factorcol'], factors['k']),
                    output=factornew)
            
        elif 'k' in factors:
            g.copy(rast=(factors['k'],factornew))
            
        elif 'factorcol' in factors:
            g.copy(rast=(factors['factorcol'],factornew))

            
    if flag_p:
        #factorcorr = factorold + '.update'
        r.patch(input=(factornew,factorold), output=factornew)
        
    formula = soillossout + '=' + soillossin \
                + '/' + factorold  \
                + '*' + factornew
    r.mapcalc(formula)
            
    r.colors(map=soillossout, raster=soillossin)
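
# The lookup tables referenced above (bodenarten = soil texture classes,
# skelettgehalte = stone content classes, humusgehalte = humus content
# classes) are defined elsewhere in the full script. A structural sketch
# with PLACEHOLDER numbers (not real K-factor data), matching the indices
# the loops above expect:
bodenarten_sketch = [
    # [id, name, p_T_min, p_T_max, p_U_min, p_U_max, Kb]
    [1, 'Sand', 0, 5, 0, 10, 0.10],  # placeholder values
]
skelettgehalte_sketch = [
    # [upper class limit in %, Ks]
    [10, 0.9],  # placeholder values
]
humusgehalte_sketch = [
    # [upper class limit in %, Kh]
    [2, 1.0],  # placeholder values
]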
Example #32
gsetup.init(gisbase, gisdbase, location, mapset)

grass.run_command('r.in.gdal',
                  overwrite='true',
                  input=dgm_filepath,
                  output='dgm')
grass.run_command('v.in.ogr',
                  overwrite='true',
                  input=summit_filepath,
                  output='summit')

from grass.pygrass.vector import VectorTopo
from grass.pygrass.modules import Module

summit = VectorTopo("summit", "PERMANENT", LOCATION_NAME="Kufstein")
summit.open(mode='r')
pointsList = []
for i in range(len(summit)):
    pointsList.append(summit.read(i + 1))

grass.run_command("r.neighbors",
                  input="dgm",
                  output="dgm_maxfilter",
                  method="maximum",
                  size=31)
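
# A sketch extending the single-point query below to every summit: fetch the
# max-filtered elevation at each summit location in one call. (Illustrative
# only; the function name is made up, and it reuses maps defined above.)
def summit_filter_values(points):
    coords = [[p.x, p.y] for p in points]
    results = grass.raster_what('dgm_maxfilter', coords)
    return [r['dgm_maxfilter']['value'] for r in results]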
type("x")
x = grass.raster_what('dgm_maxfilter', [[pointsList[0].x, pointsList[0].y]],
                      env=None,
                      localized=False)
hs = x[0]["dgm_maxfilter"]["value"]
Example #33
    def test_all(self):
        self.assertModule("v.stream.order",
                          input="stream_network",
                          points="stream_network_outlets",
                          output="stream_network_order_test_all",
                          threshold=25,
                          order=["strahler", "shreve", "drwal", "scheidegger"],
                          overwrite=True,
                          verbose=True)

        # Check all values
        v = VectorTopo(name="stream_network_order_test_all", mapset="")
        v.open(mode="r")
        self.assertTrue(v.exist())
        self.assertEqual(v.num_primitive_of("line"), 101)
        # feature 4
        self.assertEqual(v.read(4).attrs.cat, 41)
        self.assertEqual(v.read(4).attrs["outlet_cat"], 1)
        self.assertEqual(v.read(4).attrs["network"], 1)
        self.assertEqual(v.read(4).attrs["reversed"], 0)
        self.assertEqual(v.read(4).attrs["strahler"], 4)
        self.assertEqual(v.read(4).attrs["shreve"], 32)
        self.assertEqual(v.read(4).attrs["drwal"], 6)
        self.assertEqual(v.read(4).attrs["scheidegger"], 64)
        v.close()

        # Check for column copy
        self.assertModule(
            "v.stream.order",
            input="stream_network_order_test_all",
            points="stream_network_outlets",
            output="stream_network_order_test_all_2",
            threshold=25,
            order=["strahler", "shreve", "drwal", "scheidegger"],
            columns=["strahler", "shreve", "drwal", "scheidegger"],
            overwrite=True,
            verbose=True)

        # Check all values and their copies
        v = VectorTopo(name="stream_network_order_test_all_2", mapset="")
        v.open(mode="r")
        self.assertTrue(v.exist())
        self.assertEqual(v.num_primitive_of("line"), 101)
        # feature 4
        self.assertEqual(v.read(4).attrs.cat, 4)
        self.assertEqual(v.read(4).attrs["outlet_cat"], 1)
        self.assertEqual(v.read(4).attrs["network"], 1)
        self.assertEqual(v.read(4).attrs["reversed"], 0)
        self.assertEqual(v.read(4).attrs["strahler"], 4)
        self.assertEqual(v.read(4).attrs["shreve"], 32)
        self.assertEqual(v.read(4).attrs["drwal"], 6)
        self.assertEqual(v.read(4).attrs["scheidegger"], 64)
        self.assertEqual(v.read(4).attrs["strahler_1"], 4)
        self.assertEqual(v.read(4).attrs["shreve_1"], 32)
        self.assertEqual(v.read(4).attrs["drwal_1"], 6)
        self.assertEqual(v.read(4).attrs["scheidegger_1"], 64)
        # feature 7
        self.assertEqual(v.read(7).attrs.cat, 7)
        self.assertEqual(v.read(7).attrs["outlet_cat"], 1)
        self.assertEqual(v.read(7).attrs["network"], 1)
        self.assertEqual(v.read(7).attrs["reversed"], 0)
        self.assertEqual(v.read(7).attrs["strahler"], 2)
        self.assertEqual(v.read(7).attrs["strahler_1"], 2)
        self.assertEqual(v.read(7).attrs["shreve"], 4)
        self.assertEqual(v.read(7).attrs["drwal"], 3)
        self.assertEqual(v.read(7).attrs["scheidegger"], 8)
        v.close()
Example #34
def create_test_vector_map(map_name="test_vector"):
    """This functions creates a vector map layer with points, lines, boundaries,
       centroids, areas, isles and attributes for testing purposes

       This should be used in doc and unit tests to create location/mapset
       independent vector map layer. This map includes 3 points, 3 lines,
       11 boundaries and 4 centroids. The attribute table contains cat, name
       and value columns.

        :param map_name: The vector map name that should be used



                                  P1 P2 P3
           6                       *  *  *
           5
           4    _______ ___ ___   L1 L2 L3
        Y  3   |A1___ *|  *|  *|   |  |  |
           2   | |A2*| |   |   |   |  |  |
           1   | |___| |A3 |A4 |   |  |  |
           0   |_______|___|___|   |  |  |
          -1
            -1 0 1 2 3 4 5 6 7 8 9 10 12 14
                           X
    """

    from grass.pygrass.vector import VectorTopo
    from grass.pygrass.vector.geometry import Point, Line, Centroid, Boundary

    cols = [(u'cat', 'INTEGER PRIMARY KEY'), (u'name', 'varchar(50)'),
            (u'value', 'double precision')]
    with VectorTopo(map_name, mode='w', tab_name=map_name,
                    tab_cols=cols) as vect:

        # Write 3 points
        vect.write(Point(10, 6), cat=1, attrs=("point", 1))
        vect.write(Point(12, 6), cat=1)
        vect.write(Point(14, 6), cat=1)
        # Write 3 lines
        vect.write(Line([(10, 4), (10, 2), (10, 0)]), cat=2, attrs=("line", 2))
        vect.write(Line([(12, 4), (12, 2), (12, 0)]), cat=2)
        vect.write(Line([(14, 4), (14, 2), (14, 0)]), cat=2)
        # boundaries 1 - 4
        vect.write(Boundary(points=[(0, 0), (0, 4)]))
        vect.write(Boundary(points=[(0, 4), (4, 4)]))
        vect.write(Boundary(points=[(4, 4), (4, 0)]))
        vect.write(Boundary(points=[(4, 0), (0, 0)]))
        # 5. boundary (Isle)
        vect.write(Boundary(points=[(1, 1), (1, 3), (3, 3), (3, 1), (1, 1)]))
        # boundaries 6 - 8
        vect.write(Boundary(points=[(4, 4), (6, 4)]))
        vect.write(Boundary(points=[(6, 4), (6, 0)]))
        vect.write(Boundary(points=[(6, 0), (4, 0)]))
        # boundaries 9 - 11
        vect.write(Boundary(points=[(6, 4), (8, 4)]))
        vect.write(Boundary(points=[(8, 4), (8, 0)]))
        vect.write(Boundary(points=[(8, 0), (6, 0)]))
        # Centroids, all have the same cat and attribute
        vect.write(Centroid(x=3.5, y=3.5), cat=3, attrs=("centroid", 3))
        vect.write(Centroid(x=2.5, y=2.5), cat=3)
        vect.write(Centroid(x=5.5, y=3.5), cat=3)
        vect.write(Centroid(x=7.5, y=3.5), cat=3)

        vect.organization = 'Thuenen Institut'
        vect.person = 'Soeren Gebbert'
        vect.title = 'Test dataset'
        vect.comment = 'This is a comment'

        vect.table.conn.commit()
        vect.close()
Example #35
    def run(self):
        logging.debug("Computation started")

        psc = self.input_psc.getValue()
        map_name = 'obce_psc_{}'.format(psc)

        # 'obce' = municipalities, searched by postal code ('psc')
        obce = VectorTopo('obce', mapset='psc')
        obce.open('r')

        # 'vystup' = output map
        vystup = VectorTopo(map_name)
        vystup.open('w', tab_cols=[('cat',   'INTEGER PRIMARY KEY'),
                                   ('nazev', 'TEXT'),
                                   ('psc',   'INTEGER')])

        obec_id = None
        obce_psc = set()
        for prvek in obce.viter('areas'):
            if prvek.attrs is None:
                continue
            if prvek.attrs['psc'] == psc:
                if obec_id is None:
                    obec_id = prvek.id

                # collect ids of all areas adjacent to the matched one
                for b in prvek.boundaries():
                    for n in b.read_area_ids():
                        if n != -1 and n != obec_id:
                            obce_psc.add(n)
        obce_psc.add(obec_id)

        # write each boundary only once ('hranice' = boundaries)
        hranice = list()
        cat = 1
        for prvek in obce.viter('areas'):
            if prvek.id not in obce_psc:
                continue

            for b in prvek.boundaries():
                if b.id not in hranice:
                    hranice.append(b.id)
                    vystup.write(b)

            vystup.write(prvek.centroid(), cat=cat,
                         attrs=(prvek.attrs['nazev'], prvek.attrs['psc']))
            cat += 1

        vystup.table.conn.commit()

        vystup.close()
        obce.close()

        logging.debug("Computation finished")

        return map_name
Example #36
def post_trails(real_elev, scanned_elev, filterResults, timeToFinish, logDir,
                env, **kwargs):
    env2 = get_environment(raster='scan_saved')
    gisenv = gscript.gisenv()
    logFile = os.path.join(
        logDir, 'log_{}_trails1.csv'.format(gisenv['LOCATION_NAME']))
    scoreFile = os.path.join(logDir,
                             'score_{}.csv'.format(gisenv['LOCATION_NAME']))
    slopes = gscript.list_grouped(
        type='raster', pattern="trails1_slope_dir_*_*_*")[gisenv['MAPSET']]
    lines = gscript.list_grouped(
        type='vector', pattern="trails1_line_*_*_*")[gisenv['MAPSET']]
    times = [each.split('_')[-3:] for each in slopes]
    score_sum = []
    score_max = []
    score_length = []
    score_points = []
    with open(logFile, 'w') as f:
        f.write(
            'time,slope_min,slope_max,slope_mean,slope_sum,length,point_count\n'
        )
        for i in range(len(slopes)):
            data_slopes = gscript.parse_command('r.univar',
                                                map=slopes[i],
                                                flags='g',
                                                env=env2)
            score_sum.append(float(data_slopes['sum']))
            score_max.append(float(data_slopes['max']))
            with VectorTopo(lines[i], mode='r') as v:
                try:
                    line = v.read(1)
                    point_count = len(line)
                    length = line.length()
                except IndexError:
                    length = 0
                    point_count = 0
                score_points.append(point_count)
                score_length.append(length)
            time = times[i]
            f.write(
                '{time},{sl_min},{sl_max},{sl_mean},{sl_sum},{length},{cnt}\n'.
                format(time='{}:{}:{}'.format(time[0], time[1], time[2]),
                       sl_min=data_slopes['min'],
                       sl_max=data_slopes['max'],
                       sl_mean=data_slopes['mean'],
                       sl_sum=data_slopes['sum'],
                       length=length,
                       cnt=point_count))
    with open(scoreFile, 'a') as f:
        # to make sure we don't get error if they skip quickly
        count = 3
        if len(score_points) < count:
            # shouldn't happen
            count = len(score_points)
        score_points = score_points[-count:]
        num_points = max(score_points)
        idx = score_points.index(num_points)
        slsum_score = score_sum[-count:][idx]
        slmax_score = score_max[-count:][idx]
        length_score = score_length[-count:][idx]

        # here convert to some scale?
        f.write("trails 1: sum slope: {}\n".format(slsum_score))
        f.write("trails 1: max slope: {}\n".format(slmax_score))
        f.write("trails 1: length line: {}\n".format(length_score))
        f.write("trails 1: filtered scans: {}\n".format(filterResults))
        f.write("trails 1: time: {}\n".format(timeToFinish))
Example #37
def main(opt, flg):
    #
    # Set check variables
    #
    overwrite = True
    rasters = opt["rasters"].split(",") if opt["rasters"] else []
    rprefix = opt["rprefix"].split(",") if opt["rprefix"] else []

    def split(x):
        return x.split("@") if "@" in x else (x, "")

    vname, vmset = split(opt["vector"])
    shpcsv = opt["shpcsv"] if opt["shpcsv"] else vname + ".csv"
    rstcsv = (opt["rstcsv"].split(",") if opt["rstcsv"] else
              [split(rst)[0] + ".csv" for rst in rasters])
    zones = opt["zones"] if opt["zones"] else vname + "_zones"
    nprocs = int(opt.get("nprocs", 1))
    if rasters:
        if rprefix and len(rasters) != len(rprefix):
            raise ValueError("rasters and rprefix must have the same length")
        if len(rasters) != len(rstcsv):
            raise ValueError("rasters and rstcsv must have the same length")
        prefixes = rprefix if rprefix else rasters
    else:
        prefixes = None

    skipshp = opt["skipshape"].split(",") if opt["skipshape"] else []
    skiprst = opt["skipunivar"].split(",") if opt["skipunivar"] else []
    layer = int(opt["layer"])
    newlayer = int(opt["newlayer"])
    newlayername = opt["newlayername"] if opt[
        "newlayername"] else vname + "_stats"
    newtabname = opt["newtabname"] if opt["newtabname"] else vname + "_stats"
    rstpercentile = float(opt["rstpercentile"])
    separator = opt.get("separator", ";")

    #
    # compute
    #
    if not os.path.exists(shpcsv):
        get_shp_csv(opt["vector"], shpcsv, overwrite, separator)
    if not get_mapset_raster(zones):
        get_zones(opt["vector"], zones, layer)
    if not rstcsv or not os.path.exists(rstcsv[0]):
        get_rst_csv(rasters, zones, rstcsv, rstpercentile, overwrite, nprocs,
                    separator)

    newlink = Link(newlayer, newlayername, newtabname)
    newtab = newlink.table()
    with Vector(vname, vmset, mode="r", layer=layer) as vct:
        mode = "r" if newlink in vct.dblinks else "rw"

    with VectorTopo(vname, vmset, mode=mode, layer=layer) as vct:
        update_cols(newtab,
                    shpcsv,
                    rstcsv,
                    prefixes,
                    skipshp,
                    skiprst,
                    separator=separator)

        if mode == "rw":
            # add the new link
            vct.dblinks.add(newlink)
            vct.build()
Example #38
def main():
    """
    Input for GSFLOW
    """

    reg = grass.region()

    options, flags = grass.parser()

    basin_mouth_E = options['E']
    basin_mouth_N = options['N']

    accum_thresh = options['threshold']

    # Create drainage direction, flow accumulation, and rivers

    # Manually create streams from accumulation.
    # The one funny step is the cleaning w/ snap, because r.thin allows cells that are
    # diagonal to each other to be next to each other -- creating boxes along the channel
    # that are not consistent with stream topology
    grass.mapcalc('streams_unthinned = flowAccum > '+str(accum_thresh), overwrite=True)
    grass.run_command('r.null', map='streams_unthinned', setnull=0)
    grass.run_command('r.thin', input='streams_unthinned', output='streams', overwrite=True)
    grass.run_command('r.to.vect', input='streams', output='streams_raw', type='line', overwrite=True)
    grass.run_command('v.clean', input='streams_raw', output='streams', tool='snap', threshold=1.42*(grass.region()['nsres'] + grass.region()['ewres'])/2., flags='c', overwrite=True) # threshold is one cell
    grass.run_command('v.to.rast', input='streams', output='streams_unthinned', use='val', val=1, overwrite=True)
    grass.run_command('r.thin', input='streams_unthinned', output='streams', overwrite=True)
    grass.run_command('r.to.vect', input='streams', output='streams', type='line', overwrite=True)
    grass.run_command('v.to.rast', input='streams', output='streams', use='cat', overwrite=True)
    # Create drainage basins
    grass.run_command('r.stream.basins', direction='drainageDirection', stream_rast='streams', basins='basins', overwrite=True)
    # If there is any more need to work with nodes, I should check the code I wrote for Kelly Monteleone's paper -- this has river identification and extraction, including intersection points.


    # Vectorize drainage basins
    grass.run_command('r.to.vect', input='basins', output='basins', type='area', flags='v', overwrite=True)

    # Then remove all sub-basins and segments that have negative flow accumulation
    # (i.e. have contributions from outside the map)

    ###################################################################
    # Intermediate step: Remove all basins that have offmap flow
    # i.e., those containing cells with negative flow accumulation
    ###################################################################

    # Method 3 -- even easier
    grass.mapcalc("has_offmap_flow = (flowAccum < 0)", overwrite=True)
    grass.run_command('r.null', map='has_offmap_flow', setnull=0)
    grass.run_command('r.to.vect', input='has_offmap_flow', output='has_offmap_flow', type='point', overwrite=True)
    grass.run_command('v.db.addcolumn', map='has_offmap_flow', columns='badbasin_cats integer')
    grass.run_command('v.what.vect', map='has_offmap_flow', column='badbasin_cats', query_map='basins', query_column='cat', dmax=60)
    colNames = np.array(grass.vector_db_select('has_offmap_flow', layer=1)['columns'])
    # offmap incoming flow points
    colValues = np.array(grass.vector_db_select('has_offmap_flow', layer=1)['values'].values())
    badcats = colValues[:,colNames == 'badbasin_cats'].squeeze()
    badcats = badcats[badcats != '']
    badcats = badcats.astype(int)
    badcats = list(set(list(badcats)))
    # basins for full cat list
    colNames = np.array(grass.vector_db_select('basins', layer=1)['columns'])
    colValues = np.array(grass.vector_db_select('basins', layer=1)['values'].values())
    allcats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    allcats = list(set(list(allcats)))
    # xor to goodcats
    #goodcats = set(badcats).symmetric_difference(allcats)
    # but better in case somehow there are badcats that are not allcats to do NOT
    goodcats = list(set(allcats) - set(badcats))
    goodcats_str = ','.join(str(cat) for cat in goodcats)
    grass.run_command('g.rename', vect='basins,tmp', overwrite=True)
    grass.run_command('v.extract', input='tmp', output='basins', cats=goodcats_str)
    grass.run_command('g.rename', vect='streams,tmp', overwrite=True)
    grass.run_command('v.extract', input='tmp', output='streams', cats=goodcats_str)
    #grass.run_command('g.rename', vect='stream_nodes,tmp', overwrite=True)
    #grass.run_command('v.extract', input='tmp', output='stream_nodes', cats=goodcats_str)

    # Fix pixellated pieces -- formerly here due to one-pixel-basin issue
    reg = grass.region()
    grass.run_command('g.rename', vect='basins,basins_messy', overwrite=True)
    grass.run_command('v.clean', input='basins_messy', output='basins', tool='rmarea', threshold=reg['nsres']*reg['ewres'], overwrite=True)

    # Optional, but recommended because not all basins need to connect:
    # choose a subset of the region in which to do the PRMS calculation
    grass.run_command( 'r.water.outlet', input='drainageDirection', output='studyBasin', coordinates=str(basin_mouth_E)+','+str(basin_mouth_N) , overwrite=True)
    # Vectorize
    grass.run_command( 'r.to.vect', input='studyBasin', output='studyBasin', type='area', overwrite=True)
    # If there are dangling areas (single-pixel?), just drop them. Not sure if this is the best way to do it
    # No check for two equal areas -- if we have this, there are more fundamental problems in defining 
    # a watershed in contiguous units

    #"""
    # ONLY IF MORE THAN ONE STUDY BASIN -- remove small areas
    grass.run_command( 'v.db.addcolumn', map='studyBasin', columns='area_m2 double precision' )
    grass.run_command( 'v.db.dropcolumn', map='studyBasin', columns='label' )
    grass.run_command( 'v.to.db', map='studyBasin', columns='area_m2', option='area', units='meters')
    drainageAreasRaw = sorted( grass.parse_command( 'v.db.select', map='studyBasin', flags='c').keys() ) # could update to grass.vector_db_select
    drainageAreasList = []
    for row in drainageAreasRaw:
      # cat, area
      drainageAreasList.append(row.split('|'))
    drainageAreasOnly = np.array(drainageAreasList).astype(float)
    catsOnly = drainageAreasOnly[:,0].astype(int)
    drainageAreasOnly = drainageAreasOnly[:,1]
    row_with_max_drainage_area = (drainageAreasOnly == np.max(drainageAreasOnly)).nonzero()[0][0]
    cat_with_max_drainage_area = catsOnly[row_with_max_drainage_area]
    grass.run_command('g.rename', vect='studyBasin,tmp', overwrite=True)
    grass.run_command('v.extract', input='tmp', output='studyBasin', cats=cat_with_max_drainage_area, overwrite=True)
    grass.run_command('g.remove', type='vector', name='tmp', flags='f')
    grass.run_command('v.to.rast', input='studyBasin', output='studyBasin', use='val', value=1, overwrite=True)
    #"""
    """
    # Remove small areas -- easier, though not as sure, as the method above
    grass.run_command('v.rename', vect='studyBasin,tmp', overwrite=True)
    grass.run_command('v.clean', input='tmp', output='studyBasin', tool='rmarea', threshold=1.01*(grass.region()['nsres'] * grass.region()['ewres']), flags='c', overwrite=True) # threshold is one cell
    """


    ###############
    # PLACEHOLDER #
    ###################################################################
    # To do in near future: limit to this basin
    ###################################################################

    # Next, get the order of basins the old-fashioned way: coordinates of endpoints of lines
    # Because I can't use GRASS to query multiple points
    #grass.run_command('v.extract', input='streams', output='streamSegments', type='line', overwrite=True)
    # Maybe I don't even need nodes! 9/4/16 -- nope, doesn't seem so.
    grass.run_command('g.copy', vect='streams,streamSegments')
    grass.run_command('v.db.addcolumn', map='streamSegments', columns='z double precision, flow_accum double precision, x1 double precision, y1 double precision, x2 double precision, y2 double precision')
    grass.run_command('v.to.db', map='streamSegments', option='start', columns='x1, y1')
    grass.run_command('v.to.db', map='streamSegments', option='end', columns='x2, y2')

    colNames = np.array(grass.vector_db_select('streamSegments')['columns'])
    colValues = np.array(grass.vector_db_select('streamSegments')['values'].values())
    cats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    xy1 = colValues[:,(colNames == 'x1') + (colNames == 'y1')].astype(float)
    xy2 = colValues[:,(colNames == 'x2') + (colNames == 'y2')].astype(float)
    xy  = np.vstack((xy1, xy2))

    # xy1: UPSTREAM
    # xy2: DOWNSTREAM
    # (I checked.)
    # So now can use this information to find headwaters and mouths

    # Not sure that this is necessary
    nsegs_at_point_1 = []
    nsegs_at_point_2 = []
    for row in xy1:
      nsegs_at_point_1.append(np.sum( np.prod(xy == row, axis=1)))
    for row in xy2:
      nsegs_at_point_2.append(np.sum( np.prod(xy == row, axis=1)))
    nsegs_at_point_1 = np.array(nsegs_at_point_1)
    nsegs_at_point_2 = np.array(nsegs_at_point_2)
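
    # As the comment above suggests, these counts identify headwaters and
    # mouths: an upstream endpoint that no other segment endpoint touches is
    # a headwater; a downstream endpoint no other touches is a basin mouth.
    # (Sketch; these two arrays are not used further in the original.)
    headwaters_xy = xy1[nsegs_at_point_1 == 1]
    mouths_xy = xy2[nsegs_at_point_2 == 1]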


    # HRU's have same numbers as their enclosed segments
    # NOT TRUE IN GENERAL -- JUST FOR THIS CASE WITH SUB-BASINS -- WILL NEED TO FIX IN FUTURE



    #############
    # Now, let's copy/rename the sub-basins to HRU and the streamSegments to segment and give them attributes
    ###########################################################################################################

    # Attributes (in order given in manual)

    # HRU
    hru_columns = []
    # Self ID
    hru_columns.append('id integer') # nhru
    # Basic Physical Attributes (Geometry)
    hru_columns.append('hru_area double precision') # acres (!!!!)
    hru_columns.append('hru_aspect double precision') # Mean aspect [degrees]
    hru_columns.append('hru_elev double precision') # Mean elevation
    hru_columns.append('hru_lat double precision') # Latitude of centroid
    hru_columns.append('hru_slope double precision') # Mean slope [percent]
    # Basic Physical Attributes (Other)
    #hru_columns.append('hru_type integer') # 0=inactive; 1=land; 2=lake; 3=swale; almost all will be 1
    #hru_columns.append('elev_units integer') # 0=feet; 1=meters. 0=default. I think I will set this to 1 by default.
    # Measured input
    hru_columns.append('outlet_sta integer') # Index of streamflow station at basin outlet:
                                         #   station number if it has one, 0 if not
    #    Note that the below specify projections and not lat/lon; they really seem
    #    to work for any projected coordinates, with _x, _y, in meters, and _xlong, 
    #    _ylat, in feet (i.e. they are just northing and easting). The meters and feet
    #    are not just simple conversions, but actually are required for different
    #    modules in the code, and are hence redundant but intentional.
    hru_columns.append('hru_x double precision') # Easting [m]
    hru_columns.append('hru_xlong double precision') # Easting [feet]
    hru_columns.append('hru_y double precision') # Northing [m]
    hru_columns.append('hru_ylat double precision') # Northing [feet]
    # Streamflow and lake routing
    hru_columns.append('K_coef double precision') # Travel time of flood wave to next downstream segment;
                                                  #   this is the Muskingum storage coefficient
                                                  #   1.0 for reservoirs, diversions, and segments flowing
                                                  #   out of the basin
    hru_columns.append('x_coef double precision') # Amount of attenuation of flow wave;
                                                  #   this is the Muskingum routing weighting factor
                                                  #   range: 0.0--0.5; default 0.2
                                                  #   0 for all segments flowing out of the basin
    hru_columns.append('hru_segment integer') # ID of stream segment to which flow will be routed
                                              #   this is for non-cascade routing (flow goes directly
                                              #   from HRU to stream segment)
    hru_columns.append('obsin_segment integer') # Index of measured streamflow station that replaces
                                                #   inflow to a segment

    # Segments
    segment_columns = []
    # Self ID
    segment_columns.append('id integer') # nsegment
    # Streamflow and lake routing
    segment_columns.append('tosegment integer') # Index of downstream segment to which a segment
                                                #   flows (thus differentiating it from hru_segment,
                                                #   which is for HRU's, though segment and HRU ID's
                                                #   are the same when HRU's are sub-basins

    # PRODUCE THE DATA TABLES
    ##########################

    # Create strings
    hru_columns = ",".join(hru_columns)
    segment_columns = ",".join(segment_columns)

    #"""
    # Copy
    grass.run_command('g.copy', vect='basins,HRU', overwrite=True)
    grass.run_command('g.copy', vect='streamSegments,segment', overwrite=True)
    #"""

    # Rename / subset
    """
    # OR GO BACK TO HRU_messy
    grass.run_command('v.overlay', ainput='basins', binput='studyBasin', operator='and', output='HRU_messy', overwrite=True)
    grass.run_command('v.overlay', ainput='streamSegments', binput='studyBasin', operator='and', output='segment_messy', overwrite=True)
    # And clean as well
    grass.run_command('v.clean', input='HRU_messy', output='HRU', tool='rmarea', threshold=reg['nsres']*reg['ewres']*40, overwrite=True)
    grass.run_command('v.clean', input='segment_messy', output='segment', tool='rmdangle', threshold=reg['nsres']*2, overwrite=True)
    # And now that the streams and HRU's no longer have the same cat values, fix 
    # this.
    grass.run_command('v.db.droptable', map='HRU', flags='f')
    grass.run_command('v.db.droptable', map='segment', flags='f')
    #grass.run_command('v.category', input='HRU', option='del', cat='-1', out='tmp', overwrite=True)
    #grass.run_command('v.category', input='tmp', option='add', out='HRU' overwrite=True)
    grass.run_command('v.db.addtable', map='HRU')
    grass.run_command('v.db.addtable', map='segment')

    grass.run_comm


    v.clean HRU
    v.clean
    v
    v.what.vect 
    """

    #grass.run_command('v.clean', input='segment_messy', output='HRU', tool='rmarea', threshold=reg['nsres']*reg['ewres']*20, overwrite=True)


    # Add columns to tables
    grass.run_command('v.db.addcolumn', map='HRU', columns=hru_columns)
    grass.run_command('v.db.addcolumn', map='segment', columns=segment_columns)


    # Produce the data table entries
    ##################################

    """
    # ID numbers
    # There should be a way to do this all at once, but...
    for i in range(len(cats)):
      grass.run_command('v.db.update', map='HRU', column='id', value=nhru[i], where='cat='+str(cats[i]))
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    for i in range(len(cats)):
      grass.run_command('v.db.update', map='segment', column='id', value=nsegment[i], where='cat='+str(cats[i]))
    """

    nhru = np.arange(1, xy1.shape[0]+1)
    nhrut = []
    for i in range(len(nhru)):
      nhrut.append( (nhru[i], cats[i]) )
    # Access the HRU's 
    hru = VectorTopo('HRU')
    # Open the map with topology:
    hru.open('rw')
    # Create a cursor
    cur = hru.table.conn.cursor()
    # Use it to loop across the table
    cur.executemany("update HRU set id=? where cat=?", nhrut)
    # Commit changes to the table
    hru.table.conn.commit()
    # Close the table
    hru.close()

    # if you want to append to table
    # cur.executemany("update HRU(id) values(?)", nhrut) # "insert into" will add rows

    # Same for segments
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general

    # Somehow only works after I v.clean, not right after v.overlay
    segment = VectorTopo('segment')
    segment.open('rw')
    cur = segment.table.conn.cursor()
    cur.executemany("update segment set id=? where cat=?", nsegmentt)
    segment.table.conn.commit()
    segment.close()

    #hru_columns.append('hru_area double precision')
    grass.run_command('v.to.db', map='HRU', option='area', columns='hru_area', units='acres')

    # GET MEAN VALUES FOR THESE NEXT ONES, ACROSS THE BASIN

    # hru_columns.append('hru_aspect double precision') # Mean aspect [degrees]
    # hru_columns.append('hru_slope double precision') # Mean slope [percent]
    # Slope
    grass.run_command('r.slope.aspect', elevation='srtm', slope='tmp', aspect='aspect', format='percent', overwrite=True) # zscale=0.01 also works to make percent be decimal 0-1
    grass.mapcalc('slope = tmp / 100.', overwrite=True)
    grass.run_command('v.rast.stats', map='HRU', raster='slope', method='average', column_prefix='tmp', flags='c')
    grass.run_command('v.db.update', map='HRU', column='hru_slope', query_column='tmp_average')
    grass.run_command('v.db.dropcolumn', map='HRU', column='tmp_average')
    # Dealing with conversion from degrees (no good average) to something I can
    # average -- x- and y-vectors
    # Geographic coordinates, so sin=x, cos=y.... not that it matters so long 
    # as I am consistent in how I return to degrees
    grass.mapcalc('aspect_x = sin(aspect)', overwrite=True)
    grass.mapcalc('aspect_y = cos(aspect)', overwrite=True)
    #grass.run_command('v.db.addcolumn', map='HRU', columns='aspect_x_sum double precision, aspect_y_sum double precision, ncells_in_hru integer')
    grass.run_command('v.rast.stats', map='HRU', raster='aspect_x', method='sum', column_prefix='aspect_x', flags='c')
    grass.run_command('v.rast.stats', map='HRU', raster='aspect_y', method='sum', column_prefix='aspect_y', flags='c')
    # Not actually needed, but maybe good to know
    #grass.run_command('v.rast.stats', map='HRU', raster='aspect_y', method='number', column_prefix='tmp', flags='c')
    #grass.run_command('v.db.renamecolumn', map='HRU', column='tmp_number,ncells_in_hru')
    # NO TRIG FUNCTIONS IN SQLITE!
    #grass.run_command('v.db.update', map='HRU', column='hru_aspect', query_column='DEGREES(ATN2(aspect_y_sum, aspect_x_sum))') # Getting 0, why?
    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.execute("SELECT cat,aspect_x_sum,aspect_y_sum FROM %s" %hru.name)
    _arr = np.array(cur.fetchall())
    _cat = _arr[:,0]
    _aspect_x_sum = _arr[:,1]
    _aspect_y_sum = _arr[:,2]
    aspect_angle = np.arctan2(_aspect_y_sum, _aspect_x_sum) * 180./np.pi
    aspect_angle[aspect_angle < 0] += 360 # all positive
    aspect_angle_cat = np.vstack((aspect_angle, _cat)).transpose()
    cur.executemany("update HRU set hru_aspect=? where cat=?", aspect_angle_cat)
    hru.table.conn.commit()
    hru.close()

    # hru_columns.append('hru_elev double precision') # Mean elevation
    grass.run_command('v.rast.stats', map='HRU', raster='srtm', method='average', column='tmp', flags='c')
    grass.run_command('v.db.update', map='HRU', column='hru_elev', query_column='tmp_average')
    grass.run_command('v.db.dropcolumn', map='HRU', column='tmp_average')

    # get x,y of centroid -- but have areas not in database table, that do have
    # centroids, and having a hard time finding a good way to get rid of them!
    # They have duplicate category values!
    # Perhaps these are little dangles on the edges of the vectorization where
    # the raster value was the same but pinched out into 1-a few cells?
    # From looking at map, lots of extra centroids on area boundaries, and removing
    # small areas (though threshold hard to guess) gets rid of these

    """
    g.copy vect=HRU,HRUorig # HACK!!!
    v.clean in=HRUorig out=HRU tool=rmarea --o thresh=15000
    """

    #grass.run_command( 'g.rename', vect='HRU,HRU_too_many_centroids')
    #grass.run_command( 'v.clean', input='HRU_too_many_centroids', output='HRU', tool='rmdac')
    grass.run_command('v.db.addcolumn', map='HRU', columns='centroid_x double precision, centroid_y double precision')
    grass.run_command( 'v.to.db', map='HRU', type='centroid', columns='centroid_x, centroid_y', option='coor', units='meters')

    # hru_columns.append('hru_lat double precision') # Latitude of centroid
    colNames = np.array(grass.vector_db_select('HRU', layer=1)['columns'])
    colValues = np.array(grass.vector_db_select('HRU', layer=1)['values'].values())
    xy = colValues[:,(colNames=='centroid_x') + (colNames=='centroid_y')]
    np.savetxt('_xy.txt', xy, delimiter='|', fmt='%s')
    grass.run_command('m.proj', flags='od', input='_xy.txt', output='_lonlat.txt', overwrite=True)
    lonlat = np.genfromtxt('_lonlat.txt', delimiter='|',)[:,:2]
    lonlat_cat = np.concatenate((lonlat, np.expand_dims(_cat, 1)), axis=1)

    # why not just get lon too?
    grass.run_command('v.db.addcolumn', map='HRU', columns='hru_lon double precision')

    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.executemany("update HRU set hru_lon=?, hru_lat=? where cat=?", lonlat_cat)
    hru.table.conn.commit()
    hru.close()

    # Easting and Northing for other columns
    grass.run_command('v.db.update', map='HRU', column='hru_x', query_column='centroid_x')
    grass.run_command('v.db.update', map='HRU', column='hru_xlong', query_column='centroid_x*3.28084') # feet
    grass.run_command('v.db.update', map='HRU', column='hru_y', query_column='centroid_y')
    grass.run_command('v.db.update', map='HRU', column='hru_ylat', query_column='centroid_y*3.28084') # feet


    # Streamflow and lake routing
    # tosegment
    """
    # THIS IS THE NECESSARY PART
    # CHANGED (BELOW) TO RE-DEFINE NUMBERS IN SEQUENCE AS HRU'S INSTEAD OF USING
    # THE CAT VALUES
    # Get the first channels in the segment
    tosegment = np.zeros(len(cats)) # default to 0 if they do not flow to another segment
    # Loop over all segments
    #for i in range(len(cats)):
    # From outlet segment
    for i in range(len(xy2)):
      # to inlet segment
      inlets = np.prod(xy1 == xy2[i], axis=1)
      # Update inlet segments with ID of outlets
      tosegment[inlets.nonzero()] = cats[i]
    tosegment_cat = tosegment.copy()
    """

    tosegment_cats = np.zeros(len(cats)).astype(int) # default to 0 if they do not flow to another segment
    tosegment = np.zeros(len(cats)).astype(int) # default to 0 if they do not flow to another segment
    # From outlet segment
    for i in range(len(xy2)):
      # to outlet segment
      outlets = np.prod(xy2 == xy1[i], axis=1)
      # Update outlet segments with ID of inlets
      tosegment[outlets.nonzero()] = nhru[i]
      tosegment_cats[outlets.nonzero()] = cats[i]

    """
      # BACKWARDS!
      # to inlet segment
      inlets = np.prod(xy1 == xy2[i], axis=1)
      # Update inlet segments with ID of outlets
      tosegment_cats[inlets.nonzero()] = cats[i]
    """

    # Now, just update tosegment (segments) and hru_segment (hru's)
    # In this case, they are the same.
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general
    # Tuple for upload to SQL
    # 0 is the default value if it doesn't go into any other segment (i.e. flows
    # off-map)
    tosegmentt = []
    tosegment_cats_t = []
    for i in range(len(nsegment)):
      tosegmentt.append( (tosegment[i], nsegment[i]) )
      tosegment_cats_t.append( (tosegment_cats[i], cats[i]) )
    # Once again, special case
    hru_segmentt = tosegmentt

    # Loop check!
    # Weak loop checker - will only detect direct ping-pong.
    loops = []
    tosegmenta = np.array(tosegmentt)
    for i in range(len(tosegmenta)):
      for j in range(len(tosegmenta)):
        if (tosegmenta[i] == tosegmenta[j][::-1]).all():
          loops.append(tosegmenta[i])
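
    # A sketch (not called) of a fuller loop check than the ping-pong test
    # above: follow the tosegment chain from every segment id and flag any
    # id encountered twice. Assumes ids are 1..n and 0 means off-map.
    def _find_routing_loops(tosegment_ids):
        loops_found = set()
        for start in range(1, len(tosegment_ids) + 1):
            seen = set()
            node = start
            while node != 0:
                if node in seen:
                    loops_found.add(node)
                    break
                seen.add(node)
                node = int(tosegment_ids[node - 1])
        return loops_found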

    segment = VectorTopo('segment')
    segment.open('rw')
    cur = segment.table.conn.cursor()
    cur.executemany("update segment set tosegment=? where id=?", tosegmentt)
    segment.table.conn.commit()
    segment.close()

    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.executemany("update HRU set hru_segment=? where id=?", hru_segmentt)
    hru.table.conn.commit()
    hru.close()


    #grass.run_command('g.rename', vect='HRU_all_2,HRU', overwrite=True)
    #grass.run_command('g.rename', vect='segment_all_2,segment', overwrite=True)

    # In study basin?
    grass.run_command('v.db.addcolumn', map='segment', columns='in_study_basin int')
    grass.run_command('v.db.addcolumn', map='HRU', columns='in_study_basin int')
    grass.run_command('v.what.vect', map='segment', column='in_study_basin', query_map='studyBasin', query_column='value')
    grass.run_command('v.what.vect', map='HRU', column='in_study_basin', query_map='segment', query_column='in_study_basin')

    # Save global segment+HRU
    grass.run_command('g.rename', vect='HRU,HRU_all')
    grass.run_command('g.rename', vect='segment,segment_all')

    # Output HRU -- will need to ensure that this is robust!
    grass.run_command('v.extract', input='HRU_all', output='HRU', where='in_study_basin=1', overwrite=True)
    grass.run_command('v.extract', input='segment_all', output='segment', where='in_study_basin=1', overwrite=True)


    colNames = np.array(grass.vector_db_select('segment')['columns'])
    colValues = np.array(grass.vector_db_select('segment')['values'].values())
    cats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    xy1 = colValues[:,(colNames == 'x1') + (colNames == 'y1')].astype(float)
    xy2 = colValues[:,(colNames == 'x2') + (colNames == 'y2')].astype(float)
    xy  = np.vstack((xy1, xy2))

    # Redo nhru down here
    nhru = np.arange(1, xy1.shape[0]+1)
    nhrut = []
    for i in range(len(nhru)):
      nhrut.append( (nhru[i], cats[i]) )
      """
      n = 1
      if i != 1:
        nhrut.append( (n, cats[i]) )
        n += 1
      """
      
    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.executemany("update HRU set id=? where cat=?", nhrut)
    hru.table.conn.commit()
    hru.close()

    # if you want to append to table
    # cur.executemany("update HRU(id) values(?)", nhrut) # "insert into" will add rows

    # Same for segments
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general

    # Somehow only works after I v.clean, not right after v.overlay
    segment = VectorTopo('segment')
    segment.open('rw')
    cur = segment.table.conn.cursor()
    cur.executemany("update segment set id=? where cat=?", nsegmentt)
    segment.table.conn.commit()
    segment.close()


    tosegment_cats = np.zeros(len(cats)).astype(int) # default to 0 if they do not flow to another segment
    tosegment = np.zeros(len(cats)).astype(int) # default to 0 if they do not flow to another segment
    # From outlet segment
    for i in range(len(xy2)):
      # to outlet segment
      outlets = np.prod(xy2 == xy1[i], axis=1)
      # Update outlet segments with ID of inlets
      tosegment[outlets.nonzero()] = nhru[i]
      tosegment_cats[outlets.nonzero()] = cats[i]

    # Now, just update tosegment (segments) and hru_segment (hru's)
    # In this case, they are the same.
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general
    # Tuple for upload to SQL
    # 0 is the default value if it doesn't go into any other segment (i.e. flows
    # off-map)
    tosegmentt = []
    tosegment_cats_t = []
    for i in range(len(nsegment)):
      tosegmentt.append( (tosegment[i], nsegment[i]) )
      tosegment_cats_t.append( (tosegment_cats[i], cats[i]) )
    # Once again, special case
    hru_segmentt = tosegmentt

    # Loop check!
    # Weak loop checker - will only detect direct ping-pong; see the fuller
    # sketch after the first loop check above.
    loops = []
    tosegmenta = np.array(tosegmentt)
    for i in range(len(tosegmenta)):
      for j in range(len(tosegmenta)):
        if (tosegmenta[i] == tosegmenta[j][::-1]).all():
          loops.append(tosegmenta[i])


    segment = VectorTopo('segment')
    segment.open('rw')
    cur = segment.table.conn.cursor()
    cur.executemany("update segment set tosegment=? where id=?", tosegmentt)
    segment.table.conn.commit()
    segment.close()

    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.executemany("update HRU set hru_segment=? where id=?", hru_segmentt)
    hru.table.conn.commit()
    hru.close()

    # More old-fashioned way:
    os.system('v.db.select segment sep=comma > segment.csv')
    os.system('v.db.select HRU sep=comma > HRU.csv')
    # and then sort by id, manually
    # And then manually change the last segment's "tosegment" to 0.
    # Except in this case, it was 0!
    # Maybe I managed to do this automatically above... but tired and late, 
    # so will check later
    # but hoping I did something right by re-doing all of the above before
    # saving (and doing so inside this smaller basin)

    print ""
    print "PRMS PORTION COMPLETE."
    print ""



    ###########
    # MODFLOW #
    ###########

    print ""
    print "STARTING MODFLOW PORTION."
    print ""

    # Generate coarse box for MODFLOW (ADW, 4 September, 2016)

    grass.run_command('g.region', rast='srtm')
    grass.run_command('g.region', n=7350000, s=7200000, w=170000, e=260000)
    reg = grass.region()
    MODFLOWres = 2000.
    grass.run_command('v.to.rast', input='HRU', output='allHRUs', use='val', val=1.0, overwrite=True)
    grass.run_command('r.null', map='allHRUs', null='0')
    grass.run_command('r.colors', map='allHRUs', color='grey', flags='n')
    grass.run_command('g.region', res=MODFLOWres)
    grass.run_command('r.resamp.stats', method='average', input='allHRUs', output='fraction_of_HRU_in_MODFLOW_cell', overwrite=True)
    grass.run_command('r.colors', map='fraction_of_HRU_in_MODFLOW_cell', color='grey', flags='n')


    print ""
    print "MODFLOW PORTION COMPLETE."
    print ""
Example #39
from grass.pygrass.gis.region import Region
from grass.pygrass.raster import RasterRowIO, RasterSegment
from grass.pygrass.utils import pixel2coor
from grass.pygrass.vector import VectorTopo
from grass.pygrass.vector.geometry import Line


def vect(stream_in_name, stream_out_name,
         direction_in_name, accumulation_in_name, distance_in_name):
    '''Builds vector map from stream raster map.

    Relies on a module-level `shift` mapping from drainage-direction codes
    to (di, dj) offsets; see the sketch after this function.
    '''

    # Instantiate maps
    print "Fetching maps..."
    stream_in       = RasterRowIO(stream_in_name)
    direction_in    = RasterSegment(direction_in_name)
    accumulation_in = RasterSegment(accumulation_in_name)
    distance_in     = RasterSegment(distance_in_name)

    # Initialize output
    stream_out      = VectorTopo(stream_out_name)
    # Define the new vector map attribute table columns
    columns = [(u"cat", "INTEGER PRIMARY KEY"),
               (u"fid", "INTEGER"),
               (u"accum", "DOUBLE"),
               (u"dist", "DOUBLE"),
               (u"source_i", "INTEGER"),
               (u"source_j", "INTEGER"),
               (u"target_i", "INTEGER"),
               (u"target_j", "INTEGER")]
    print "Opening output..."
    stream_out.open('w', tab_name = stream_out_name, tab_cols = columns)

    # Open maps
    print "Loading maps..."
    stream_in.open('r')
    direction_in.open(mode = 'r')
    accumulation_in.open(mode = 'r')
    distance_in.open(mode = 'r')

    # Get the current region to compute coordinates
    region = Region()
    x_shift = region.ewres*.5
    y_shift = region.nsres*.5*(-1.0)


    print "Processing..."
    # For each stream cell...
    i = 0
    for row in stream_in:

        j = 0
        for cell in row:

            if cell < 0:
                j += 1
                continue

            # Retrieve data (direction, accumulation and distance)
            direction    = direction_in[i, j]
            accumulation = accumulation_in[i, j]
            distance     = distance_in[i, j]

            # Get i and j shifts from direction
            (di, dj) = shift[direction]

            # Compute unit vector start and end geo coordinates
            (source_y, source_x) = pixel2coor((j,      i),      region)
            (target_y, target_x) = pixel2coor((j + dj, i + di), region)

            # Build unit vector
            stream_out.write(Line([(source_x + x_shift, source_y + y_shift),
                                   (target_x + x_shift, target_y + y_shift)]),
                             (cell, accumulation, distance, i, j, i + di, j + dj)
                             )

            j += 1

        i += 1

    # Commit database changes
    stream_out.table.conn.commit()

    # Close maps
    stream_in.close()
    direction_in.close()
    accumulation_in.close()
    distance_in.close()
    stream_out.close()
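
# The `shift` lookup used above is not defined in this excerpt. A plausible
# sketch, assuming r.watershed-style drainage directions numbered 1-8
# counterclockwise from north-east (an assumption -- verify against the
# direction map actually used), with (di, dj) = (row, col) offsets:
shift = {
    1: (-1,  1),  # NE
    2: (-1,  0),  # N
    3: (-1, -1),  # NW
    4: ( 0, -1),  # W
    5: ( 1, -1),  # SW
    6: ( 1,  0),  # S
    7: ( 1,  1),  # SE
    8: ( 0,  1),  # E
}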
Example #40
def main():

    input = options["input"]
    if options["refline"]:
        refline_cat = int(options["refline"])
    else:
        refline_cat = None
    nb_vertices = int(options["vertices"])
    if options["range"]:
        search_range = float(options["range"])
    else:
        search_range = None
    output = options["output"]
    transversals = flags["t"]
    median = flags["m"]

    global tmp_points_map
    global tmp_centerpoints_map
    global tmp_line_map
    global tmp_cleaned_map
    global tmp_map
    tmp_points_map = "points_map_tmp_%d" % os.getpid()
    tmp_centerpoints_map = "centerpoints_map_tmp_%d" % os.getpid()
    tmp_line_map = "line_map_tmp_%d" % os.getpid()
    tmp_cleaned_map = "cleaned_map_tmp_%d" % os.getpid()
    tmp_map = "generaluse_map_tmp_%d" % os.getpid()

    nb_lines = grass.vector_info_topo(input)["lines"]

    # Find best reference line and max distance between centerpoints of lines
    segment_input = ""
    categories = grass.read_command("v.category",
                                    input=input,
                                    option="print",
                                    quiet=True).splitlines()
    # Build v.segment point rules ("P <point id> <line cat> <offset>"):
    # one midpoint (50% offset) per input line, reusing the cat as point id.
    for category in categories:
        segment_input += "P {}".format(category.strip())
        segment_input += " {} {}".format(category.strip(), " 50%")
        segment_input += os.linesep

    grass.write_command(
        "v.segment",
        input=input,
        output=tmp_centerpoints_map,
        rules="-",
        stdin=segment_input,
        quiet=True,
    )

    center_distances = grass.read_command(
        "v.distance",
        from_=tmp_centerpoints_map,
        to=tmp_centerpoints_map,
        upload="dist",
        flags="pa",
        quiet=True,
    ).splitlines()

    cats = []
    mean_dists = []
    count = 0
    for center in center_distances:
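        # Skip the leading header lines of the v.distance report.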
        if count < 2:
            count += 1
            continue
        cat = center.strip().split("|")[0]
        distsum = 0
        for x in center.strip().split("|")[1:]:
            distsum += float(x)
        mean_dist = distsum / len(center.strip().split("|")[1:])
        cats.append(cat)
        mean_dists.append(mean_dist)

    if transversals and not search_range:
        search_range = sum(mean_dists) / len(mean_dists)
        grass.message(_("Calculated search range:  %.5f." % search_range))

    if not refline_cat:
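        # Choose the line whose centerpoint has the smallest mean distance
        # to all other centerpoints, i.e. the most central line.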
        refline_cat = sorted(zip(cats, mean_dists),
                             key=lambda tup: tup[1])[0][0]

        grass.message(
            _("Category number of chosen reference line: %s.") % refline_cat)

    # Use transversals algorithm
    if transversals:

        # Break any intersections in the original lines so that
        # they do not interfere further on
        grass.run_command("v.clean",
                          input=input,
                          output=tmp_cleaned_map,
                          tool="break",
                          quiet=True)

        xmean = []
        ymean = []
        xmedian = []
        ymedian = []
        step = 100.0 / nb_vertices

        os.environ["GRASS_VERBOSE"] = "-1"

        for vertice in range(0, nb_vertices + 1):
            # v.segment sometimes cannot find points when
            # using 0% or 100% offset
            length_offset = step * vertice
            if length_offset < 0.00001:
                length_offset = 0.00001
            if length_offset > 99.99999:
                length_offset = 99.9999
            # Create endpoints of transversal
            segment_input = "P 1 %s %.5f%% %f\n" % (
                refline_cat,
                length_offset,
                search_range,
            )
            segment_input += "P 2 %s %.5f%% %f\n" % (
                refline_cat,
                length_offset,
                -search_range,
            )
            grass.write_command(
                "v.segment",
                input=input,
                output=tmp_points_map,
                stdin=segment_input,
                overwrite=True,
            )

            # Create transversal
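            # v.net "arcs" rule format: <line cat> <start point cat> <end
            # point cat>, i.e. draw one line (cat 99999) between the two
            # endpoints.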
            grass.write_command(
                "v.net",
                points=tmp_points_map,
                output=tmp_line_map,
                operation="arcs",
                file="-",
                stdin="99999 1 2",
                overwrite=True,
            )

            # Patch transversal onto cleaned input lines
            maps = tmp_cleaned_map + "," + tmp_line_map
            grass.run_command("v.patch",
                              input=maps,
                              out=tmp_map,
                              overwrite=True)

            # Find intersections
            grass.run_command(
                "v.clean",
                input=tmp_map,
                out=tmp_line_map,
                tool="break",
                error=tmp_points_map,
                overwrite=True,
            )

            # Add categories to intersection points
            grass.run_command(
                "v.category",
                input=tmp_points_map,
                out=tmp_map,
                op="add",
                overwrite=True,
            )

            # Get coordinates of points
            coords = grass.read_command("v.to.db",
                                        map=tmp_map,
                                        op="coor",
                                        flags="p").splitlines()

            x = []
            y = []
            for coord in coords:
                x.append(float(coord.strip().split("|")[1]))
                y.append(float(coord.strip().split("|")[2]))

            # Calculate mean and median for this transversal
            if len(x) > 0:
                xmean.append(sum(x) / len(x))
                ymean.append(sum(y) / len(y))

                x.sort()
                y.sort()

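                # Average the two middle elements; for odd-length lists both
                # indices pick the same element, giving the standard median.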
                xmedian.append((x[(len(x) - 1) // 2] + x[(len(x)) // 2]) / 2)
                ymedian.append((y[(len(y) - 1) // 2] + y[(len(y)) // 2]) / 2)

        del os.environ["GRASS_VERBOSE"]

    # Use closest point algorithm
    else:

        # Get reference line and calculate its length
        grass.run_command("v.extract",
                          input=input,
                          output=tmp_line_map,
                          cats=refline_cat,
                          quiet=True)

        os.environ["GRASS_VERBOSE"] = "0"
        lpipe = grass.read_command("v.to.db",
                                   map=tmp_line_map,
                                   op="length",
                                   flags="p").splitlines()
        del os.environ["GRASS_VERBOSE"]

        for l in lpipe:
            linelength = float(l.strip().split("|")[1])

        step = linelength / nb_vertices

        # Create reference points for vertex calculation
        grass.run_command(
            "v.to.points",
            input=tmp_line_map,
            output=tmp_points_map,
            dmax=step,
            quiet=True,
        )
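        # v.to.points writes the point categories to layer 2, which is why
        # v.distance below uses from_layer=2.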

        nb_points = grass.vector_info_topo(tmp_points_map)["points"]

        cat = []
        x = []
        y = []

        # Get coordinates of closest points on all input lines
        if search_range:
            points = grass.read_command(
                "v.distance",
                from_=tmp_points_map,
                from_layer=2,
                to=input,
                upload="to_x,to_y",
                dmax=search_range,
                flags="pa",
                quiet=True,
            ).splitlines()
        else:
            points = grass.read_command(
                "v.distance",
                from_=tmp_points_map,
                from_layer=2,
                to=input,
                upload="to_x,to_y",
                flags="pa",
                quiet=True,
            ).splitlines()

        firstline = True
        for point in points:
            if firstline:
                firstline = False
                continue
            cat.append(int(point.strip().split("|")[0]))
            x.append(float(point.strip().split("|")[2]))
            y.append(float(point.strip().split("|")[3]))

        # Calculate mean coordinates
        xsum = [0] * nb_points
        ysum = [0] * nb_points
        linecount = [0] * nb_points

        for i in range(len(cat)):
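            # Point categories from v.to.points are 1-based; convert to
            # 0-based list indices.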
            index = cat[i] - 1
            linecount[index] += 1
            xsum[index] = xsum[index] + x[i]
            ysum[index] = ysum[index] + y[i]

        xmean = [0] * nb_points
        ymean = [0] * nb_points

        for c in range(0, nb_points):
            xmean[c] = xsum[c] / linecount[c]
            ymean[c] = ysum[c] / linecount[c]

        # Calculate the median

        xmedian = [0] * nb_points
        ymedian = [0] * nb_points

        for c in range(0, nb_points):
            xtemp = []
            ytemp = []
            for i in range(len(cat)):
                if cat[i] == c + 1:
                    xtemp.append(x[i])
                    ytemp.append(y[i])
            xtemp.sort()
            ytemp.sort()
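            # Median again via the two middle elements (identical for odd
            # lengths).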
            xmedian[c] = (xtemp[(len(xtemp) - 1) // 2] +
                          xtemp[(len(xtemp)) // 2]) / 2
            ymedian[c] = (ytemp[(len(ytemp) - 1) // 2] +
                          ytemp[(len(ytemp)) // 2]) / 2

    # Create new line and write to file
    if median and nb_lines > 2:
        line = geo.Line(list(zip(xmedian, ymedian)))
    else:
        if median and nb_lines <= 2:
            grass.message(
                _("More than 2 lines necessary for median, using mean."))
        line = geo.Line(list(zip(xmean, ymean)))

    new = VectorTopo(output)
    new.open("w")

    new.write(line)
    new.close()
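
# A minimal entry point sketch, assuming the usual GRASS script conventions:
# options/flags come from grass.parser(), and cleanup() is a hypothetical
# helper that removes the temporary maps created in main().
import atexit

def cleanup():
    # Remove the temporary maps (globals set at the start of main()).
    for tmp in (tmp_points_map, tmp_centerpoints_map, tmp_line_map,
                tmp_cleaned_map, tmp_map):
        grass.run_command("g.remove", type="vector", name=tmp,
                          flags="f", quiet=True)

if __name__ == "__main__":
    options, flags = grass.parser()
    atexit.register(cleanup)
    main()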