Example 1
def write_plants(plants, output, efficiency, min_power):
    # create vector segment
    new_vec = VectorTopo(output)
    #TODO:  check if the vector already exists
    new_vec.layer = 1
    new_vec.open('w', tab_cols=COLS)
    reg = Region()
    for pla in plants:
        power = pla.potential_power(efficiency=efficiency)
        if power > min_power:
            for cat, ink in enumerate(pla.intakes):
                if version == 70:
                    new_vec.write(
                        pla.line,
                        (pla.id, pla.id_stream, power,
                         float(pla.restitution.discharge), float(
                             ink.elevation), float(pla.restitution.elevation)))
                else:
                    new_vec.write(pla.line,
                                  cat=cat,
                                  attrs=(pla.id, pla.id_stream, power,
                                         float(pla.restitution.discharge),
                                         float(ink.elevation),
                                         float(pla.restitution.elevation)))

    new_vec.table.conn.commit()
    new_vec.comment = (' '.join(sys.argv))
    #pdb.set_trace()
    new_vec.close()
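write_plants() relies on a module-level COLS definition that is not shown in this snippet. A plausible layout, inferred from the six attribute values written per intake above; the column names and types are assumptions, the original module may define them differently:

# hypothetical COLS layout inferred from the write() calls above
COLS = [
    (u'cat', 'INTEGER PRIMARY KEY'),
    (u'plant_id', 'VARCHAR(10)'),
    (u'stream_id', 'INTEGER'),
    (u'pot_power', 'DOUBLE PRECISION'),
    (u'discharge', 'DOUBLE PRECISION'),
    (u'elev_up', 'DOUBLE PRECISION'),
    (u'elev_down', 'DOUBLE PRECISION'),
]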
Example 2
    def setUp(self):
        """Create input data
        """
        self.runModule("g.region", res=1, n=90, s=0, w=0, e=90)
        self.runModule("r.mapcalc", expression="map_a = 100 + row() + col()",
                       overwrite=True)
        self.runModule("r.mapcalc", expression="zone_map = if(row() < 20, 1,2)",
                       overwrite=True)
        self.runModule("r.mapcalc", expression="row_map = row()",
                       overwrite=True)
        self.runModule("r.to.vect", input="zone_map", output="zone_map",
                       type="area", overwrite=True)
        cols = [(u'cat', 'INTEGER PRIMARY KEY'), (u'name', 'VARCHAR(20)')]
        vt = VectorTopo('test_line')
        vt.open('w', tab_cols=cols)
        line1 = Line([(1, 1), (2, 1), (2, 2)])
        line2 = Line([(10, 20), (15, 22), (20, 32), (30, 40)])
        vt.write(line1, ('first',))
        vt.write(line2, ('second',))
        vt.table.conn.commit()
        vt.close()

        vt = VectorTopo('test_small_area')
        vt.open('w', tab_cols=cols)
        area1 = Boundary(points=[(0, 0), (0, 0.2), (0.2, 0.2), (0.2, 0), (0, 0)])
        area2 = Boundary(points=[(2.7, 2.7), (2.7, 2.8), (2.8, 2.8), (2.8, 2.7), (2.7, 2.7)])
        cent1 = Centroid(x=0.1, y=0.1)
        cent2 = Centroid(x=2.75, y=2.75)
        vt.write(area1)
        vt.write(area2)
        vt.write(cent1, ('first',))
        vt.write(cent2, ('second',))
        vt.table.conn.commit()
        vt.close()
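A minimal read-back sketch, added for illustration only (not part of the original test case): it assumes setUp() above has already run inside a GRASS session, so that 'test_line' exists in the current mapset.

from grass.pygrass.vector import VectorTopo

# reopen the map written in setUp() and inspect geometry and attributes
vt = VectorTopo('test_line')
vt.open('r')
for line in vt.viter('lines'):
    print(line.attrs['name'], line.length())
vt.close()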
Example 3
def write_points(plants, output, efficiency, min_power):
    # create vector segment
    new_vec = VectorTopo(output)
    #TODO:  check if the vector already exists
    new_vec.layer = 1
    new_vec.open('w', tab_cols=COLS_points)
    reg = Region()

    # import ipdb; ipdb.set_trace()
    for pla in plants:
        power = pla.potential_power(efficiency=efficiency)
        if power > min_power:
            new_vec.write(pla.line[-1],
                          (pla.restitution.id, pla.id, 'restitution',
                           pla.id_stream, float(pla.restitution.elevation),
                           float(pla.restitution.discharge), power))
            for ink in pla.intakes:
                new_vec.write(
                    pla.line[0],
                    (ink.id, pla.id, 'intake', pla.id_stream,
                     float(ink.elevation), float(ink.discharge), power))

    new_vec.table.conn.commit()
    new_vec.comment = (' '.join(sys.argv))
    new_vec.write_header()
    #pdb.set_trace()
    new_vec.close()
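write_points() (and conv_segpoints() further below) uses a module-level COLS_points definition that is not shown. A plausible layout, inferred from the seven attribute values written per point; the names and types here are assumptions for illustration:

# hypothetical COLS_points layout inferred from the write() calls above
COLS_points = [
    (u'cat', 'INTEGER PRIMARY KEY'),
    (u'point_id', 'INTEGER'),
    (u'plant_id', 'VARCHAR(10)'),
    (u'kind', 'VARCHAR(20)'),
    (u'stream_id', 'INTEGER'),
    (u'elevation', 'DOUBLE PRECISION'),
    (u'discharge', 'DOUBLE PRECISION'),
    (u'pot_power', 'DOUBLE PRECISION'),
]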
Example 4
def rand_vect_points(name, npoints=10, overwrite=True):
    new = VectorTopo(name)
    new.open('w', overwrite=overwrite)
    for pnt in get_random_points(npoints):
        new.write(pnt)
    new.close()
    return new
Example 5
    def __init__(self, name_map, cat):
        """ Return
        """
        self.cat = cat
        self.name = name_map
        self.izq = ''

        topo = VectorTopo(name_map)
        topo.open('r', layer=1)
        line1 = topo.read(cat)

        self.type = line1.attrs['type']
        self.pk_ini = float(''.join(line1.attrs['pk'].split('+')))
        self.pto_ini = Base.RoadPoint(line1[0])
        self.pto_fin = line1[-1]
        self.param = float(line1.attrs['param'].split('=')[1])
        self.center = None
        self.recta = None

        self.out = {'pks_in': [], 'pks_out': [], 'radios': [], 'dif': []}

        topo.close()

        topo.open('r', layer=2)
        self.azimut_ini = topo.read(cat).attrs['azimut']
        topo.close()

        if self.type == 'Curve':
            self.init_curve(topo)

        elif self.type == 'Straight':
            self.init_straight()
        else:
            grass.message(' not yet implemented')
Example 6
def conv_segpoints(seg, output):

    segments, mset = (seg.split('@') if '@' in seg else (seg, ''))
    # convert the map with segments into a map with intakes and restitution
    new_vec = VectorTopo(output)
    #TODO:  check if the vector already exists
    new_vec.layer = 1
    new_vec.open('w', tab_cols=COLS_points)
    reg = Region()

    seg = VectorTopo(segments, mapset=mset)
    seg.layer = 1
    seg.open('r')

    for pla in seg:
        #import ipdb; ipdb.set_trace()
        new_vec.write(pla[-1],
                      (2, pla.attrs['plant_id'], 'restitution',
                       pla.attrs['stream_id'], pla.attrs['elev_down'],
                       pla.attrs['discharge'], pla.attrs['pot_power']))
        #import ipdb; ipdb.set_trace()
        new_vec.write(pla[0], (1, pla.attrs['plant_id'], 'intake',
                               pla.attrs['stream_id'], pla.attrs['elev_up'],
                               pla.attrs['discharge'], pla.attrs['pot_power']))

    new_vec.table.conn.commit()
    new_vec.comment = (' '.join(sys.argv))
    new_vec.write_header()
    #pdb.set_trace()
    new_vec.close()

    return new_vec
Example 7
    def __init__(self, name_map, cat):
        """Return"""
        self.cat = cat
        self.name = name_map
        self.izq = ""

        topo = VectorTopo(name_map)
        topo.open("r", layer=1)
        line1 = topo.read(cat)

        self.type = line1.attrs["type"]
        self.pk_ini = float("".join(line1.attrs["pk"].split("+")))
        self.pto_ini = Base.RoadPoint(line1[0])
        self.pto_fin = line1[-1]
        self.param = float(line1.attrs["param"].split("=")[1])
        self.center = None
        self.recta = None

        self.out = {"pks_in": [], "pks_out": [], "radios": [], "dif": []}

        topo.close()

        topo.open("r", layer=2)
        self.azimut_ini = topo.read(cat).attrs["azimut"]
        topo.close()

        if self.type == "Curve":
            self.init_curve(topo)

        elif self.type == "Straight":
            self.init_straight()
        else:
            grass.message(" not yet implemented")
Example 8
def sample(vect_in_name, rast_in_name):
    """sample('point00', 'field')"""
    # instantiate the object maps
    vect_in = VectorTopo(vect_in_name)
    rast_in = RasterRow(rast_in_name)
    vect_out = VectorTopo('test_' + vect_in_name)
    # define the columns of the attribute table of the new vector map
    columns = [(u'cat',       'INTEGER PRIMARY KEY'),
               (rast_in_name,  'DOUBLE')]
    # open the maps
    vect_in.open('r')
    rast_in.open('r')
    vect_out.open('w', tab_cols=columns, link_driver='sqlite')
    # get the current region
    region = Region()
    # initialize the counter
    counter = 0
    data = []
    for pnt in vect_in.viter('points'):
        counter += 1
        # transform the spatial coordinates in row and col value
        x, y = coor2pixel(pnt.coords(), region)
        value = rast_in[int(x)][int(y)]
        data.append((counter, None if np.isnan(value) else float(value)))
        # write the geometry features
        vect_out.write(pnt)
    # write the attributes
    vect_out.table.insert(data, many=True)
    vect_out.table.conn.commit()
    # close the maps
    vect_in.close()
    rast_in.close()
    vect_out.close()
Example 9
def vedit_break(inShp, pntBreakShp, geomType='point,line,boundary,centroid'):
    """
    Use tool break
    """

    import os
    from grass.pygrass.modules import Module

    # Iterate over pntBreakShp to get all coords
    if os.path.isfile(pntBreakShp):
        from glass.g.rd.shp import points_to_list

        lstPnt = points_to_list(pntBreakShp)
    else:
        from grass.pygrass.vector import VectorTopo

        pnt = VectorTopo(pntBreakShp)
        pnt.open(mode='r')
        lstPnt = ["{},{}".format(str(p.x), str(p.y)) for p in pnt]
        pnt.close()

    # Run v.edit
    m = Module("v.edit",
               map=inShp,
               type=geomType,
               tool="break",
               coords=lstPnt,
               overwrite=True,
               run_=False,
               quiet=True)

    m()
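A hypothetical invocation with made-up map names, breaking the lines of an existing GRASS vector at the coordinates of a point map:

# 'roads' and 'crossings' are illustrative names only
vedit_break('roads', 'crossings', geomType='line')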
Example 10
    def test_strahler(self):
        self.assertModule(
            "v.stream.order",
            input="stream_network",
            points="stream_network_outlets",
            output="stream_network_order_test_strahler",
            threshold=25,
            order=["strahler"],
            overwrite=True,
            verbose=True,
        )

        # Check the strahler value
        v = VectorTopo(name="stream_network_order_test_strahler", mapset="")
        v.open(mode="r")

        self.assertTrue(v.exist(), True)
        self.assertEqual(v.num_primitive_of("line"), 101)
        # feature 4
        self.assertEqual(v.read(4).attrs.cat, 41)
        self.assertEqual(v.read(4).attrs["outlet_cat"], 1)
        self.assertEqual(v.read(4).attrs["network"], 1)
        self.assertEqual(v.read(4).attrs["reversed"], 0)
        self.assertEqual(v.read(4).attrs["strahler"], 4)

        v.close()
Example 11
def find_segments(river, discharge, dtm, range_plant, distance, p_max):
    check_multilines(river)
    #pdb.set_trace()
    river, mset = river.split('@') if '@' in river else (river, '')
    vec = VectorTopo(river, mapset=mset, mode='r')
    vec.open("r")
    raster_q = RasterRow(discharge)
    raster_dtm = RasterRow(dtm)
    raster_q.open('r')
    raster_dtm.open('r')
    reg = Region()
    plants = []
    for line in vec:
        count = 0
        # args is prog, h,  q
        line, prog, h, q = build_array(line, raster_q, raster_dtm)
        #pdb.set_trace()
        if len(line) > 2:
            #            import ipdb; ipdb.set_trace()
            #        else:
            # import pdb; pdb.set_trace()
            plants = recursive_plant(
                (prog, h, q), range_plant, distance, prog[0], prog[-1],
                str(line.cat), line.cat, line, plants, count, p_max)
    #pdb.set_trace()
    vec.close()
    raster_q.close()
    raster_dtm.close()
    return plants
Example 12
def rand_vect_points(name, npoints=10, overwrite=True):
    new = VectorTopo(name)
    new.open('w', overwrite=overwrite)
    for pnt in get_random_points(npoints):
        new.write(pnt)
    new.close()
    return new
Example 13
def get_electro_length(opts):
    # open vector plant
    pname = opts['struct']
    pname, vmapset = pname.split('@') if '@' in pname else (pname, '')
    with VectorTopo(pname,
                    mapset=vmapset,
                    layer=int(opts['struct_layer']),
                    mode='r') as vect:
        kcol = opts['struct_column_kind']
        ktype = opts['struct_kind_turbine']
        # check if electro_length is already in the table
        if 'electro_length' not in vect.table.columns:
            vect.table.columns.add('electro_length', 'double precision')
        # open vector map with the existing electroline
        ename = opts['electro']
        ename, emapset = ename.split('@') if '@' in ename else (ename, '')
        ltemp = []
        with VectorTopo(ename,
                        mapset=emapset,
                        layer=int(opts['electro_layer']),
                        mode='r') as electro:
            pid = os.getpid()
            elines = (opts['elines'] if opts['elines'] else
                      ('tmprgreen_%i_elines' % pid))
            for cat, line in enumerate(vect):
                if line.attrs[kcol] == ktype:
                    # the turbine is the last point of the penstock
                    turbine = line[-1]
                    # find the closest electro line
                    eline = electro.find['by_point'].geo(turbine, maxdist=1e6)
                    dist = eline.distance(turbine)
                    line.attrs['electro_length'] = dist.dist
                    if line.attrs['side'] == 'option1':
                        ltemp.append([
                            geo.Line([turbine, dist.point]),
                            (line.attrs['plant_id'], line.attrs['side'])
                        ])
                else:
                    line.attrs['electro_length'] = 0.
            vect.table.conn.commit()
        new = VectorTopo(elines)  # new vec with elines
        new.layer = 1
        cols = [
            (u'cat', 'INTEGER PRIMARY KEY'),
            (u'plant_id', 'VARCHAR(10)'),
            (u'side', 'VARCHAR(10)'),
        ]
        new.open('w', tab_cols=cols)
        reg = Region()
        for cat, line in enumerate(ltemp):
            if version == 70:
                new.write(line[0], line[1])
            else:
                new.write(line[0], cat=cat, attrs=line[1])
        new.table.conn.commit()
        new.comment = (' '.join(sys.argv))
        new.close()
Example 14
def get_electro_length(opts):
    # open vector plant
    pname = opts["struct"]
    pname, vmapset = pname.split("@") if "@" in pname else (pname, "")
    with VectorTopo(pname,
                    mapset=vmapset,
                    layer=int(opts["struct_layer"]),
                    mode="r") as vect:
        kcol = opts["struct_column_kind"]
        ktype = opts["struct_kind_turbine"]
        # check if electro_length is already in the table
        if "electro_length" not in vect.table.columns:
            vect.table.columns.add("electro_length", "double precision")
        # open vector map with the existing electroline
        ename = opts["electro"]
        ename, emapset = ename.split("@") if "@" in ename else (ename, "")
        ltemp = []
        with VectorTopo(ename,
                        mapset=emapset,
                        layer=int(opts["electro_layer"]),
                        mode="r") as electro:
            pid = os.getpid()
            elines = opts["elines"] if opts["elines"] else (
                "tmprgreen_%i_elines" % pid)
            for cat, line in enumerate(vect):
                if line.attrs[kcol] == ktype:
                    # the turbine is the last point of the penstock
                    turbine = line[-1]
                    # find the closest electro line
                    eline = electro.find["by_point"].geo(turbine, maxdist=1e6)
                    dist = eline.distance(turbine)
                    line.attrs["electro_length"] = dist.dist
                    if line.attrs["side"] == "option1":
                        ltemp.append([
                            geo.Line([turbine, dist.point]),
                            (line.attrs["plant_id"], line.attrs["side"]),
                        ])
                else:
                    line.attrs["electro_length"] = 0.0
            vect.table.conn.commit()
        new = VectorTopo(elines)  # new vec with elines
        new.layer = 1
        cols = [
            (u"cat", "INTEGER PRIMARY KEY"),
            (u"plant_id", "VARCHAR(10)"),
            (u"side", "VARCHAR(10)"),
        ]
        new.open("w", tab_cols=cols)
        reg = Region()
        for cat, line in enumerate(ltemp):
            if version == 70:
                new.write(line[0], line[1])
            else:
                new.write(line[0], cat=cat, attrs=line[1])
        new.table.conn.commit()
        new.comment = " ".join(sys.argv)
        new.close()
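Both versions of get_electro_length() (and write_plants() above) branch on a module-level version value that is not shown. One plausible way to derive it inside a GRASS session, assuming the convention that 70 stands for GRASS 7.0; the original module may compute it differently:

import grass.script as grass

# e.g. '7.8.5' -> 78, '7.0.4' -> 70 (hypothetical derivation)
_ver = grass.version()['version'].split('.')
version = int(_ver[0]) * 10 + int(_ver[1])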
Example 15
    def _load(self, options):
        # load input vector, initial controls
        if int(options["layer"]) == 0:
            _layer = ""
            _column = ""
        else:
            _layer = int(options["layer"])
            if options["column"]:
                _column = options["column"]
            else:
                grass.message("Name of z column required for 2D vector maps.")
        # convert vector to ASCII
        grass.run_command(
            "v.out.ascii",
            overwrite=1,
            input=options["input"].split("@")[0],
            output=self._tmpcat,
            format="point",
            separator="space",
            precision=15,
            where=options["where"],
            layer=_layer,
            columns=_column,
        )
        #        grass.run_command("v.out.ascii", flags='r', overwrite=1,
        #                          input=options['input'], output=self._tmpcat,
        #                          format="point", separator="space", precision=15,
        #                          where=options['where'], layer=_layer,
        #                          columns=_column
        # edit ASCII file, crop out one column
        if int(options["layer"]) > 0:
            fin = open(self._tmpcat, "r")
            fout = open(self._tmpxyz, "w")
            try:
                for line in fin:
                    parts = line.split(" ")
                    from grass.pygrass.vector import VectorTopo

                    pnt = VectorTopo(options["input"].split("@")[0])
                    pnt.open(mode="r")
                    check = pnt.read(1)
                    if check.is2D:
                        # fout.write(parts[0]+' '+parts[1]+' '+parts[3])
                        fout.write("{} {} {}".format(parts[0], parts[1],
                                                     parts[3]))
                    else:
                        # fout.write(parts[0]+' '+parts[1]+' '+parts[4])
                        fout.write("{} {} {}".format(parts[0], parts[1],
                                                     parts[4]))
                    pnt.close()
            except Exception as e:
                grass.fatal_error("Invalid input: %s" % e)
            fin.close()
            fout.close()
        else:
            grass.message("Z coordinates are used.")
Example 16
def write_objs(allrectas, radio):
    """R"""
    new2 = VectorTopo("AACC__" + str(int(radio)))
    # cols = [(u'cat',       'INTEGER PRIMARY KEY'),
    #        (u'elev',      'INTEGER')]

    new2.open("w")
    for obj in allrectas:
        new2.write(obj)
    # new2.table.conn.commit()
    new2.close()
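A hypothetical call with made-up geometries, showing the expected input: a list of pygrass geometry objects and a numeric radius that is only used to name the output map:

from grass.pygrass.vector.geometry import Line

# writes the two lines into a new map called 'AACC__250'
write_objs([Line([(0, 0), (5, 5)]), Line([(5, 5), (10, 2)])], 250)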
Example 17
    def _load(self, options):
        # load input vector, initial controls
        if int(options['layer']) == 0:
            _layer = ''
            _column = ''
        else:
            _layer = int(options['layer'])
            if options['column']:
                _column = options['column']
            else:
                grass.message('Name of z column required for 2D vector maps.')
        # convert vector to ASCII
        grass.run_command("v.out.ascii",
                          overwrite=1,
                          input=options['input'].split('@')[0],
                          output=self._tmpcat,
                          format="point",
                          separator="space",
                          precision=15,
                          where=options['where'],
                          layer=_layer,
                          columns=_column)
        #        grass.run_command("v.out.ascii", flags='r', overwrite=1,
        #                          input=options['input'], output=self._tmpcat,
        #                          format="point", separator="space", precision=15,
        #                          where=options['where'], layer=_layer,
        #                          columns=_column
        # edit ASCII file, crop out one column
        if int(options['layer']) > 0:
            fin = open(self._tmpcat, 'r')
            fout = open(self._tmpxyz, 'w')
            try:
                for line in fin:
                    parts = line.split(" ")
                    from grass.pygrass.vector import VectorTopo
                    pnt = VectorTopo(options['input'].split('@')[0])
                    pnt.open(mode='r')
                    check = pnt.read(1)
                    if check.is2D:
                        # fout.write(parts[0]+' '+parts[1]+' '+parts[3])
                        fout.write('{} {} {}'.format(parts[0], parts[1],
                                                     parts[3]))
                    else:
                        # fout.write(parts[0]+' '+parts[1]+' '+parts[4])
                        fout.write('{} {} {}'.format(parts[0], parts[1],
                                                     parts[4]))
                    pnt.close()
            except Exception as e:
                grass.fatal_error("Invalid input: %s" % e)
            fin.close()
            fout.close()
Example 18
    def run(self):
        logging.debug("Computation started")

        psc = self.input_psc.getValue()
        map_name = 'obce_psc_{}'.format(psc)

        obce = VectorTopo('obce', mapset='psc')
        obce.open('r')

        vystup = VectorTopo(map_name)
        vystup.open('w', tab_cols=[('cat',       'INTEGER PRIMARY KEY'),
                                   ('nazev',     'TEXT'),
                                   ('psc',       'INTEGER')])

        obec_id = None
        obce_psc = set()
        for prvek in obce.viter('areas'):
            if prvek.attrs is None:
                continue
            if prvek.attrs['psc'] == psc:
                if obec_id is None:
                    obec_id = prvek.id

                for b in prvek.boundaries():
                    for n in b.read_area_ids():
                        if n != -1 and n != obec_id:
                            obce_psc.add(n)
        obce_psc.add(obec_id)

        hranice = list()
        cat = 1
        for prvek in obce.viter('areas'):
            if prvek.id not in obce_psc:
                continue

            for b in prvek.boundaries():
                if b.id not in hranice:
                    hranice.append(b.id)
                    vystup.write(b)

            vystup.write(prvek.centroid(), cat=cat, attrs=(prvek.attrs['nazev'], prvek.attrs['psc']))
            cat += 1

        vystup.table.conn.commit()

        vystup.close()
        obce.close()

        logging.debug("Computation finished")

        return map_name
Example 19
    def uppoints(self):
        """Return"""
        name_map = self.name_map + "__Topo"
        topo = VectorTopo(name_map)
        topo.open("r", layer=1)

        pts_org = []
        pts_chg = []
        attrs = []
        for i in range(1, len(topo) + 1):
            act = topo.read(i).attrs["action"]
            if act != "":
                cat = topo.read(i).attrs["cat"]
                pto_org = topo.cat(cat, "points", 1)[0]
                pto_chg = Point(pto_org.x, pto_org.y, pto_org.z)
                if topo.read(i).attrs["x"] is not None:
                    pto_chg.x = float(topo.read(i).attrs["x"])
                if topo.read(i).attrs["y"] is not None:
                    pto_chg.y = float(topo.read(i).attrs["y"])
                if topo.read(i).attrs["z"] is not None:
                    pto_chg.z = float(topo.read(i).attrs["z"])

                pts_org.append(pto_org)
                pts_chg.append(pto_chg)
                attrs.append(
                    [
                        cat,
                        topo.read(i).attrs["pk"],
                        topo.read(i).attrs["name"],
                        topo.read(i).attrs["azi"],
                        topo.read(i).attrs["p_type"],
                        topo.read(i).attrs["align"],
                        topo.read(i).attrs["vparam"],
                        topo.read(i).attrs["v_type"],
                        topo.read(i).attrs["terr"],
                        topo.read(i).attrs["t_type"],
                        topo.read(i).attrs["dist_d"],
                        pto_chg.x,
                        pto_chg.y,
                        pto_chg.z,
                        "",
                    ]
                )
        topo.close()
        if pts_org != []:
            topo.open("rw", 1, with_z=True)
            for i, pto in enumerate(pts_org):
                topo.rewrite(pto, pts_chg[i], attrs[i][1:])
            topo.table.conn.commit()
            topo.close()
Example 20
def conv_segpoints(seg, output):

    segments, mset = seg.split("@") if "@" in seg else (seg, "")
    # convert the map with segments into a map with intakes and restitution
    new_vec = VectorTopo(output)
    # TODO:  check if the vector already exists
    new_vec.layer = 1
    new_vec.open("w", tab_cols=COLS_points)
    reg = Region()

    seg = VectorTopo(segments, mapset=mset)
    seg.layer = 1
    seg.open("r")

    for pla in seg:
        # import ipdb; ipdb.set_trace()
        new_vec.write(
            pla[-1],
            (
                2,
                pla.attrs["plant_id"],
                "restitution",
                pla.attrs["stream_id"],
                pla.attrs["elev_down"],
                pla.attrs["discharge"],
                pla.attrs["pot_power"],
            ),
        )
        # import ipdb; ipdb.set_trace()
        new_vec.write(
            pla[0],
            (
                1,
                pla.attrs["plant_id"],
                "intake",
                pla.attrs["stream_id"],
                pla.attrs["elev_up"],
                pla.attrs["discharge"],
                pla.attrs["pot_power"],
            ),
        )

    new_vec.table.conn.commit()
    new_vec.comment = " ".join(sys.argv)
    new_vec.write_header()
    # pdb.set_trace()
    new_vec.close()

    return new_vec
Example 21
def check_multilines(vector):
    vector, mset = vector.split("@") if "@" in vector else (vector, "")
    msgr = get_msgr()
    vec = VectorTopo(vector, mapset=mset, mode="r")
    vec.open("r")
    info = gcore.parse_command("v.category", input=vector, option="print")
    for i in info.keys():
        vec.cat(int(i), "lines", 1)
        #        if i == '28':
        #            import ipdb; ipdb.set_trace()
        if len(vec.cat(int(i), "lines", 1)) > 1:
            # import ipdb; ipdb.set_trace()
            warn = "Multilines for the same category %s" % i
            msgr.warning(warn)
    vec.close()
Example 22
def check_multilines(vector):
    vector, mset = vector.split('@') if '@' in vector else (vector, '')
    msgr = get_msgr()
    vec = VectorTopo(vector, mapset=mset, mode='r')
    vec.open("r")
    info = gcore.parse_command('v.category', input=vector, option='print')
    for i in info.keys():
        vec.cat(int(i), 'lines', 1)
        #        if i == '28':
        #            import ipdb; ipdb.set_trace()
        if len(vec.cat(int(i), 'lines', 1)) > 1:
            # import ipdb; ipdb.set_trace()
            warn = ("Multilines for the same category %s" % i)
            msgr.warning(warn)
    vec.close()
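A hypothetical call with a made-up map name; it prints a warning for every category of the input vector that maps to more than one line:

check_multilines('streams@PERMANENT')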
Example 23
def _get_vector_features_as_wkb_list(lock, conn, data):
    """Return vector layer features as wkb list

    supported feature types:
    point, centroid, line, boundary, area

    :param lock: A multiprocessing.Lock instance
    :param conn: A multiprocessing.Pipe instance used to send True or False
    :param data: The list of data entries [function_id,name,mapset,extent,
                                           feature_type, field]

    """
    wkb_list = None
    try:
        name = data[1]
        mapset = data[2]
        extent = data[3]
        feature_type = data[4]
        field = data[5]
        bbox = None

        mapset = utils.get_mapset_vector(name, mapset)

        if not mapset:
            raise ValueError("Unable to find vector map <%s>" % (name))

        layer = VectorTopo(name, mapset)

        if layer.exist() is True:
            if extent is not None:
                bbox = Bbox(
                    north=extent["north"],
                    south=extent["south"],
                    east=extent["east"],
                    west=extent["west"],
                )

            layer.open("r")
            if feature_type.lower() == "area":
                wkb_list = layer.areas_to_wkb_list(bbox=bbox, field=field)
            else:
                wkb_list = layer.features_to_wkb_list(
                    bbox=bbox, feature_type=feature_type, field=field)
            layer.close()
    finally:
        # Send even if an exception was raised.
        conn.send(wkb_list)
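A minimal sketch of how a worker function like this is typically driven from a parent process; the Lock/Pipe wiring, the map name 'streams', and the field value are assumptions for illustration, not the original server loop:

import multiprocessing as mp

lock = mp.Lock()
parent_conn, child_conn = mp.Pipe()
# [function_id, name, mapset, extent, feature_type, field]
data = [0, 'streams', '', None, 'line', 1]
proc = mp.Process(target=_get_vector_features_as_wkb_list,
                  args=(lock, child_conn, data))
proc.start()
wkb_list = parent_conn.recv()   # None if the map could not be opened
proc.join()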
Example 24
    def uppoints(self):
        """ Return
        """
        name_map = self.name_map + '__Topo'
        topo = VectorTopo(name_map)
        topo.open('r', layer=1)

        pts_org = []
        pts_chg = []
        attrs = []
        for i in range(1, len(topo) + 1):
            act = topo.read(i).attrs['action']
            if act != '':
                cat = topo.read(i).attrs['cat']
                pto_org = topo.cat(cat, 'points', 1)[0]
                pto_chg = Point(pto_org.x, pto_org.y, pto_org.z)
                if topo.read(i).attrs['x'] is not None:
                    pto_chg.x = float(topo.read(i).attrs['x'])
                if topo.read(i).attrs['y'] is not None:
                    pto_chg.y = float(topo.read(i).attrs['y'])
                if topo.read(i).attrs['z'] is not None:
                    pto_chg.z = float(topo.read(i).attrs['z'])

                pts_org.append(pto_org)
                pts_chg.append(pto_chg)
                attrs.append([
                    cat,
                    topo.read(i).attrs['pk'],
                    topo.read(i).attrs['name'],
                    topo.read(i).attrs['azi'],
                    topo.read(i).attrs['p_type'],
                    topo.read(i).attrs['align'],
                    topo.read(i).attrs['vparam'],
                    topo.read(i).attrs['v_type'],
                    topo.read(i).attrs['terr'],
                    topo.read(i).attrs['t_type'],
                    topo.read(i).attrs['dist_d'], pto_chg.x, pto_chg.y,
                    pto_chg.z, ''
                ])
        topo.close()
        if pts_org != []:
            topo.open('rw', 1, with_z=True)
            for i, pto in enumerate(pts_org):
                topo.rewrite(pto, pts_chg[i], attrs[i][1:])
            topo.table.conn.commit()
            topo.close()
Example 25
def feat_count(shp, gisApi='pandas'):
    """
    Count the number of features in a feature class
    
    APIs available:
    * ogr;
    * arcpy;
    * pygrass;
    * pandas;
    """
    
    if gisApi == 'ogr':
        from osgeo        import ogr
        from gasp.prop.ff import drv_name
    
        data = ogr.GetDriverByName(drv_name(shp)).Open(shp, 0)
        lyr = data.GetLayer()
        fcnt = int(lyr.GetFeatureCount())
        data.Destroy()
    
    elif gisApi == 'arcpy':
        import arcpy
        
        fcnt = int(arcpy.GetCount_management(shp).getOutput(0))
    
    elif gisApi == 'pygrass':
        from grass.pygrass.vector import VectorTopo
        
        open_shp = VectorTopo(shp)
        open_shp.open(mode='r')
        # 'geom' must name the primitive type to count (e.g. 'point' or 'line')
        fcnt = open_shp.num_primitive_of(geom)
        open_shp.close()
    
    elif gisApi == 'pandas':
        from gasp.fm import tbl_to_obj
        
        gdf = tbl_to_obj(shp)
        
        fcnt = int(gdf.shape[0])
        
        del gdf
    
    else:
        raise ValueError('The api {} is not available'.format(gisApi))
    
    return fcnt
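A hypothetical call using the OGR backend on a made-up shapefile path, assuming the helper modules imported above are available:

n = feat_count('/tmp/roads.shp', gisApi='ogr')
print(n)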
Example 26
def sumRasterPath(pvc, nogo):
    ng_path = VectorTopo(nogo)
    ng_path.open('r')
    invalid = set()
    for seg in ng_path:
        invalid.add(seg.attrs['a_cat'])
    ng_path.close()
    path = VectorTopo(pvc)
    path.open('r')
    actual_costs = {}
    for seg in path:
        if seg.attrs['a_cat'] in invalid:
            continue
        if seg.attrs['a_cat'] in actual_costs:
            actual_costs[seg.attrs['a_cat']] += seg.attrs['b_friction'] * seg.length()
        else:
            actual_costs[seg.attrs['a_cat']] = seg.attrs['b_friction'] * seg.length()
    path.close()
    return actual_costs
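A hypothetical usage with made-up map names; the function assumes two vector maps whose features carry 'a_cat' (and, for the path map, 'b_friction') attributes, as produced for instance by v.overlay:

costs = sumRasterPath('paths_overlay', 'nogo_overlay')
for cat, cost in costs.items():
    print(cat, cost)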
Example 27
    def setUp(self):
        """Create input data
        """
        self.runModule("g.region", res=1, n=90, s=0, w=0, e=90)
        self.runModule("r.mapcalc", expression="map_a = 100 + row() + col()",
                       overwrite=True)
        self.runModule("r.mapcalc", expression="zone_map = if(row() < 20, 1,2)",
                       overwrite=True)
        self.runModule("r.to.vect", input="zone_map", output="zone_map",
                       type="area", overwrite=True)
        cols = [(u'cat', 'INTEGER PRIMARY KEY'), (u'name', 'VARCHAR(20)')]
        vt = VectorTopo('test_line')
        vt.open('w', tab_cols=cols)
        line1 = Line([(1, 1), (2, 1), (2, 2)])
        line2 = Line([(10, 20), (15, 22), (20, 32), (30, 40)])
        vt.write(line1, ('first',))
        vt.write(line2, ('second',))
        vt.table.conn.commit()
        vt.close()
Example 28
def exportResultTable(cell_size):
    output = [['Cat', 'visi_cost', '8_prop', '16_prop', '8_act', '16_act']]
    raster_costs8 = sumRasterPath(pvc8, nogo8)
    raster_costs16 = sumRasterPath(pvc16, nogo16)
    trg = VectorTopo(targetmap)
    trg.open('r')
    for t in trg:
        outr = [str(t.attrs['cat'])]
        print('cat:', t.attrs['cat'])
        print('visi cost:', t.attrs['cost'])
        if t.attrs['cost'] >= 0:
            outr.append(str(t.attrs['cost']))
        else:
            outr.append('nf')
        if t.attrs[trg_c8]:
            print('8 proposed:', t.attrs[trg_c8] * cell_size)
            outr.append(str(t.attrs[trg_c8] * cell_size))
        else:
            outr.append('nf')
            print('no 8 path found')
        if t.attrs[trg_c16]:
            print('16 proposed:', t.attrs[trg_c16] * cell_size)
            outr.append(str(t.attrs[trg_c16] * cell_size))
        else:
            print('no 16 path found')
            outr.append('nf')
        if t.attrs['cat'] in raster_costs8:
            print('8 actual:', raster_costs8[t.attrs['cat']])
            outr.append(str(raster_costs8[t.attrs['cat']]))
        else:
            print('invalid 8 con raster path')
            outr.append('inv')
        if t.attrs['cat'] in raster_costs16:
            print('16 actual:', raster_costs16[t.attrs['cat']])
            outr.append(str(raster_costs16[t.attrs['cat']]))
        else:
            print('invalid 16 con raster path')
            outr.append('inv')
        print("")
        output.append(outr)
    trg.close()
    np.savetxt(path + result_file, output, delimiter=';', fmt='%s')
Example 29
    def new_map(self, mapa, layer, tab_sufix, objs, values, tab_subname=''):
        """Return
        """
        map_out = VectorTopo(mapa)
        if objs == [] or objs is None:
            return None

        tab_sufix_out = OUT_TABLES_NAMES[tab_sufix]
        tab_name = self.road_name + tab_sufix_out + tab_subname

        columns = OUT_TABLES[tab_sufix]
        if layer == 1:
            map_out.open('w', layer=layer, with_z=True, tab_name=tab_name,
                         tab_cols=columns)
        else:
            map_out.open('rw')
            link = Link(layer, tab_name, tab_name, 'cat' + str(layer))
            map_out.dblinks.add(link)
            table = link.table()
            if not table.exist():
                table.create(columns)
            table.conn.commit()
            map_out.close()

            map_out.open('rw', layer=layer, with_z=True)
        for i, obj in enumerate(objs):
            map_out.write(obj, i + 1, values[i])
        map_out.table.conn.commit()
        map_out.close()
Example 30
    def split_maps(self):
        """ Return
        """
        grass.message("Spliting in points and breaklines maps")

        topo = VectorTopo(self.name_map)
        topo.open('r')

        points = []
        lines = []
        for algo in range(1, topo.number_of("points") + 1):
            if isinstance(topo.read(algo), Point):
                points.append(topo.read(algo))
            if isinstance(topo.read(algo), Line):
                lines.append(topo.read(algo))
        topo.close()

        new1 = VectorTopo(self.ptosmap)
        new1.open('w', with_z=True)
        for pnt in points:
            new1.write(pnt)
        new1.close()

        new1 = VectorTopo(self.breakmap)
        new1.open('w', layer=1, with_z=True)
        for line in lines:
            new1.write(line)
        new1.close()
Example 31
    def curved(self):
        """Return"""
        mapset = GrassGis.G_find_vector2(self.nametin, "")
        if not mapset:
            sys.exit("Vector map <%s> not found" % self.nametin)

        # define map structure
        map_info = GrassGis.pointer(GrassVect.Map_info())

        # define open level (level 2: topology)
        GrassVect.Vect_set_open_level(2)

        # open existing vector map
        GrassVect.Vect_open_old(map_info, self.nametin, mapset)

        print("Calculating curves")

        allrectas = []
        rectas = self.get_rectas(map_info)

        for nivel in rectas:
            for recta in nivel:
                allrectas.append(recta)

        GrassVect.Vect_close(map_info)

        new = VectorTopo(self.namelines)
        new.open("w", with_z=True)

        for line in allrectas:
            new.write(Line(line))
        new.close()

        grass.run_command(
            "v.build.polylines",
            input=self.namelines,
            output=self.namecurved,
            overwrite=True,
            quiet=True,
        )
Example 32
    def obce_psc(psc):
        obce = VectorTopo('obce')
        obce.open('r')

        vystup = VectorTopo('obce_psc_{}'.format(psc))
        vystup.open('w', tab_cols=[('cat',       'INTEGER PRIMARY KEY'),
                                   ('nazev',     'TEXT'),
                                   ('psc',       'INTEGER')])

        obec_id = None
        obce_psc = set()
        for prvek in obce.viter('areas'):
            if prvek.attrs is None:
                continue
            if prvek.attrs['psc'] == psc:
                if obec_id is None:
                    obec_id = prvek.id

                for b in prvek.boundaries():
                    for n in b.read_area_ids():
                        if n != -1 and n != obec_id:
                            obce_psc.add(n)
        obce_psc.add(obec_id)

        hranice = list()
        for prvek in obce.viter('areas'):
            if prvek.id not in obce_psc:
                continue

            for b in prvek.boundaries():
                if b.id not in hranice:
                    hranice.append(b.id)
                    vystup.write(b, attrs=(None, None))

            vystup.write(prvek.centroid(), attrs=(prvek.attrs['nazev'], prvek.attrs['psc']))

        vystup.table.conn.commit()

        vystup.close()
        obce.close()
Example 33
def _get_vector_table_as_dict(lock, conn, data):
    """Get the table of a vector map layer as dictionary

    :param lock: A multiprocessing.Lock instance
    :param conn: A multiprocessing.Pipe instance used to send True or False
    :param data: The list of data entries [function_id, name, mapset, where]

    """
    ret = None
    try:
        name = data[1]
        mapset = data[2]
        where = data[3]

        mapset = utils.get_mapset_vector(name, mapset)

        if not mapset:
            raise ValueError("Unable to find vector map <%s>" % (name))

        layer = VectorTopo(name, mapset)

        if layer.exist() is True:
            layer.open("r")
            columns = None
            table = None
            if layer.table is not None:
                columns = layer.table.columns
                table = layer.table_to_dict(where=where)
            layer.close()

            ret = {}
            ret["table"] = table
            ret["columns"] = columns
    finally:
        # Send even if an exception was raised.
        conn.send(ret)
Example 34
#!/usr/bin/env python

from grass.pygrass.vector import VectorTopo

zachranka = VectorTopo('adresnimista_zachranka', mapset='ruian_praha')
zachranka.open('r')
ulice = VectorTopo('ulice', mapset='ruian_praha')
ulice.open('r')

zu = VectorTopo('zachranka_ulice')
cols = [('cat',       'INTEGER PRIMARY KEY'),
        ('kod',       'INTEGER'),
        ('ulice',     'TEXT'),
        ('nespravny', 'INTEGER')]
zu.open('w', tab_cols=cols)

seznam = []
for z in zachranka:
    u = ulice.find['by_point'].geo(z, maxdist=1000.)
    if u is None:
        continue
    nespravny = z.attrs['ulicekod'] != u.attrs['kod']
    print (u'{:10} {:1} {}'.format(z.attrs['kod'], nespravny, u.attrs['nazev']))
    zu.write(z, (z.attrs['kod'], u.attrs['nazev'], nespravny))
    if u.cat not in seznam:
        zu.write(u, (None, u.attrs['nazev'], None))
        seznam.append(u.cat)

zu.table.conn.commit()  # necessary to write the attributes!

zu.close()
Example 35
#!/usr/bin/env python

from grass.pygrass.vector import VectorTopo

psc = '41115'

obce = VectorTopo('obce')
obce.open('r')

vystup = VectorTopo('obce_psc_{}'.format(psc))
vystup.open('w', tab_cols=[('cat',       'INTEGER PRIMARY KEY'),
                           ('nazev',     'TEXT'),
                           ('psc',       'INTEGER')])

obec_id = None
obce_psc = set()
for prvek in obce.viter('areas'):
    if prvek.attrs['psc'] == psc:
        if obec_id is None:
            obec_id = prvek.id
            
        for b in prvek.boundaries():
            for n in b.get_left_right():
                if n != -1 and n != obec_id:
                    obce_psc.add(n)
obce_psc.add(obec_id)

hranice = list()
for prvek in obce.viter('areas'):
    if prvek.id not in obce_psc:
        continue
Example 36
#!/usr/bin/env python

from grass.pygrass.vector import VectorTopo

psc = '41115'

obce = VectorTopo('obce')
obce.open('r')

print ("Seznam obci s PSC {}:".format(psc))
obce_psc = set()
for prvek in obce.viter('areas'):
    if prvek.attrs['psc'] != psc:
        continue
    obce_psc.add(prvek.id)
    print (u"{0}: {1}".format(psc, prvek.attrs['nazev']))
    
sousede = set()
for prvek in obce.viter('areas'):
    if prvek.id not in obce_psc:
        continue

    for b in prvek.boundaries():
        for n in b.get_left_right():
            if n != -1 and n != prvek.id:
               sousede.add(n)

print ("Seznam sousednich obce:")
for prvek in obce.viter('areas'):
    if prvek.id not in sousede or \
       prvek.attrs['psc'] == psc:
Example 37
#!/usr/bin/env python

from grass.pygrass.vector import VectorTopo

okresy = VectorTopo('okresy_polygon', mapset='ruian')
okresy.open('r')

for o in okresy.viter('areas'):
    sousede = set()
    for b in o.boundaries():
        for n in b.get_left_right():
            if n != -1 and n != o.id:
                sousede.add(n)
    
    print (u'{:20}: {}'.format(o.attrs['nazev'], len(sousede)))

okresy.close()
Example 38
def vect(stream_in_name, stream_out_name,
         direction_in_name, accumulation_in_name, distance_in_name):
    '''Builds vector map from stream raster map.'''

    # Instantiate maps
    print "Fetching maps..."
    stream_in       = RasterRowIO(stream_in_name)
    direction_in    = RasterSegment(direction_in_name)
    accumulation_in = RasterSegment(accumulation_in_name)
    distance_in     = RasterSegment(distance_in_name)

    # Initialize output
    stream_out      = VectorTopo(stream_out_name)
    # Define the new vector map attribute table columns
    columns = [(u"cat", "INTEGER PRIMARY KEY"),
               (u"fid", "INTEGER"),
               (u"accum", "DOUBLE"),
               (u"dist", "DOUBLE"),
               (u"source_i", "INTEGER"),
               (u"source_j", "INTEGER"),
               (u"target_i", "INTEGER"),
               (u"target_j", "INTEGER")]
    print "Opening output..."
    stream_out.open('w', tab_name = stream_out_name, tab_cols = columns)

    # Open maps
    print "Loading maps..."
    stream_in.open('r')
    direction_in.open(mode = 'r')
    accumulation_in.open(mode = 'r')
    distance_in.open(mode = 'r')

    # Get the current region to compute coordinates
    region = Region()
    x_shift = region.ewres*.5
    y_shift = region.nsres*.5*(-1.0)


    print "Processing..."
    # For each stream cell...
    i = 0
    for row in stream_in:

        j = 0
        for cell in row:

            if cell < 0:
                j += 1
                continue

            # Retrieve data (direction, accumulation and distance)
            direction    = direction_in[i, j]
            accumulation = accumulation_in[i, j]
            distance     = distance_in[i, j]

            # Get i and j shifts from direction
            (di, dj) = shift[direction]

            # Compute unit vector start and end geo coordinates
            (source_y, source_x) = pixel2coor((j,      i),      region)
            (target_y, target_x) = pixel2coor((j + dj, i + di), region)

            # Build unit vector
            stream_out.write(Line([(source_x + x_shift, source_y + y_shift),
                                   (target_x + x_shift, target_y + y_shift)]),
                             (cell, accumulation, distance, i, j, i + di, j + dj)
                             )

            j += 1

        i += 1

    # Commit database changes
    stream_out.table.conn.commit()

    # Close maps
    stream_in.close()
    direction_in.close()
    accumulation_in.close()
    distance_in.close()
    stream_out.close()
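vect() looks up row/column offsets in a module-level shift dictionary that is not shown. A plausible definition, assuming r.watershed-style drainage direction codes (1-8, counter-clockwise starting from north-east); the original module may encode directions differently:

# hypothetical mapping: drainage direction -> (row offset, col offset)
shift = {
    1: (-1, 1),   # NE
    2: (-1, 0),   # N
    3: (-1, -1),  # NW
    4: (0, -1),   # W
    5: (1, -1),   # SW
    6: (1, 0),    # S
    7: (1, 1),    # SE
    8: (0, 1),    # E
}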
Example 39
def main():
    """
    Input for GSFLOW
    """

    reg = grass.region()

    options, flags = grass.parser()

    basin_mouth_E = options['E']
    basin_mouth_N = options['N']

    accum_thresh = options['threshold']

    # Create drainage direction, flow accumulation, and rivers

    # Manually create streams from accumulation.
    # The one funny step is the cleaning w/ snap, because r.thin allows cells that are
    # diagonal to each other to be next to each other -- creating boxes along the channel
    # that are not consistent with stream topology
    grass.mapcalc('streams_unthinned = flowAccum > '+str(accum_thresh), overwrite=True)
    grass.run_command('r.null', map='streams_unthinned', setnull=0)
    grass.run_command('r.thin', input='streams_unthinned', output='streams', overwrite=True)
    grass.run_command('r.to.vect', input='streams', output='streams_raw', type='line', overwrite=True)
    grass.run_command('v.clean', input='streams_raw', output='streams', tool='snap', threshold=1.42*(grass.region()['nsres'] + grass.region()['ewres'])/2., flags='c', overwrite=True) # threshold is one cell
    grass.run_command('v.to.rast', input='streams', output='streams_unthinned', use='val', val=1, overwrite=True)
    grass.run_command('r.thin', input='streams_unthinned', output='streams', overwrite=True)
    grass.run_command('r.to.vect', input='streams', output='streams', type='line', overwrite=True)
    grass.run_command('v.to.rast', input='streams', output='streams', use='cat', overwrite=True)
    # Create drainage basins
    grass.run_command('r.stream.basins', direction='drainageDirection', stream_rast='streams', basins='basins', overwrite=True)
    # If there is any more need to work with nodes, I should check the code I wrote for Kelly Monteleone's paper -- this has river identification and extraction, including intersection points.


    # Vectorize drainage basins
    grass.run_command('r.to.vect', input='basins', output='basins', type='area', flags='v', overwrite=True)

    # Then remove all sub-basins and segments that have negative flow accumulation
    # (i.e. have contributions from outside the map)

    ###################################################################
    # Intermediate step: Remove all basins that have offmap flow
    # i.e., those containing cells with negative flow accumulation
    ###################################################################

    # Method 3 -- even easier
    grass.mapcalc("has_offmap_flow = (flowAccum < 0)", overwrite=True)
    grass.run_command('r.null', map='has_offmap_flow', setnull=0)
    grass.run_command('r.to.vect', input='has_offmap_flow', output='has_offmap_flow', type='point', overwrite=True)
    grass.run_command('v.db.addcolumn', map='has_offmap_flow', columns='badbasin_cats integer')
    grass.run_command('v.what.vect', map='has_offmap_flow', column='badbasin_cats', query_map='basins', query_column='cat', dmax=60)
    colNames = np.array(grass.vector_db_select('has_offmap_flow', layer=1)['columns'])
    # offmap incoming flow points
    colValues = np.array(list(grass.vector_db_select('has_offmap_flow', layer=1)['values'].values()))
    badcats = colValues[:,colNames == 'badbasin_cats'].squeeze()
    badcats = badcats[badcats != '']
    badcats = badcats.astype(int)
    badcats = list(set(list(badcats)))
    # basins for full cat list
    colNames = np.array(grass.vector_db_select('basins', layer=1)['columns'])
    colValues = np.array(list(grass.vector_db_select('basins', layer=1)['values'].values()))
    allcats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    allcats = list(set(list(allcats)))
    # xor to goodcats
    #goodcats = set(badcats).symmetric_difference(allcats)
    # but better in case somehow there are badcats that are not allcats to do NOT
    goodcats = list(set(allcats) - set(badcats))
    goodcats_str = ''
    for cat in goodcats:
      goodcats_str += str(cat) + ','
    goodcats_str = goodcats_str[:-1] # super inefficient but quick
    grass.run_command('g.rename', vect='basins,tmp', overwrite=True)
    grass.run_command('v.extract', input='tmp', output='basins', cats=goodcats_str)
    grass.run_command('g.rename', vect='streams,tmp', overwrite=True)
    grass.run_command('v.extract', input='tmp', output='streams', cats=goodcats_str)
    #grass.run_command('g.rename', vect='stream_nodes,tmp', overwrite=True)
    #grass.run_command('v.extract', input='tmp', output='stream_nodes', cats=goodcats_str)

    # Fix pixellated pieces -- formerly here due to one-pixel-basin issue
    reg = grass.region()
    grass.run_command('g.rename', vect='basins,basins_messy', overwrite=True)
    grass.run_command('v.clean', input='basins_messy', output='basins', tool='rmarea', threshold=reg['nsres']*reg['ewres'], overwrite=True)

    # Optional, but recommended because not all basins need to connect:
    # choose a subset of the region in which to do the PRMS calculation
    grass.run_command( 'r.water.outlet', input='drainageDirection', output='studyBasin', coordinates=str(basin_mouth_E)+','+str(basin_mouth_N) , overwrite=True)
    # Vectorize
    grass.run_command( 'r.to.vect', input='studyBasin', output='studyBasin', type='area', overwrite=True)
    # If there are dangling areas (single-pixel?), just drop them. Not sure if this is the best way to do it
    # No check for two equal areas -- if we have this, there are more fundamental problems in defining 
    # a watershed in contiguous units

    #"""
    # ONLY IF MORE THAN ONE STUDY BASIN -- remove small areas
    grass.run_command( 'v.db.addcolumn', map='studyBasin', columns='area_m2 double precision' )
    grass.run_command( 'v.db.dropcolumn', map='studyBasin', columns='label' )
    grass.run_command( 'v.to.db', map='studyBasin', columns='area_m2', option='area', units='meters')
    drainageAreasRaw = sorted( grass.parse_command( 'v.db.select', map='studyBasin', flags='c').keys() ) # could update to grass.vector_db_select
    drainageAreasList = []
    for row in drainageAreasRaw:
      # cat, area
      drainageAreasList.append(row.split('|'))
    drainageAreasOnly = np.array(drainageAreasList).astype(float)
    catsOnly = drainageAreasOnly[:,0].astype(int)
    drainageAreasOnly = drainageAreasOnly[:,1]
    row_with_max_drainage_area = (drainageAreasOnly == np.max(drainageAreasOnly)).nonzero()[0][0]
    cat_with_max_drainage_area = catsOnly[row_with_max_drainage_area]
    grass.run_command('g.rename', vect='studyBasin,tmp', overwrite=True)
    grass.run_command('v.extract', input='tmp', output='studyBasin', cats=cat_with_max_drainage_area, overwrite=True)
    grass.run_command('g.remove', type='vector', name='tmp', flags='f')
    grass.run_command('v.to.rast', input='studyBasin', output='studyBasin', use='val', value=1, overwrite=True)
    #"""
    """
    # Remove small areas -- easier, though not as sure, as the method above
    grass.run_command('v.rename', vect='studyBasin,tmp', overwrite=True)
    grass.run_command('v.clean', input='tmp', output='studyBasin', tool='rmarea', threshold=1.01*(grass.region()['nsres'] * grass.region()['ewres']), flags='c', overwrite=True) # threshold is one cell
    """


    ###############
    # PLACEHOLDER #
    ###################################################################
    # To do in near future: limit to this basin
    ###################################################################

    # Next, get the order of basins the old-fashioned way: coordinates of endpoints of lines
    # Because I can't use GRASS to query multiple points
    #grass.run_command('v.extract', input='streams', output='streamSegments', type='line', overwrite=True)
    # Maybe I don't even need nodes! 9/4/16 -- nope, doesn't seem so.
    grass.run_command('g.copy', rast='streams,streamSegments')
    grass.run_command('v.db.addcolumn', map='streamSegments', columns='z double precision, flow_accum double precision, x1 double precision, y1 double precision, x2 double precision, y2 double precision')
    grass.run_command('v.to.db', map='streamSegments', option='start', columns='x1, y1')
    grass.run_command('v.to.db', map='streamSegments', option='end', columns='x2, y2')

    colNames = np.array(grass.vector_db_select('streamSegments')['columns'])
    colValues = np.array(list(grass.vector_db_select('streamSegments')['values'].values()))
    cats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    xy1 = colValues[:,(colNames == 'x1') + (colNames == 'y1')].astype(float)
    xy2 = colValues[:,(colNames == 'x2') + (colNames == 'y2')].astype(float)
    xy  = np.vstack((xy1, xy2))

    # xy1: UPSTREAM
    # xy2: DOWNSTREAM
    # (I checked.)
    # So now can use this information to find headwaters and mouths

    # Not sure that this is necessary
    nsegs_at_point_1 = []
    nsegs_at_point_2 = []
    for row in xy1:
      nsegs_at_point_1.append(np.sum( np.prod(xy == row, axis=1)))
    for row in xy2:
      nsegs_at_point_2.append(np.sum( np.prod(xy == row, axis=1)))
    nsegs_at_point_1 = np.array(nsegs_at_point_1)
    nsegs_at_point_2 = np.array(nsegs_at_point_2)


    # HRU's have same numbers as their enclosed segments
    # NOT TRUE IN GENERAL -- JUST FOR THIS CASE WITH SUB-BASINS -- WILL NEED TO FIX IN FUTURE



    #############
    # Now, let's copy/rename the sub-basins to HRU and the streamSegments to segment and give them attributes
    ###########################################################################################################

    # Attributes (in order given in manual)

    # HRU
    hru_columns = []
    # Self ID
    hru_columns.append('id integer') # nhru
    # Basic Physical Attributes (Geometry)
    hru_columns.append('hru_area double precision') # acres (!!!!)
    hru_columns.append('hru_aspect double precision') # Mean aspect [degrees]
    hru_columns.append('hru_elev double precision') # Mean elevation
    hru_columns.append('hru_lat double precision') # Latitude of centroid
    hru_columns.append('hru_slope double precision') # Mean slope [percent]
    # Basic Physical Attributes (Other)
    #hru_columns.append('hru_type integer') # 0=inactive; 1=land; 2=lake; 3=swale; almost all will be 1
    #hru_columns.append('elev_units integer') # 0=feet; 1=meters. 0=default. I think I will set this to 1 by default.
    # Measured input
    hru_columns.append('outlet_sta integer') # Index of streamflow station at basin outlet:
                                         #   station number if it has one, 0 if not
    #    Note that the below specify projections and not lat/lon; they really seem
    #    to work for any projected coordinates, with _x, _y, in meters, and _xlong, 
    #    _ylat, in feet (i.e. they are just northing and easting). The meters and feet
    #    are not just simple conversions, but actually are required for different
    #    modules in the code, and are hence redundant but intentional.
    hru_columns.append('hru_x double precision') # Easting [m]
    hru_columns.append('hru_xlong double precision') # Easting [feet]
    hru_columns.append('hru_y double precision') # Northing [m]
    hru_columns.append('hru_ylat double precision') # Northing [feet]
    # Streamflow and lake routing
    hru_columns.append('K_coef double precision') # Travel time of flood wave to next downstream segment;
                                                  #   this is the Muskingum storage coefficient
                                                  #   1.0 for reservoirs, diversions, and segments flowing
                                                  #   out of the basin
    hru_columns.append('x_coef double precision') # Amount of attenuation of flow wave;
                                                  #   this is the Muskingum routing weighting factor
                                                  #   range: 0.0--0.5; default 0.2
                                                  #   0 for all segments flowing out of the basin
    hru_columns.append('hru_segment integer') # ID of stream segment to which flow will be routed
                                              #   this is for non-cascade routing (flow goes directly
                                              #   from HRU to stream segment)
    hru_columns.append('obsin_segment integer') # Index of measured streamflow station that replaces
                                                #   inflow to a segment

    # Segments
    segment_columns = []
    # Self ID
    segment_columns.append('id integer') # nsegment
    # Streamflow and lake routing
    segment_columns.append('tosegment integer') # Index of downstream segment to which a segment
                                                #   flows (thus differentiating it from hru_segment,
                                                #   which is for HRU's, though segment and HRU ID's
                                                #   are the same when HRU's are sub-basins)

    # PRODUCE THE DATA TABLES
    ##########################

    # Create strings
    hru_columns = ",".join(hru_columns)
    segment_columns = ",".join(segment_columns)

    #"""
    # Copy
    grass.run_command('g.copy', vect='basins,HRU', overwrite=True)
    grass.run_command('g.copy', vect='streamSegments,segment', overwrite=True)
    #"""

    # Rename / subset
    """
    # OR GO BACK TO HRU_messy
    grass.run_command('v.overlay', ainput='basins', binput='studyBasin', operator='and', output='HRU_messy', overwrite=True)
    grass.run_command('v.overlay', ainput='streamSegments', binput='studyBasin', operator='and', output='segment_messy', overwrite=True)
    # And clean as well
    grass.run_command('v.clean', input='HRU_messy', output='HRU', tool='rmarea', threshold=reg['nsres']*reg['ewres']*40, overwrite=True)
    grass.run_command('v.clean', input='segment_messy', output='segment', tool='rmdangle', threshold=reg['nsres']*2, overwrite=True)
    # And now that the streams and HRU's no longer have the same cat values, fix 
    # this.
    grass.run_command('v.db.droptable', map='HRU', flags='f')
    grass.run_command('v.db.droptable', map='segment', flags='f')
    #grass.run_command('v.category', input='HRU', option='del', cat='-1', out='tmp', overwrite=True)
    #grass.run_command('v.category', input='tmp', option='add', out='HRU' overwrite=True)
    grass.run_command('v.db.addtable', map='HRU')
    grass.run_command('v.db.addtable', map='segment')

    grass.run_comm


    v.clean HRU
    v.clean
    v
    v.what.vect 
    """

    #grass.run_command('v.clean', input='segment_messy', output='HRU', tool='rmarea', threshold=reg['nsres']*reg['ewres']*20, overwrite=True)


    # Add columns to tables
    grass.run_command('v.db.addcolumn', map='HRU', columns=hru_columns)
    grass.run_command('v.db.addcolumn', map='segment', columns=segment_columns)


    # Produce the data table entries
    ##################################

    """
    # ID numbers
    # There should be a way to do this all at once, but...
    for i in range(len(cats)):
      grass.run_command('v.db.update', map='HRU', column='id', value=nhru[i], where='cat='+str(cats[i]))
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    for i in range(len(cats)):
      grass.run_command('v.db.update', map='segment', column='id', value=nsegment[i], where='cat='+str(cats[i]))
    """

    nhru = np.arange(1, xy1.shape[0]+1)
    nhrut = []
    for i in range(len(nhru)):
      nhrut.append( (nhru[i], cats[i]) )
    # Access the HRU's 
    hru = VectorTopo('HRU')
    # Open the map with topology:
    hru.open('rw')
    # Create a cursor
    cur = hru.table.conn.cursor()
    # Use it to loop across the table
    cur.executemany("update HRU set id=? where cat=?", nhrut)
    # Commit changes to the table
    hru.table.conn.commit()
    # Close the table
    hru.close()

    # if you want to append to table
    # cur.executemany("update HRU(id) values(?)", nhrut) # "insert into" will add rows

    # Same for segments
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general

    # Somehow only works after I v.clean, not right after v.overlay
    segment = VectorTopo('segment')
    segment.open('rw')
    cur = segment.table.conn.cursor()
    cur.executemany("update segment set id=? where cat=?", nsegmentt)
    segment.table.conn.commit()
    segment.close()

    #hru_columns.append('hru_area double precision')
    grass.run_command('v.to.db', map='HRU', option='area', columns='hru_area', units='acres')

    # GET MEAN VALUES FOR THESE NEXT ONES, ACROSS THE BASIN

    # hru_columns.append('hru_aspect double precision') # Mean aspect [degrees]
    # hru_columns.append('hru_slope double precision') # Mean slope [percent]
    # Slope
    grass.run_command('r.slope.aspect', elevation='srtm', slope='tmp', aspect='aspect', format='percent', overwrite=True) # zscale=0.01 also works to make percent be decimal 0-1
    grass.mapcalc('slope = tmp / 100.', overwrite=True)
    grass.run_command('v.rast.stats', map='HRU', raster='slope', method='average', column_prefix='tmp', flags='c')
    grass.run_command('v.db.update', map='HRU', column='hru_slope', query_column='tmp_average')
    grass.run_command('v.db.dropcolumn', map='HRU', column='tmp_average')
    # Aspect is in degrees, which have no meaningful arithmetic mean, so convert
    # it to x- and y-components and average those instead.
    # Here sin gives x and cos gives y; the exact convention does not matter as
    # long as the conversion back to degrees is consistent with it.
    grass.mapcalc('aspect_x = sin(aspect)', overwrite=True)
    grass.mapcalc('aspect_y = cos(aspect)', overwrite=True)
    #grass.run_command('v.db.addcolumn', map='HRU', columns='aspect_x_sum double precision, aspect_y_sum double precision, ncells_in_hru integer')
    grass.run_command('v.rast.stats', map='HRU', raster='aspect_x', method='sum', column_prefix='aspect_x', flags='c')
    grass.run_command('v.rast.stats', map='HRU', raster='aspect_y', method='sum', column_prefix='aspect_y', flags='c')
    # Not actually needed, but maybe good to know
    #grass.run_command('v.rast.stats', map='HRU', raster='aspect_y', method='number', column_prefix='tmp', flags='c')
    #grass.run_command('v.db.renamecolumn', map='HRU', column='tmp_number,ncells_in_hru')
    # NO TRIG FUNCTIONS IN SQLITE!
    #grass.run_command('v.db.update', map='HRU', column='hru_aspect', query_column='DEGREES(ATN2(aspect_y_sum, aspect_x_sum))') # Getting 0, why?
    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.execute("SELECT cat,aspect_x_sum,aspect_y_sum FROM %s" %hru.name)
    _arr = np.array(cur.fetchall())
    _cat = _arr[:,0]
    _aspect_x_sum = _arr[:,1]
    _aspect_y_sum = _arr[:,2]
    aspect_angle = np.arctan2(_aspect_y_sum, _aspect_x_sum) * 180./np.pi
    aspect_angle[aspect_angle < 0] += 360 # all positive
    aspect_angle_cat = np.vstack((aspect_angle, _cat)).transpose()
    cur.executemany("update HRU set hru_aspect=? where cat=?", aspect_angle_cat)
    hru.table.conn.commit()
    hru.close()
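
    # Sketch of the circular-mean idea used above (never called here): compass
    # directions cannot be averaged arithmetically -- e.g. the mean of 350 and
    # 10 degrees should be 0, not 180 -- so sum the unit-vector components and
    # take arctan2 of the sums, analogous to what the aspect_x/aspect_y raster
    # sums do above (the script's own x/y naming differs, but is self-consistent).
    def circular_mean_deg(angles_deg):
        import numpy as np
        a = np.radians(np.asarray(angles_deg, dtype=float))
        return float(np.degrees(np.arctan2(np.sin(a).sum(), np.cos(a).sum())) % 360.)
    # e.g. circular_mean_deg([350., 10.]) gives ~0 (mod 360); a plain mean gives 180.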

    # hru_columns.append('hru_elev double precision') # Mean elevation
    grass.run_command('v.rast.stats', map='HRU', raster='srtm', method='average', column_prefix='tmp', flags='c')
    grass.run_command('v.db.update', map='HRU', column='hru_elev', query_column='tmp_average')
    grass.run_command('v.db.dropcolumn', map='HRU', column='tmp_average')

    # Get the x,y of each centroid. Problem: some areas are not in the database
    # table but still have centroids, with duplicate category values, and it is
    # hard to find a clean way to drop them. They are probably small dangles at
    # the edges of the vectorization, where the raster value was the same but
    # pinched out into one to a few cells. Looking at the map, the extra
    # centroids sit on area boundaries; removing small areas (though the
    # threshold is hard to guess) gets rid of them.

    """
    g.copy vect=HRU,HRUorig # HACK!!!
    v.clean in=HRUorig out=HRU tool=rmarea --o thresh=15000
    """

    #grass.run_command( 'g.rename', vect='HRU,HRU_too_many_centroids')
    #grass.run_command( 'v.clean', input='HRU_too_many_centroids', output='HRU', tool='rmdac')
    grass.run_command('v.db.addcolumn', map='HRU', columns='centroid_x double precision, centroid_y double precision')
    grass.run_command( 'v.to.db', map='HRU', type='centroid', columns='centroid_x, centroid_y', option='coor', units='meters')

    # hru_columns.append('hru_lat double precision') # Latitude of centroid
    colNames = np.array(grass.vector_db_select('HRU', layer=1)['columns'])
    colValues = np.array(grass.vector_db_select('HRU', layer=1)['values'].values())
    hru_cats = colValues[:,colNames=='cat'].astype(int).squeeze()
    xy = colValues[:,(colNames=='centroid_x') + (colNames=='centroid_y')]
    np.savetxt('_xy.txt', xy, delimiter='|', fmt='%s')
    grass.run_command('m.proj', flags='od', input='_xy.txt', output='_lonlat.txt', overwrite=True)
    lonlat = np.genfromtxt('_lonlat.txt', delimiter='|',)[:,:2]
    # Pair lon/lat with the cats from the same table read (row order matches),
    # rather than with _cat from the earlier aspect query
    lonlat_cat = np.concatenate((lonlat, np.expand_dims(hru_cats, 1)), axis=1)

    # why not just get lon too?
    grass.run_command('v.db.addcolumn', map='HRU', columns='hru_lon double precision')

    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.executemany("update HRU set hru_lon=?, hru_lat=? where cat=?", lonlat_cat)
    hru.table.conn.commit()
    hru.close()
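
    # Sketch of the projected-XY -> lon/lat round trip used just above (never
    # called here): m.proj with flags -o (current CRS -> WGS84 lon/lat) and -d
    # (decimal degrees) reads a pipe-separated "x|y" text file and writes
    # "lon|lat|z". The temporary file names are illustrative placeholders.
    def xy_to_lonlat(xy, tmp_in='_xy.txt', tmp_out='_lonlat.txt'):
        import numpy as np
        np.savetxt(tmp_in, xy, delimiter='|', fmt='%s')
        grass.run_command('m.proj', flags='od', input=tmp_in, output=tmp_out,
                          overwrite=True)
        return np.genfromtxt(tmp_out, delimiter='|')[:, :2]  # columns: lon, lat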

    # Easting and Northing for other columns
    grass.run_command('v.db.update', map='HRU', column='hru_x', query_column='centroid_x')
    grass.run_command('v.db.update', map='HRU', column='hru_xlong', query_column='centroid_x*3.28084') # feet
    grass.run_command('v.db.update', map='HRU', column='hru_y', query_column='centroid_y')
    grass.run_command('v.db.update', map='HRU', column='hru_ylat', query_column='centroid_y*3.28084') # feet


    # Streamflow and lake routing
    # tosegment
    """
    # THIS IS THE NECESSARY PART
    # CHANGED (BELOW) TO RE-DEFINE NUMBERS IN SEQUENCE AS HRU'S INSTEAD OF USING
    # THE CAT VALUES
    # Get the first channels in the segment
    tosegment = np.zeros(len(cats)) # default to 0 if they do not flow to another segment
    # Loop over all segments
    #for i in range(len(cats)):
    # From outlet segment
    for i in range(len(xy2)):
      # to inlet segment
      inlets = np.prod(xy1 == xy2[i], axis=1)
      # Update inlet segments with ID of outlets
      tosegment[inlets.nonzero()] = cats[i]
    tosegment_cat = tosegment.copy()
    """

    tosegment_cats = np.zeros(len(cats)).astype(int) # default to 0 if they do not flow to another segment
    tosegment = np.zeros(len(cats)).astype(int) # default to 0 if they do not flow to another segment
    # For each segment i, find the segments whose downstream endpoint lies on
    # segment i's upstream endpoint: those are the segments that drain into i
    for i in range(len(xy2)):
      # segments flowing into segment i
      outlets = np.prod(xy2 == xy1[i], axis=1)
      # point their tosegment at segment i's ID (and cat)
      tosegment[outlets.nonzero()] = nhru[i]
      tosegment_cats[outlets.nonzero()] = cats[i]

    """
      # BACKWARDS!
      # to inlet segment
      inlets = np.prod(xy1 == xy2[i], axis=1)
      # Update inlet segments with ID of outlets
      tosegment_cats[inlets.nonzero()] = cats[i]
    """

    # Now, just update tosegment (segments) and hru_segment (hru's)
    # In this case, they are the same.
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general
    # Tuple for upload to SQL
    # 0 is the default value if it doesn't go into any other segment (i.e. flows
    # off-map)
    tosegmentt = []
    tosegment_cats_t = []
    for i in range(len(nsegment)):
      tosegmentt.append( (tosegment[i], nsegment[i]) )
      tosegment_cats_t.append( (tosegment_cats[i], cats[i]) )
    # Once again, special case
    hru_segmentt = tosegmentt
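
    # Sketch of the downstream-linking rule above (never called here): segment j
    # is segment i's "tosegment" when j's upstream endpoint xy1[j] coincides
    # with i's downstream endpoint xy2[i]; 0 means the segment leaves the map.
    # Assumes ids holds the segment ID for each row of xy1/xy2.
    def build_tosegment(xy1, xy2, ids):
        import numpy as np
        to = np.zeros(len(ids), dtype=int)
        for i in range(len(ids)):
            receives_me = np.all(xy1 == xy2[i], axis=1)  # whose inlet is my outlet?
            if receives_me.any():
                to[i] = ids[receives_me.nonzero()[0][0]]
        return to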

    # Loop check!
    # Weak loop checker - will only detect direct ping-pong.
    loops = []
    tosegmenta = np.array(tosegmentt)
    for i in range(len(tosegmenta)):
      for j in range(len(tosegmenta)):
        if (tosegmenta[i] == tosegmenta[j][::-1]).all():
          loops.append(tosegmenta[i])
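
    # The ping-pong check above only catches two-segment cycles. A fuller check
    # (sketch only, never called) walks downstream from every segment until it
    # reaches 0 (off-map); any walk that instead re-enters a segment it has
    # already visited started on, or upstream of, a cycle.
    def segments_caught_in_cycles(tosegment, ids):
        downstream_of = dict(zip(ids, tosegment))
        bad = []
        for start in ids:
            seen = set()
            node = start
            while node != 0 and node not in seen:
                seen.add(node)
                node = downstream_of.get(node, 0)
            if node != 0:  # stopped because a segment was revisited
                bad.append(start)
        return bad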

    segment = VectorTopo('segment')
    segment.open('rw')
    cur = segment.table.conn.cursor()
    cur.executemany("update segment set tosegment=? where id=?", tosegmentt)
    segment.table.conn.commit()
    segment.close()

    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.executemany("update HRU set hru_segment=? where id=?", hru_segmentt)
    hru.table.conn.commit()
    hru.close()


    #grass.run_command('g.rename', vect='HRU_all_2,HRU', overwrite=True)
    #grass.run_command('g.rename', vect='segment_all_2,segment', overwrite=True)

    # In study basin?
    grass.run_command('v.db.addcolumn', map='segment', columns='in_study_basin int')
    grass.run_command('v.db.addcolumn', map='HRU', columns='in_study_basin int')
    grass.run_command('v.what.vect', map='segment', column='in_study_basin', query_map='studyBasin', query_column='value')
    grass.run_command('v.what.vect', map='HRU', column='in_study_basin', query_map='segment', query_column='in_study_basin')

    # Save global segment+HRU
    grass.run_command('g.rename', vect='HRU,HRU_all')
    grass.run_command('g.rename', vect='segment,segment_all')

    # Output HRU -- will need to ensure that this is robust!
    grass.run_command('v.extract', input='HRU_all', output='HRU', where='in_study_basin=1', overwrite=True)
    grass.run_command('v.extract', input='segment_all', output='segment', where='in_study_basin=1', overwrite=True)


    colNames = np.array(grass.vector_db_select('segment')['columns'])
    colValues = np.array(grass.vector_db_select('segment')['values'].values())
    cats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    xy1 = colValues[:,(colNames == 'x1') + (colNames == 'y1')].astype(float)
    xy2 = colValues[:,(colNames == 'x2') + (colNames == 'y2')].astype(float)
    xy  = np.vstack((xy1, xy2))

    # Redo nhru down here
    nhru = np.arange(1, xy1.shape[0]+1)
    nhrut = []
    for i in range(len(nhru)):
      nhrut.append( (nhru[i], cats[i]) )
      """
      n = 1
      if i != 1:
        nhrut.append( (n, cats[i]) )
        n += 1
      """
      
    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.executemany("update HRU set id=? where cat=?", nhrut)
    hru.table.conn.commit()
    hru.close()

    # if you want to append to table
    # cur.executemany("update HRU(id) values(?)", nhrut) # "insert into" will add rows

    # Same for segments
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general

    # Somehow only works after I v.clean, not right after v.overlay
    segment = VectorTopo('segment')
    segment.open('rw')
    cur = segment.table.conn.cursor()
    cur.executemany("update segment set id=? where cat=?", nsegmentt)
    segment.table.conn.commit()
    segment.close()


    tosegment_cats = np.zeros(len(cats)).astype(int) # default to 0 if they do not flow to another segment
    tosegment = np.zeros(len(cats)).astype(int) # default to 0 if they do not flow to another segment
    # For each segment i, find the segments whose downstream endpoint lies on
    # segment i's upstream endpoint: those are the segments that drain into i
    for i in range(len(xy2)):
      # segments flowing into segment i
      outlets = np.prod(xy2 == xy1[i], axis=1)
      # point their tosegment at segment i's ID (and cat)
      tosegment[outlets.nonzero()] = nhru[i]
      tosegment_cats[outlets.nonzero()] = cats[i]

    # Now, just update tosegment (segments) and hru_segment (hru's)
    # In this case, they are the same.
    nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
    nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general
    # Tuple for upload to SQL
    # 0 is the default value if it doesn't go into any other segment (i.e. flows
    # off-map)
    tosegmentt = []
    tosegment_cats_t = []
    for i in range(len(nsegment)):
      tosegmentt.append( (tosegment[i], nsegment[i]) )
      tosegment_cats_t.append( (tosegment_cats[i], cats[i]) )
    # Once again, special case
    hru_segmentt = tosegmentt

    # Loop check!
    # Weak loop checker - will only detect direct ping-pong.
    loops = []
    tosegmenta = np.array(tosegmentt)
    for i in range(len(tosegmenta)):
      for j in range(len(tosegmenta)):
        if (tosegmenta[i] == tosegmenta[j][::-1]).all():
          loops.append(tosegmenta[i])


    segment = VectorTopo('segment')
    segment.open('rw')
    cur = segment.table.conn.cursor()
    cur.executemany("update segment set tosegment=? where id=?", tosegmentt)
    segment.table.conn.commit()
    segment.close()

    hru = VectorTopo('HRU')
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.executemany("update HRU set hru_segment=? where id=?", hru_segmentt)
    hru.table.conn.commit()
    hru.close()

    # More old-fashioned way:
    os.system('v.db.select segment sep=comma > segment.csv')
    os.system('v.db.select HRU sep=comma > HRU.csv')
    # and then sort by id, manually
    # And then manually change the last segment's "tosegment" to 0.
    # Except in this case, it was 0!
    # Maybe the steps above already handle this automatically -- check later.
    # Re-running all of the above inside this smaller basin before saving should,
    # in any case, leave the segment and HRU tables consistent.

    print ""
    print "PRMS PORTION COMPLETE."
    print ""



    ###########
    # MODFLOW #
    ###########

    print ""
    print "STARTING MODFLOW PORTION."
    print ""

    # Generate coarse box for MODFLOW (ADW, 4 September, 2016)

    grass.run_command('g.region', rast='srtm')
    grass.run_command('g.region', n=7350000, s=7200000, w=170000, e=260000)
    reg = grass.region()
    MODFLOWres = 2000.
    grass.run_command('v.to.rast', input='HRU', output='allHRUs', use='val', val=1.0, overwrite=True)
    grass.run_command('r.null', map='allHRUs', null='0')
    grass.run_command('r.colors', map='allHRUs', color='grey', flags='n')
    grass.run_command('g.region', res=MODFLOWres)
    grass.run_command('r.resamp.stats', method='average', input='allHRUs', output='fraction_of_HRU_in_MODFLOW_cell', overwrite=True)
    grass.run_command('r.colors', map='fraction_of_HRU_in_MODFLOW_cell', color='grey', flags='n')
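
    # The 'average' of a 0/1 raster under r.resamp.stats is the areal fraction
    # of each coarse cell covered by HRUs. Equivalent numpy sketch (never called
    # here) for an exact integer ratio between the two resolutions:
    def coverage_fraction(binary, block):
        import numpy as np
        b = np.asarray(binary, dtype=float)
        nr, nc = (b.shape[0] // block) * block, (b.shape[1] // block) * block
        b = b[:nr, :nc]
        return b.reshape(nr // block, block, nc // block, block).mean(axis=(1, 3))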


    print ""
    print "MODFLOW PORTION COMPLETE."
    print ""
Esempio n. 40
0
def main():
    """
    Builds a grid for the MODFLOW component of the USGS hydrologic model,
    GSFLOW.
    """

    options, flags = gscript.parser()
    basin = options['basin']
    pp = options['pour_point']
    raster_input = options['raster_input']
    dx = options['dx']
    dy = options['dy']
    grid = options['output']
    mask = options['mask_output']
    bc_cell = options['bc_cell']
    # basin='basins_tmp_onebasin'; pp='pp_tmp'; raster_input='DEM'; raster_output='DEM_coarse'; dx=dy='500'; grid='grid_tmp'; mask='mask_tmp'
    """
    # Fatal if raster input and output are not both set
    _lena0 = (len(raster_input) == 0)
    _lenb0 = (len(raster_output) == 0)
    if _lena0 + _lenb0 == 1:
        gscript.fatal("You must set both raster input and output, or neither.")
    """

    # Fatal if bc_cell is set but the mask or pour point is missing
    if bc_cell != '':
        if (mask == '') or (pp == ''):
            gscript.fatal(
                'Mask and pour point must be set to define b.c. cell')

    # Create grid -- overlaps DEM, three cells of padding
    gscript.use_temp_region()
    reg = gscript.region()
    reg_grid_edges_sn = np.linspace(reg['s'], reg['n'], reg['rows'])
    reg_grid_edges_we = np.linspace(reg['w'], reg['e'], reg['cols'])
    g.region(vector=basin, ewres=dx, nsres=dy)
    regnew = gscript.region()
    # Use a grid ratio -- don't match exactly the desired MODFLOW resolution
    grid_ratio_ns = np.round(regnew['nsres'] / reg['nsres'])
    grid_ratio_ew = np.round(regnew['ewres'] / reg['ewres'])
    # Get S, W, and then move the unit number of grid cells over to get N and E
    # and include 3 cells of padding around the whole watershed
    _s_dist = np.abs(reg_grid_edges_sn - (regnew['s'] - 3. * regnew['nsres']))
    _s_idx = np.where(_s_dist == np.min(_s_dist))[0][0]
    _s = float(reg_grid_edges_sn[_s_idx])
    _n_grid = np.arange(_s, reg['n'] + 3 * grid_ratio_ns * reg['nsres'],
                        grid_ratio_ns * reg['nsres'])
    _n_dist = np.abs(_n_grid - (regnew['n'] + 3. * regnew['nsres']))
    _n_idx = np.where(_n_dist == np.min(_n_dist))[0][0]
    _n = float(_n_grid[_n_idx])
    _w_dist = np.abs(reg_grid_edges_we - (regnew['w'] - 3. * regnew['ewres']))
    _w_idx = np.where(_w_dist == np.min(_w_dist))[0][0]
    _w = float(reg_grid_edges_we[_w_idx])
    _e_grid = np.arange(_w, reg['e'] + 3 * grid_ratio_ew * reg['ewres'],
                        grid_ratio_ew * reg['ewres'])
    _e_dist = np.abs(_e_grid - (regnew['e'] + 3. * regnew['ewres']))
    _e_idx = np.where(_e_dist == np.min(_e_dist))[0][0]
    _e = float(_e_grid[_e_idx])
    # Finally make the region
    g.region(w=str(_w),
             e=str(_e),
             s=str(_s),
             n=str(_n),
             nsres=str(grid_ratio_ns * reg['nsres']),
             ewres=str(grid_ratio_ew * reg['ewres']))
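
    # Sketch (never called) of the snapping step above: each coarse-region edge
    # is placed on the fine-grid edge nearest the padded target coordinate, and
    # the opposite edge is then reached in whole multiples of
    # grid_ratio * (fine resolution), so coarse and fine cells nest exactly.
    def snap_to_grid(target, fine_edges):
        import numpy as np
        fine_edges = np.asarray(fine_edges, dtype=float)
        return float(fine_edges[np.argmin(np.abs(fine_edges - target))])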
    # And then make the grid
    v.mkgrid(map=grid, overwrite=gscript.overwrite())

    # Cell numbers (row, column, continuous ID)
    v.db_addcolumn(map=grid, columns='id int', quiet=True)
    colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns'])
    colValues = np.array(
        gscript.vector_db_select(grid, layer=1)['values'].values())
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()
    rows = colValues[:, colNames == 'row'].astype(int).squeeze()
    cols = colValues[:, colNames == 'col'].astype(int).squeeze()
    nrows = np.max(rows)
    ncols = np.max(cols)
    cats = np.ravel([cats])
    _id = np.ravel([ncols * (rows - 1) + cols])
    _id_cat = []
    for i in range(len(_id)):
        _id_cat.append((_id[i], cats[i]))
    gridTopo = VectorTopo(grid)
    gridTopo.open('rw')
    cur = gridTopo.table.conn.cursor()
    cur.executemany("update " + grid + " set id=? where cat=?", _id_cat)
    gridTopo.table.conn.commit()
    gridTopo.close()
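
    # The continuous cell ID above is the standard row-major numbering,
    # id = ncols*(row - 1) + col, i.e. cells are counted along each row in turn:
    # with ncols = 4, (row=1, col=1) -> 1, (row=2, col=3) -> 7, and
    # (row=nrows, col=ncols) -> nrows*ncols. Sketch only (never called):
    def cell_id(row, col, ncols):
        return ncols * (row - 1) + col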

    # Cell area
    v.db_addcolumn(map=grid, columns='area_m2 double precision', quiet=True)
    v.to_db(map=grid,
            option='area',
            units='meters',
            columns='area_m2',
            quiet=True)

    # Basin mask
    if len(mask) > 0:
        # Fine resolution region:
        g.region(n=reg['n'],
                 s=reg['s'],
                 w=reg['w'],
                 e=reg['e'],
                 nsres=reg['nsres'],
                 ewres=reg['ewres'])
        # Rasterize basin
        v.to_rast(input=basin,
                  output=mask,
                  use='val',
                  value=1,
                  overwrite=gscript.overwrite(),
                  quiet=True)
        # Coarse resolution region:
        g.region(w=str(_w),
                 e=str(_e),
                 s=str(_s),
                 n=str(_n),
                 nsres=str(grid_ratio_ns * reg['nsres']),
                 ewres=str(grid_ratio_ew * reg['ewres']))
        r.resamp_stats(input=mask,
                       output=mask,
                       method='sum',
                       overwrite=True,
                       quiet=True)
        r.mapcalc('tmp' + ' = ' + mask + ' > 0', overwrite=True, quiet=True)
        g.rename(raster=('tmp', mask), overwrite=True, quiet=True)
        r.null(map=mask, null=0, quiet=True)
        # Add mask location (1 vs 0) in the MODFLOW grid
        v.db_addcolumn(map=grid,
                       columns='basinmask double precision',
                       quiet=True)
        v.what_rast(map=grid, type='centroid', raster=mask, column='basinmask')
    """
    # Resampled raster
    if len(raster_output) > 0:
        r.resamp_stats(input=raster_input, output=raster_output, method='average', overwrite=gscript.overwrite(), quiet=True)
    """

    # Pour point
    if len(pp) > 0:
        v.db_addcolumn(map=pp,
                       columns=('row integer', 'col integer'),
                       quiet=True)
        v.build(map=pp, quiet=True)
        v.what_vect(map=pp,
                    query_map=grid,
                    column='row',
                    query_column='row',
                    quiet=True)
        v.what_vect(map=pp,
                    query_map=grid,
                    column='col',
                    query_column='col',
                    quiet=True)

    # Next point downstream of the pour point
    # Requires pp (always) and mask (sometimes)
    # Dependency set above w/ gscript.fatal
    if len(bc_cell) > 0:
        ########## NEED TO USE TRUE TEMPORARY FILE ##########
        # May not work with dx != dy!
        v.to_rast(input=pp, output='tmp', use='val', value=1, overwrite=True)
        r.buffer(input='tmp',
                 output='tmp',
                 distances=float(dx) * 1.5,
                 overwrite=True)
        r.mapcalc('tmp2 = if(tmp==2,1,null()) * ' + raster_input,
                  overwrite=True)
        g.rename(raster=('tmp2', 'tmp'), overwrite=True, quiet=True)
        #r.mapcalc('tmp = if(isnull('+raster_input+',0,(tmp == 2)))', overwrite=True)
        #g.region(rast='tmp')
        #r.null(map=raster_input,
        r.drain(input=raster_input,
                start_points=pp,
                output='tmp2',
                overwrite=True)
        r.mapcalc('tmp3 = tmp2 * tmp', overwrite=True, quiet=True)
        g.rename(raster=('tmp3', 'tmp'), overwrite=True, quiet=True)
        #r.null(map='tmp', setnull=0) # Not necessary: center point removed above
        r.to_vect(input='tmp',
                  output=bc_cell,
                  type='point',
                  column='z',
                  overwrite=gscript.overwrite(),
                  quiet=True)
        v.db_addcolumn(map=bc_cell,
                       columns=('row integer', 'col integer',
                                'x double precision', 'y double precision'),
                       quiet=True)
        v.build(map=bc_cell, quiet=True)
        v.what_vect(map=bc_cell, query_map=grid, column='row', \
                    query_column='row', quiet=True)
        v.what_vect(map=bc_cell, query_map=grid, column='col', \
                    query_column='col', quiet=True)
        v.to_db(map=bc_cell, option='coor', columns=('x,y'))

        # Find out if this is diagonal: finite difference works only N-S, W-E
        colNames = np.array(gscript.vector_db_select(pp, layer=1)['columns'])
        colValues = np.array(
            gscript.vector_db_select(pp, layer=1)['values'].values())
        pp_row = int(colValues[:, colNames == 'row'].astype(int).squeeze())
        pp_col = int(colValues[:, colNames == 'col'].astype(int).squeeze())
        colNames = np.array(
            gscript.vector_db_select(bc_cell, layer=1)['columns'])
        colValues = np.array(
            gscript.vector_db_select(bc_cell, layer=1)['values'].values())
        bc_row = int(colValues[:, colNames == 'row'].astype(int).squeeze())
        bc_col = int(colValues[:, colNames == 'col'].astype(int).squeeze())
        # Also get x and y while we are at it: may be needed later
        bc_x = float(colValues[:, colNames == 'x'].astype(float).squeeze())
        bc_y = float(colValues[:, colNames == 'y'].astype(float).squeeze())
        if (bc_row != pp_row) and (bc_col != pp_col):
            # The b.c. cell is diagonal from the pour point, so there are two
            # possible replacement cells adjacent (N-S or W-E) to the pour point
            _col1, _row1 = str(bc_col), str(pp_row)
            _col2, _row2 = str(pp_col), str(bc_row)
            # Check if either of these is covered by the basin mask
            _ismask_1 = gscript.vector_db_select(grid,
                                                 layer=1,
                                                 where='(row == ' + _row1 +
                                                 ') AND (col ==' + _col1 + ')',
                                                 columns='basinmask')
            _ismask_1 = int(_ismask_1['values'].values()[0][0])
            _ismask_2 = gscript.vector_db_select(grid,
                                                 layer=1,
                                                 where='(row == ' + _row2 +
                                                 ') AND (col ==' + _col2 + ')',
                                                 columns='basinmask')
            _ismask_2 = int(_ismask_2['values'].values()[0][0])
            # If both covered by mask, error
            if _ismask_1 and _ismask_2:
                gscript.fatal(
                    'All possible b.c. cells covered by basin mask.\n'
                    'Contact the developer: awickert (at) umn(.)edu')
            # Otherwise, keep the candidate cell(s) not covered by the basin
            # mask and write those as b.c. points.
            # (Open question: should we prefer the candidate that touches the
            # fewest interior cells? For now just write every uncovered
            # candidate and see how it behaves.)
            else:
                # Get dx and dy
                dx = gscript.region()['ewres']
                dy = gscript.region()['nsres']
                # Build tool to handle multiple b.c. cells?
                bcvect = vector.Vector(bc_cell)
                bcvect.open('rw')
                _cat_i = 2
                if not _ismask_1:
                    # _x should always be bc_x, but writing generalized code
                    _x = bc_x + dx * (int(_col1) - bc_col)  # col 1 at w edge
                    _y = bc_y - dy * (int(_row1) - bc_row)  # row 1 at n edge
                    point0 = Point(_x, _y)
                    bcvect.write(
                        point0,
                        cat=_cat_i,
                        attrs=(None, _row1, _col1, _x, _y),
                    )
                    bcvect.table.conn.commit()
                    _cat_i += 1
                if not _ismask_2:
                    # _y should always be bc_y, but writing generalized code
                    _x = bc_x + dx * (int(_col2) - bc_col)  # col 1 at w edge
                    _y = bc_y - dy * (int(_row2) - bc_row)  # row 1 at n edge
                    point0 = Point(_x, _y)
                    bcvect.write(
                        point0,
                        cat=_cat_i,
                        attrs=(None, _row2, _col2, _x, _y),
                    )
                    bcvect.table.conn.commit()
                # Build database table and vector geometry
                bcvect.build()
                bcvect.close()

    g.region(n=reg['n'],
             s=reg['s'],
             w=reg['w'],
             e=reg['e'],
             nsres=reg['nsres'],
             ewres=reg['ewres'])
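
# Sketch (not part of the module above, and never called by it) of the diagonal
# boundary-condition fix: when the drained-to cell sits diagonally from the
# pour point, the two candidate replacement cells share a row with one of the
# points and a column with the other; the module then keeps only candidates
# outside the basin mask. The sign convention mirrors the inline code above
# (column numbers increase eastward, row numbers increase southward).
def diagonal_bc_candidates(pp_row, pp_col, bc_row, bc_col, bc_x, bc_y, dx, dy):
    """Return a list of (row, col, x, y) for the 1-2 N-S/W-E neighbour cells."""
    if bc_row == pp_row or bc_col == pp_col:
        return []  # already N-S or W-E adjacent; nothing to fix
    return [
        (pp_row, bc_col, bc_x, bc_y - dy * (pp_row - bc_row)),
        (bc_row, pp_col, bc_x + dx * (pp_col - bc_col), bc_y),
    ]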
Esempio n. 41
0
def main():
    soillossin = options['soillossin']
    soillossout = options['soillossout']
    factorold = options['factorold']
    
    factornew = options['factornew']
    map = options['map']
    factorcol = options['factorcol']
    
    flag_p = flags['p'] # patch factornew with factorold
    flag_k = flags['k'] # calculate k-factor components from % clay p_T, silt p_U, stones p_st, humus p_H 

     
    if not factornew:
        factors = {}
        if flag_k:
            gscript.message('Using factor derived from \
                soil components.')
            parcelmap = Vect(map)
            parcelmap.open(mode='rw', layer=1)
            parcelmap.table.filters.select()
            cur = parcelmap.table.execute()
            col_names = [cn[0] for cn in cur.description]
            rows = cur.fetchall()
           
            for col in (u'Kb',u'Ks',u'Kh', u'K'):
                if col not in parcelmap.table.columns:
                    parcelmap.table.columns.add(col,u'DOUBLE')
           
            for row in rows:
                rowid = row[1]
                p_T = row[7]
                p_U = row[8]
                p_st = row[9]
                p_H = row[10]
    
                print("Parzelle mit id %d :" %rowid)
                for sublist in bodenarten:
                    # p_T and p_U
                    if p_T in range(sublist[2],sublist[3]) \
                        and p_U in range(sublist[4],sublist[5]) :
                        print('Soil texture class "' + sublist[1]
                            + '", Kb = ' + str(sublist[6]))
                        Kb = sublist[6]
                        break
                
                for sublist in skelettgehalte:
                    if p_st < sublist[0]:
                        print('Coarse-fragment content class up to ' + str(sublist[0])
                            + ', Ks = ' + str(sublist[1]))
                        Ks = sublist[1]
                        break
            
                   
                for sublist in humusgehalte:
                    if p_H < sublist[0]:
                        print('Humus content class up to ' + str(sublist[0])
                            + ', Kh = ' + str(sublist[1]))
                        Kh = sublist[1]
                        break
                
                
                K = Kb * Ks * Kh
                print('K = ' + str(K))
        
                if K > 0:
                    parcelmap.table.execute("UPDATE " +  parcelmap.name 
                        + " SET"
                        + " Kb=" + str(Kb)
                        + ", Ks=" + str(Ks)
                        + ", Kh=" + str(Kh)
                        + ", K=" + str(K)
                        + " WHERE id=" + str(rowid) )
                    parcelmap.table.conn.commit()
                
            parcelmap.close()
            factorcol2 = 'K'
            
            factors['k'] = map.split('@')[0]+'.tmp.'+factorcol2
            v.to_rast(input=map, use='attr',
                   attrcolumn=factorcol2,
                   output=factors['k'])
            r.null(map=factors['k'], setnull='0')

        
        if factorcol:
            gscript.message('Using factor from column %s of \
                    vector map <%s>.' % (factorcol, map) )
                    
            factors['factorcol'] = map.split('@')[0]+'.tmp.' + factorcol
            v.to_rast(input=map, use='attr',
                   attrcolumn=factorcol,
                   output=factors['factorcol'])
            r.null(map=factors['factorcol'], setnull='0')
        
        print(factors.keys())
        if not 'k' in factors and not 'factorcol' in factors: 
            gscript.fatal('Please provide either factor \
                raster map or valid vector map with factor column \
                (kfactor) or factor components columns (Kb, Ks, Kh)' )
        
        #if 'k' in factors and 'factorcol' in factors: 
    
        factornew = map.split('@')[0]+'.kfactor'
        if 'k' in factors and 'factorcol' in  factors:
            factornew = map.split('@')[0]+'.kfactor'
            r.patch(input=(factors['factorcol'],factors['k']),
                    output=factornew)
            
        elif 'k' in factors:
            g.copy(rast=(factors['k'],factornew))
            
        elif 'factorcol' in factors:
            g.copy(rast=(factors['factorcol'],factornew))

            
    if flag_p:
        #factorcorr = factorold + '.update'
        r.patch(input=(factornew,factorold), output=factornew)
        
    formula = soillossout + '=' + soillossin \
                + '/' + factorold  \
                + '*' + factornew
    r.mapcalc(formula)
            
    r.colors(map=soillossout, raster=soillossin)
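
# Sketch of the table-driven K-factor lookup used inside main() above (never
# called here). The lookup tables bodenarten, skelettgehalte and humusgehalte
# are defined outside the snippet; the row layouts assumed below simply mirror
# how the loops above index them, and no real USLE values are implied.
def k_factor(p_T, p_U, p_st, p_H, bodenarten, skelettgehalte, humusgehalte):
    """Return (Kb, Ks, Kh, K) or None if any component class is not matched."""
    Kb = Ks = Kh = None
    for sublist in bodenarten:          # [.., name, T_min, T_max, U_min, U_max, Kb]
        if sublist[2] <= p_T < sublist[3] and sublist[4] <= p_U < sublist[5]:
            Kb = sublist[6]
            break
    for sublist in skelettgehalte:      # [upper_limit, Ks]
        if p_st < sublist[0]:
            Ks = sublist[1]
            break
    for sublist in humusgehalte:        # [upper_limit, Kh]
        if p_H < sublist[0]:
            Kh = sublist[1]
            break
    if None in (Kb, Ks, Kh):
        return None
    return Kb, Ks, Kh, Kb * Ks * Kh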
Esempio n. 42
0
def main():
    """
    Links each river segment to the next downstream segment in a tributary 
    network by referencing its category (cat) number in a new column. "0"
    means that the river exits the map.
    """

    options, flags = gscript.parser()
    streams = options['map']
    x1 = options['upstream_easting_column']
    y1 = options['upstream_northing_column']
    x2 = options['downstream_easting_column']
    y2 = options['downstream_northing_column']

    streamsTopo = VectorTopo(streams)
    #streamsTopo.build()

    # 1. Get vectorTopo
    streamsTopo.open(mode='rw')
    """
    points_in_streams = []
    cat_of_line_segment = []

    # 2. Get coordinates
    for row in streamsTopo:
        cat_of_line_segment.append(row.cat)
        if type(row) == vector.geometry.Line:
            points_in_streams.append(row)
    """

    # 3. Coordinates of points: 1 = start, 2 = end
    # Add the endpoint and "tostream" columns if they do not already exist
    for colname, coltype in ((x1, 'double precision'),
                             (y1, 'double precision'),
                             (x2, 'double precision'),
                             (y2, 'double precision'),
                             ('tostream', 'int')):
        try:
            streamsTopo.table.columns.add(colname, coltype)
        except Exception:
            pass  # column already exists
    streamsTopo.table.conn.commit()

    # Is this faster than v.to.db?
    """
    cur = streamsTopo.table.conn.cursor()
    for i in range(len(points_in_streams)):
        cur.execute("update streams set x1="+str(points_in_streams[i][0].x)+" where cat="+str(cat_of_line_segment[i]))
        cur.execute("update streams set y1="+str(points_in_streams[i][0].y)+" where cat="+str(cat_of_line_segment[i]))
        cur.execute("update streams set x2="+str(points_in_streams[i][-1].x)+" where cat="+str(cat_of_line_segment[i]))
        cur.execute("update streams set y2="+str(points_in_streams[i][-1].y)+" where cat="+str(cat_of_line_segment[i]))
    streamsTopo.table.conn.commit()
    streamsTopo.build()
    """
    # v.to.db Works more consistently, at least
    streamsTopo.close()
    v.to_db(map=streams, option='start', columns=x1 + ',' + y1)
    v.to_db(map=streams, option='end', columns=x2 + ',' + y2)

    # 4. Read in and save the start and end coordinate points
    colNames = np.array(vector_db_select(streams)['columns'])
    colValues = np.array(vector_db_select(streams)['values'].values())
    cats = colValues[:,
                     colNames == 'cat'].astype(int).squeeze()  # river number
    xy1 = colValues[:, (colNames == 'x1') + (colNames == 'y1')].astype(
        float)  # upstream
    xy2 = colValues[:, (colNames == 'x2') + (colNames == 'y2')].astype(
        float)  # downstream

    # 5. Build river network
    tocat = []
    for i in range(len(cats)):
        tosegment_mask = np.prod(xy1 == xy2[i], axis=1)
        if np.sum(tosegment_mask) == 0:
            tocat.append(0)
        else:
            tocat.append(cats[tosegment_mask.nonzero()[0][0]])
    tocat = np.asarray(tocat).astype(int)

    # This gives us a set of downstream-facing adjacencies.
    # We will update the database with it.
    streamsTopo.build()
    streamsTopo.open('rw')
    cur = streamsTopo.table.conn.cursor()
    # Default to 0 if no stream flows to it
    cur.execute("update " + streams + " set tostream=0")
    for i in range(len(tocat)):
        cur.execute("update " + streams + " set tostream=" + str(tocat[i]) +
                    " where cat=" + str(cats[i]))
    streamsTopo.table.conn.commit()
    #streamsTopo.build()
    streamsTopo.close()

    gscript.message('')
    gscript.message(
        'Drainage topology built. Check "tostream" column for the downstream cat.'
    )
    gscript.message('A cat value of 0 indicates the downstream-most segment.')
    gscript.message('')
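
# The endpoint matching in step 5 above is O(N^2). A dictionary keyed on the
# upstream endpoints makes it roughly O(N); coordinates are rounded first so
# that independently written start/end points still hash to the same key
# (sketch only -- the rounding tolerance is an assumption, not from the module).
def build_tocat_fast(cats, xy1, xy2, decimals=6):
    import numpy as np
    upstream_lookup = {}
    for cat, (x, y) in zip(cats, np.round(xy1, decimals)):
        upstream_lookup[(x, y)] = cat
    tocat = [upstream_lookup.get((x, y), 0)      # 0 = flows off the map
             for x, y in np.round(xy2, decimals)]
    return np.asarray(tocat, dtype=int)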
Esempio n. 43
0
# There should be a way to do this all at once, but...
for i in range(len(cats)):
  grass.run_command('v.db.update', map='HRU', column='id', value=nhru[i], where='cat='+str(cats[i]))
nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
for i in range(len(cats)):
  grass.run_command('v.db.update', map='segment', column='id', value=nsegment[i], where='cat='+str(cats[i]))
"""

nhru = np.arange(1, xy1.shape[0]+1)
nhrut = []
for i in range(len(nhru)):
  nhrut.append( (nhru[i], cats[i]) )
# Access the HRU's 
hru = VectorTopo('HRU')
# Open the map with topology:
hru.open('rw')
# Create a cursor
cur = hru.table.conn.cursor()
# Use it to loop across the table
cur.executemany("update HRU set id=? where cat=?", nhrut)
# Commit changes to the table
hru.table.conn.commit()
# Close the table
hru.close()

# if you want to append to table
# cur.executemany("update HRU(id) values(?)", nhrut) # "insert into" will add rows

# Same for segments
nsegment = nhru.copy() # ONLY FOR THIS SPECIAL CASE -- will be different in general
nsegmentt = nhrut # ONLY FOR THIS SPECIAL CASE -- will be different in general