Example #1
    def test_strahler(self):
        self.assertModule(
            "v.stream.order",
            input="stream_network",
            points="stream_network_outlets",
            output="stream_network_order_test_strahler",
            threshold=25,
            order=["strahler"],
            overwrite=True,
            verbose=True,
        )

        # Check the strahler value
        v = VectorTopo(name="stream_network_order_test_strahler", mapset="")
        v.open(mode="r")

        self.assertTrue(v.exist())
        self.assertEqual(v.num_primitive_of("line"), 101)
        # feature 4
        self.assertEqual(v.read(4).attrs.cat, 41)
        self.assertEqual(v.read(4).attrs["outlet_cat"], 1)
        self.assertEqual(v.read(4).attrs["network"], 1)
        self.assertEqual(v.read(4).attrs["reversed"], 0)
        self.assertEqual(v.read(4).attrs["strahler"], 4)

        v.close()
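
The test relies on two pygrass conventions: VectorTopo.read() takes 1-based feature ids, and the returned geometry exposes its attribute row through .attrs, which allows both attribute-style (.cat) and mapping-style (["strahler"]) access. A minimal sketch of that read pattern, assuming the map written above exists:

from grass.pygrass.vector import VectorTopo

v = VectorTopo("stream_network_order_test_strahler")
v.open(mode="r")
feature = v.read(4)               # feature ids are 1-based
print(feature.attrs.cat)          # attribute-style access
print(feature.attrs["strahler"])  # mapping-style access
v.close()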
Example #2
def _get_vector_features_as_wkb_list(lock, conn, data):
    """Return vector layer features as wkb list

    supported feature types:
    point, centroid, line, boundary, area

    :param lock: A multiprocessing.Lock instance
    :param conn: A multiprocessing.Pipe instance used to send back the wkb
                 list, or None on failure
    :param data: The list of data entries [function_id,name,mapset,extent,
                                           feature_type, field]

    """
    wkb_list = None
    try:
        name = data[1]
        mapset = data[2]
        extent = data[3]
        feature_type = data[4]
        field = data[5]
        bbox = None

        mapset = utils.get_mapset_vector(name, mapset)

        if not mapset:
            raise ValueError("Unable to find vector map <%s>" % (name))

        layer = VectorTopo(name, mapset)

        if layer.exist() is True:
            if extent is not None:
                bbox = Bbox(
                    north=extent["north"],
                    south=extent["south"],
                    east=extent["east"],
                    west=extent["west"],
                )

            layer.open("r")
            if feature_type.lower() == "area":
                wkb_list = layer.areas_to_wkb_list(bbox=bbox, field=field)
            else:
                wkb_list = layer.features_to_wkb_list(
                    bbox=bbox, feature_type=feature_type, field=field)
            layer.close()
    finally:
        # Send even if an exception was raised.
        conn.send(wkb_list)
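
The docstring only hints at the transport, so here is a minimal driver sketch for this worker, assuming it runs inside a GRASS session; the map name "streams" and mapset "PERMANENT" are placeholders:

from multiprocessing import Lock, Pipe, Process

parent_conn, child_conn = Pipe()
# data layout: [function_id, name, mapset, extent, feature_type, field]
data = [0, "streams", "PERMANENT", None, "line", None]
worker = Process(target=_get_vector_features_as_wkb_list,
                 args=(Lock(), child_conn, data))
worker.start()
wkb_list = parent_conn.recv()  # the wkb list, or None on failure
worker.join()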
Example #3
def _get_vector_table_as_dict(lock, conn, data):
    """Get the table of a vector map layer as dictionary

    :param lock: A multiprocessing.Lock instance
    :param conn: A multiprocessing.Pipe instance used to send back the result
                 dictionary, or None on failure
    :param data: The list of data entries [function_id, name, mapset, where]

    """
    ret = None
    try:
        name = data[1]
        mapset = data[2]
        where = data[3]

        mapset = utils.get_mapset_vector(name, mapset)

        if not mapset:
            raise ValueError("Unable to find vector map <%s>" % (name))

        layer = VectorTopo(name, mapset)

        if layer.exist() is True:
            layer.open("r")
            columns = None
            table = None
            if layer.table is not None:
                columns = layer.table.columns
                table = layer.table_to_dict(where=where)
            layer.close()

            ret = {"table": table, "columns": columns}
    finally:
        # Send even if an exception was raised.
        conn.send(ret)
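
The same Pipe pattern applies here; the parent receives either None or a dictionary with "columns" and "table" keys. A small consumer sketch (map name and mapset again placeholders):

from multiprocessing import Lock, Pipe, Process

parent_conn, child_conn = Pipe()
data = [0, "streams", "PERMANENT", None]  # [function_id, name, mapset, where]
worker = Process(target=_get_vector_table_as_dict,
                 args=(Lock(), child_conn, data))
worker.start()
ret = parent_conn.recv()
worker.join()
if ret is not None:
    print(ret["columns"])  # Columns object describing the attribute table
    print(ret["table"])    # rows as returned by table_to_dict()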
Example #4
def extendLine(map, map_out, maxlen=200, scale=0.5, debug=False, verbose=1):
    #
    # map=Input map name
    # map_out=Output map with extensions
    # maxlen=Max length in map units that line can be extended (def=200)
    # scale=Maximum length of extension as proportion of original line, disabled if 0 (def=0.5)
    # vlen=number of vertices to look back in calculating line end direction (def=1)
    # Not sure if it is worth putting this in as parameter.
    #
    allowOverwrite = os.getenv('GRASS_OVERWRITE', '0') == '1'
    grass.info("map={}, map_out={}, maxlen={}, scale={}, debug={}".format(
        map, map_out, maxlen, scale, debug))
    vlen = 1  # not sure if this is worth putting in as parameter
    cols = [(u'cat', 'INTEGER PRIMARY KEY'), (u'parent', 'INTEGER'),
            (u'dend', 'TEXT'), (u'orgx', 'DOUBLE PRECISION'),
            (u'orgy', 'DOUBLE PRECISION'), (u'search_len', 'DOUBLE PRECISION'),
            (u'search_az', 'DOUBLE PRECISION'), (u'best_xid', 'INTEGER'),
            (u'near_x', 'DOUBLE PRECISION'), (u'near_y', 'DOUBLE PRECISION'),
            (u'other_cat', 'INTEGER'), (u'xtype', 'TEXT'),
            (u'x_len', 'DOUBLE PRECISION')]
    extend = VectorTopo('extend')
    if extend.exist():
        extend.remove()
    extend.open('w', tab_name='extend', tab_cols=cols)
    #
    # Go through input map, looking at each line and its two nodes to find nodes
    # with only a single line starting/ending there - i.e. a dangle.
    # For each found, generate an extension line in the new map "extend"
    #
    inMap = VectorTopo(map)
    inMap.open('r')
    dangleCnt = 0
    tickLen = len(inMap)
    grass.info("Searching {} features for dangles".format(tickLen))
    ticker = 0
    grass.message("Percent complete...")
    for ln in inMap:
        ticker = (ticker + 1)
        grass.percent(ticker, tickLen, 5)
        if ln.gtype == 2:  # Only process lines
            for nd in ln.nodes():
                if nd.nlines == 1:  # We have a dangle
                    dangleCnt = dangleCnt + 1
                    vtx = min(len(ln) - 1, vlen)
                    # Dangle starting at node?
                    if len([1 for _ in nd.lines(only_out=True)]) == 1:
                        dend = "head"
                        sx = ln[0].x
                        sy = ln[0].y
                        dx = sx - ln[vtx].x
                        dy = sy - ln[vtx].y
                    else:  # Dangle ending at node
                        dend = "tail"
                        sx = ln[-1].x
                        sy = ln[-1].y
                        dx = sx - ln[-(vtx + 1)].x
                        dy = sy - ln[-(vtx + 1)].y
                    endaz = math.atan2(dy, dx)
                    if scale > 0:
                        extLen = min(ln.length() * scale, maxlen)
                    else:
                        extLen = maxlen
                    ex = extLen * math.cos(endaz) + sx
                    ey = extLen * math.sin(endaz) + sy
                    extLine = geo.Line([(sx, sy), (ex, ey)])
                    quiet = extend.write(extLine,
                                         (ln.cat, dend, sx, sy, extLen, endaz,
                                          0, 0, 0, 0, 'null', extLen))

    grass.info(
        "{} dangle nodes found, committing table extend".format(dangleCnt))
    extend.table.conn.commit()
    extend.close(build=True, release=True)
    inMap.close()

    #
    # Create two tables where extensions intersect;
    # 1. intersect with original lines
    # 2. intersect with self - to extract intersects between extensions
    #
    # First the intersects with original lines
    grass.info(
        "Searching for intersects between potential extensions and original lines"
    )
    table_isectIn = Table('isectIn',
                          connection=sqlite3.connect(get_path(path)))
    if table_isectIn.exist():
        table_isectIn.drop(force=True)
    run_command("v.distance",
                flags='a',
                overwrite=True,
                quiet=True,
                from_="extend",
                from_type="line",
                to=map,
                to_type="line",
                dmax="0",
                upload="cat,dist,to_x,to_y",
                column="near_cat,dist,nx,ny",
                table="isectIn")
    # Will have touched the dangle it comes from, so remove those touches
    run_command(
        "db.execute",
        sql=
        "DELETE FROM isectIn WHERE rowid IN (SELECT isectIn.rowid FROM isectIn INNER JOIN extend ON from_cat=cat WHERE near_cat=parent)",
        driver="sqlite",
        database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    run_command("db.execute",
                sql="ALTER TABLE isectIn ADD ntype VARCHAR",
                driver="sqlite",
                database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    run_command("db.execute",
                sql="UPDATE isectIn SET ntype = 'orig' ",
                driver="sqlite",
                database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    #
    # Now second self intersect table
    #
    grass.info("Searching for intersects of potential extensions")
    table_isectX = Table('isectX', connection=sqlite3.connect(get_path(path)))
    if table_isectX.exist():
        table_isectX.drop(force=True)
    run_command("v.distance",
                flags='a',
                overwrite=True,
                quiet=True,
                from_="extend",
                from_type="line",
                to="extend",
                to_type="line",
                dmax="0",
                upload="cat,dist,to_x,to_y",
                column="near_cat,dist,nx,ny",
                table="isectX")
    # Obviously all extensions will intersect with themselves, so remove those "intersects"
    run_command("db.execute",
                sql="DELETE FROM isectX WHERE from_cat = near_cat",
                driver="sqlite",
                database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    run_command("db.execute",
                sql="ALTER TABLE isectX ADD ntype VARCHAR",
                driver="sqlite",
                database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    run_command("db.execute",
                sql="UPDATE isectX SET ntype = 'ext' ",
                driver="sqlite",
                database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    #
    # Combine the two tables and add a few more attributes
    #
    run_command("db.execute",
                sql="INSERT INTO isectIn SELECT * FROM isectX",
                driver="sqlite",
                database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    cols_isectIn = Columns('isectIn',
                           connection=sqlite3.connect(get_path(path)))
    cols_isectIn.add(['from_x'], ['DOUBLE PRECISION'])
    cols_isectIn.add(['from_y'], ['DOUBLE PRECISION'])
    cols_isectIn.add(['ext_len'], ['DOUBLE PRECISION'])
    # Get starting coordinate at the end of the dangle
    run_command(
        "db.execute",
        sql=
        "UPDATE isectIn SET from_x = (SELECT extend.orgx FROM extend WHERE from_cat=extend.cat)",
        driver="sqlite",
        database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    run_command(
        "db.execute",
        sql=
        "UPDATE isectIn SET from_y = (SELECT extend.orgy FROM extend WHERE from_cat=extend.cat)",
        driver="sqlite",
        database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    table_isectIn.conn.commit()
    # For each intersect point, calculate the distance along extension line from end of dangle
    # Would be nicer to do this in the database but SQLite doesn't support sqrt or exponents
    grass.info(
        "Calculating distances of intersects along potential extensions")
    cur = table_isectIn.execute(
        sql_code="SELECT rowid, from_x, from_y, nx, ny FROM isectIn")
    for row in cur.fetchall():
        rowid, fx, fy, nx, ny = row
        x_len = math.sqrt((fx - nx)**2 + (fy - ny)**2)
        sqlStr = "UPDATE isectIn SET ext_len={:.8f} WHERE rowid={:d}".format(
            x_len, rowid)
        table_isectIn.execute(sql_code=sqlStr)
    grass.verbose("Ready to commit isectIn changes")
    table_isectIn.conn.commit()
    # Remove any zero distance from end of their dangle.
    # This happens when another extension intersects exactly at that point
    run_command("db.execute",
                sql="DELETE FROM isectIn WHERE ext_len = 0.0",
                driver="sqlite",
                database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    table_isectIn.conn.commit()

    # Go through the extensions and find the intersect closest to each origin.
    grass.info("Searching for closest intersect for each potential extension")

    # db.execute sql="ALTER TABLE extend_t1 ADD COLUMN bst INTEGER"
    # db.execute sql="ALTER TABLE extend_t1 ADD COLUMN nrx DOUBLE PRECISION"
    # db.execute sql="ALTER TABLE extend_t1 ADD COLUMN nry DOUBLE PRECISION"
    # db.execute sql="ALTER TABLE extend_t1 ADD COLUMN ocat TEXT"
    #    run_command("db.execute",
    #                sql = "INSERT OR REPLACE INTO extend_t1 (bst, nrx, nry, ocat) VALUES ((SELECT isectIn.rowid, ext_len, nx, ny, near_cat, ntype FROM isectIn WHERE from_cat=extend_t1.cat ORDER BY ext_len ASC LIMIT 1))",
    #               driver = "sqlite",
    #               database = "$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")

    grass.verbose("CREATE index")
    run_command("db.execute",
                sql="CREATE INDEX idx_from_cat ON isectIn (from_cat)",
                driver="sqlite",
                database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    grass.verbose("UPDATE best_xid")
    run_command(
        "db.execute",
        sql=
        "UPDATE extend SET best_xid = (SELECT isectIn.rowid FROM isectIn WHERE from_cat=extend.cat ORDER BY ext_len ASC LIMIT 1)",
        driver="sqlite",
        database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    grass.verbose("UPDATE x_len")
    run_command(
        "db.execute",
        sql=
        "UPDATE extend SET x_len = (SELECT ext_len FROM isectIn WHERE from_cat=extend.cat ORDER BY ext_len ASC LIMIT 1)",
        driver="sqlite",
        database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    grass.verbose("UPDATE near_x")
    run_command(
        "db.execute",
        sql=
        "UPDATE extend SET near_x = (SELECT nx FROM isectIn WHERE from_cat=extend.cat ORDER BY ext_len ASC LIMIT 1)",
        driver="sqlite",
        database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    grass.verbose("UPDATE near_y")
    run_command(
        "db.execute",
        sql=
        "UPDATE extend SET near_y = (SELECT ny FROM isectIn WHERE from_cat=extend.cat ORDER BY ext_len ASC LIMIT 1)",
        driver="sqlite",
        database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    grass.verbose("UPDATE other_cat")
    run_command(
        "db.execute",
        sql=
        "UPDATE extend SET other_cat = (SELECT near_cat FROM isectIn WHERE from_cat=extend.cat ORDER BY ext_len ASC LIMIT 1)",
        driver="sqlite",
        database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    grass.verbose("UPDATE xtype")
    run_command(
        "db.execute",
        sql=
        "UPDATE extend SET xtype = (SELECT ntype FROM isectIn WHERE from_cat=extend.cat ORDER BY ext_len ASC LIMIT 1)",
        driver="sqlite",
        database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    grass.verbose("DROP index")
    run_command("db.execute",
                sql="DROP INDEX idx_from_cat",
                driver="sqlite",
                database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    grass.verbose("CREATE index on near_cat")
    run_command("db.execute",
                sql="CREATE INDEX idx_near_cat ON isectIn (near_cat)",
                driver="sqlite",
                database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")

    quiet = table_isectIn.filters.select('rowid', 'ext_len', 'nx', 'ny',
                                         'near_cat', 'ntype')
    #    quiet=table_isectIn.filters.order_by(['ext_len ASC'])
    quiet = table_isectIn.filters.order_by('ext_len ASC')
    quiet = table_isectIn.filters.limit(1)
    table_extend = Table('extend', connection=sqlite3.connect(get_path(path)))

    # Code below was replaced by the commands above until the memory problem can be sorted
    #    table_extend.filters.select('cat')
    #    cur=table_extend.execute()
    #    updateCnt = 0
    #    for row in cur.fetchall():
    #        cat, = row
    #        quiet=table_isectIn.filters.where('from_cat={:d}'.format(cat))

    ##SELECT rowid, ext_len, nx, ny, near_cat, ntype FROM isectIn WHERE from_cat=32734 ORDER BY ext_len ASC LIMIT 1

    #        x_sect=table_isectIn.execute().fetchone()
    #        if x_sect is not None:
    #            x_rowid, ext_len, nx, ny, other_cat, ntype = x_sect
    #            sqlStr="UPDATE extend SET best_xid={:d}, x_len={:.8f}, near_x={:.8f}, near_y={:.8f}, other_cat={:d}, xtype='{}' WHERE cat={:d}".format(x_rowid, ext_len, nx, ny, other_cat, ntype, cat)
    #            table_extend.execute(sql_code=sqlStr)
    ## Try periodic commit to avoid a crash!
    #            updateCnt = (updateCnt + 1) % 10000
    #            if updateCnt == 0:
    #              table_extend.conn.commit()
    grass.verbose("Ready to commit extend changes")
    table_extend.conn.commit()
    #
    # There may be extensions that crossed, and that intersection chosen by one but
    # not "reciprocated" by the other.
    # Need to remove those possibilities and allow the jilted extension to re-search.
    #
    grass.verbose("Deleting intersects already resolved")
    run_command(
        "db.execute",
        sql=
        "DELETE FROM isectIn WHERE rowid IN (SELECT isectIn.rowid FROM isectIn JOIN extend ON near_cat=cat WHERE ntype='ext' AND xtype!='null')",  #"AND from_cat!=other_cat" no second chance!
        driver="sqlite",
        database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db")
    table_isectIn.conn.commit()
    grass.verbose("Deleting complete")

    # To find the jilted - need a copy of extensions that have found an
    # intersection (won't overwrite so drop first)
    grass.verbose(
        "Re-searching for mis-matched intersects between potential extensions")
    table_imatch = Table('imatch', connection=sqlite3.connect(get_path(path)))
    if table_imatch.exist():
        table_imatch.drop(force=True)
    wvar = "xtype!='null'"
    run_command(
        "db.copy",
        overwrite=True,
        quiet=True,
        from_driver="sqlite",
        from_database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db",
        from_table="extend",
        to_driver="sqlite",
        to_database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db",
        to_table="imatch",
        where=wvar)
    # Memory problems?
    if gc.isenabled():
        grass.verbose("Garbage collection enabled - forcing gc cycle")
        gc.collect()
    else:
        grass.verbose("Garbage collection not enabled")
    # Ensure tables are committed
    table_extend.conn.commit()
    table_imatch.conn.commit()
    table_isectIn.conn.commit()
    # Identify the jilted
    sqlStr = "SELECT extend.cat FROM extend JOIN imatch ON extend.other_cat=imatch.cat WHERE extend.xtype='ext' and extend.cat!=imatch.other_cat"
    cur = table_extend.execute(sql_code=sqlStr)
    updateCnt = 0
    for row in cur.fetchall():
        cat, = row
        grass.verbose("Reworking extend.cat={}".format(cat))
        quiet = table_isectIn.filters.where('from_cat={:d}'.format(cat))
        #print("SQL: {}".format(table_isectIn.filters.get_sql()))
        x_sect = table_isectIn.execute().fetchone()  # Problem here under modules
        if x_sect is None:
            sqlStr = "UPDATE extend SET best_xid=0, x_len=search_len, near_x=0, near_y=0, other_cat=0, xtype='null' WHERE cat={:d}".format(
                cat)
        else:
            x_rowid, ext_len, nx, ny, other_cat, ntype = x_sect
            sqlStr = "UPDATE extend SET best_xid={:d}, x_len={:.8f}, near_x={:.8f}, near_y={:.8f}, other_cat={:d}, xtype='{}' WHERE cat={:d}".format(
                x_rowid, ext_len, nx, ny, other_cat, ntype, cat)
        table_extend.execute(sql_code=sqlStr)
        ## Try periodic commit to avoid a crash!
        updateCnt = (updateCnt + 1) % 100
        if (updateCnt == 0):  # or (cat == 750483):
            grass.verbose(
                "XXXXXXXXXXX Committing table_extend XXXXXXXXXXXXXXXXXXXXXX")
            table_extend.conn.commit()

    grass.verbose("Committing adjustments to table extend")
    table_extend.conn.commit()
    #
    # For debugging, create a map with the chosen intersect points
    #
    if debug:
        wvar = "xtype!='null' AND x_len!=0"
        #        print(wvar)
        run_command(
            "v.in.db",
            overwrite=True,
            quiet=True,
            table="extend",
            driver="sqlite",
            database="$GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db",
            x="near_x",
            y="near_y",
            key="cat",
            where=wvar,
            output="chosen")
    #
    # Finally adjust the dangle lines in input map - use a copy (map_out) if requested
    #
    if map_out:
        run_command("g.copy",
                    overwrite=allowOverwrite,
                    quiet=True,
                    vector=map + "," + map_out)
    else:  # Otherwise just modify the original dataset (map)
        if allowOverwrite:
            grass.warning("Modifying vector map ({})".format(map))
            map_out = map
        else:
            grass.error(
                "Use the --o switch to modify the input vector map ({})".format(
                    map))
            return 1
    #
    # Get info for lines that need extending
    table_extend.filters.select(
        'parent, dend, near_x, near_y, search_az, xtype')
    table_extend.filters.where("xtype!='null'")
    extLines = table_extend.execute().fetchall()
    cat_mods = [ext[0] for ext in extLines]
    tickLen = len(cat_mods)
    grass.info("Extending {} dangles".format(tickLen))
    ticker = 0
    grass.message("Percent complete...")

    # Open up the map_out copy (or the original) and work through looking for lines that need modifying
    inMap = VectorTopo(map_out)
    inMap.open('rw', tab_name=map_out)

    for ln_idx in range(len(inMap)):
        ln = inMap.read(ln_idx + 1)
        if ln.gtype == 2:  # Only process lines
            while ln.cat in cat_mods:  # Note: could be 'head' and 'tail'
                ticker = (ticker + 1)
                grass.percent(ticker, tickLen, 5)
                cat_idx = cat_mods.index(ln.cat)
                cat, dend, nx, ny, endaz, xtype = extLines.pop(cat_idx)
                dump = cat_mods.pop(cat_idx)
                if xtype == 'orig':  # Overshoot by 0.1 as break lines is unreliable
                    nx = nx + 0.1 * math.cos(endaz)
                    ny = ny + 0.1 * math.sin(endaz)
                newEnd = geo.Point(x=nx, y=ny, z=None)
                if dend == 'head':
                    ln.insert(0, newEnd)
                else:  # 'tail'
                    ln.append(newEnd)
                quiet = inMap.rewrite(ln_idx + 1, ln)
        else:
            quiet = inMap.delete(ln_idx + 1)


        ## Try periodic commit and garbage collection to avoid a crash!
        if (ln_idx % 1000) == 0:
            # inMap.table.conn.commit() - no such thing - why??
            if gc.isenabled():
                quiet = gc.collect()

    inMap.close(build=True, release=True)
    grass.message("v.extendlines completing")
    #
    # Clean up temporary tables and maps
    #
    if not debug:
        table_isectIn.drop(force=True)
        table_isectX.drop(force=True)
        table_imatch.drop(force=True)
        extend.remove()
        chosen = VectorTopo('chosen')
        if chosen.exist():
            chosen.remove()
    return 0
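
The geometric core of the dangle extension above is a few lines of trigonometry: the direction of the line end is taken from the vertex vlen steps back using atan2, and the new endpoint is placed extLen map units along that azimuth. A self-contained sketch with hypothetical coordinates:

import math

# dangle ends at (sx, sy); (px, py) is the vertex vlen steps back
sx, sy, px, py = 100.0, 200.0, 97.0, 196.0
line_len, scale, maxlen = 50.0, 0.5, 200.0

endaz = math.atan2(sy - py, sx - px)  # azimuth of the line end
extLen = min(line_len * scale, maxlen) if scale > 0 else maxlen
ex = sx + extLen * math.cos(endaz)    # extension endpoint
ey = sy + extLen * math.sin(endaz)
print(ex, ey)  # 115.0 220.0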
Example #5
def main():
    from dateutil.parser import parse

    try:
        from pygbif import occurrences
        from pygbif import species
    except ImportError:
        grass.fatal(
            _("Cannot import pygbif (https://github.com/sckott/pygbif)"
              " library."
              " Please install it (pip install pygbif)"
              " or ensure that it is on path"
              " (use PYTHONPATH variable)."))

    # Parse input options
    output = options["output"]
    mask = options["mask"]
    species_maps = flags["i"]
    no_region_limit = flags["r"]
    no_topo = flags["b"]
    print_species = flags["p"]
    print_species_table = flags["t"]
    print_species_shell = flags["g"]
    print_occ_number = flags["o"]
    allow_no_geom = flags["n"]
    hasGeoIssue = flags["s"]
    taxa_list = options["taxa"].split(",")
    institutionCode = options["institutioncode"]
    basisofrecord = options["basisofrecord"]
    recordedby = options["recordedby"].split(",")
    date_from = options["date_from"]
    date_to = options["date_to"]
    country = options["country"]
    continent = options["continent"]
    rank = options["rank"]

    # Define static variable
    # Initialize cat
    cat = 0
    # Number of occurrences to fetch in one request
    chunk_size = 300
    # lat/lon proj string
    latlon_crs = [
        "+proj=longlat +no_defs +a=6378137 +rf=298.257223563 +towgs84=0.000,0.000,0.000",
        "+proj=longlat +no_defs +a=6378137 +rf=298.257223563 +towgs84=0,0,0,0,0,0,0",
        "+proj=longlat +no_defs +a=6378137 +rf=298.257223563 +towgs84=0.000,0.000,0.000 +type=crs",
    ]
    # List attributes available in Darwin Core
    # not all attributes are returned in each request
    # to avoid key errors when accessing the dictionary returned by pygbif
    # presence of DWC keys in the returned dictionary is checked using this list
    # The number of keys in this list has to be equal to the number of columns
    # in the attribute table and the attributes written for each occurrence
    dwc_keys = [
        "key",
        "taxonRank",
        "taxonKey",
        "taxonID",
        "scientificName",
        "species",
        "speciesKey",
        "genericName",
        "genus",
        "genusKey",
        "family",
        "familyKey",
        "order",
        "orderKey",
        "class",
        "classKey",
        "phylum",
        "phylumKey",
        "kingdom",
        "kingdomKey",
        "eventDate",
        "verbatimEventDate",
        "startDayOfYear",
        "endDayOfYear",
        "year",
        "month",
        "day",
        "occurrenceID",
        "occurrenceStatus",
        "occurrenceRemarks",
        "Habitat",
        "basisOfRecord",
        "preparations",
        "sex",
        "type",
        "locality",
        "verbatimLocality",
        "decimalLongitude",
        "decimalLatitude",
        "coordinateUncertaintyInMeters",
        "geodeticDatum",
        "higerGeography",
        "continent",
        "country",
        "countryCode",
        "stateProvince",
        "gbifID",
        "protocol",
        "identifier",
        "recordedBy",
        "identificationID",
        "identifiers",
        "dateIdentified",
        "modified",
        "institutionCode",
        "lastInterpreted",
        "lastParsed",
        "references",
        "relations",
        "catalogNumber",
        "occurrenceDetails",
        "datasetKey",
        "datasetName",
        "collectionCode",
        "rights",
        "rightsHolder",
        "license",
        "publishingOrgKey",
        "publishingCountry",
        "lastCrawled",
        "specificEpithet",
        "facts",
        "issues",
        "extensions",
        "language",
    ]
    # Define columns for attribute table
    cols = [
        ("cat", "INTEGER PRIMARY KEY"),
        ("g_search", "varchar(100)"),
        ("g_key", "integer"),
        ("g_taxonrank", "varchar(50)"),
        ("g_taxonkey", "integer"),
        ("g_taxonid", "varchar(50)"),
        ("g_scientificname", "varchar(255)"),
        ("g_species", "varchar(255)"),
        ("g_specieskey", "integer"),
        ("g_genericname", "varchar(255)"),
        ("g_genus", "varchar(50)"),
        ("g_genuskey", "integer"),
        ("g_family", "varchar(50)"),
        ("g_familykey", "integer"),
        ("g_order", "varchar(50)"),
        ("g_orderkey", "integer"),
        ("g_class", "varchar(50)"),
        ("g_classkey", "integer"),
        ("g_phylum", "varchar(50)"),
        ("g_phylumkey", "integer"),
        ("g_kingdom", "varchar(50)"),
        ("g_kingdomkey", "integer"),
        ("g_eventdate", "text"),
        ("g_verbatimeventdate", "varchar(50)"),
        ("g_startDayOfYear", "integer"),
        ("g_endDayOfYear", "integer"),
        ("g_year", "integer"),
        ("g_month", "integer"),
        ("g_day", "integer"),
        ("g_occurrenceid", "varchar(255)"),
        ("g_occurrenceStatus", "varchar(50)"),
        ("g_occurrenceRemarks", "varchar(50)"),
        ("g_Habitat", "varchar(50)"),
        ("g_basisofrecord", "varchar(50)"),
        ("g_preparations", "varchar(50)"),
        ("g_sex", "varchar(50)"),
        ("g_type", "varchar(50)"),
        ("g_locality", "varchar(255)"),
        ("g_verbatimlocality", "varchar(255)"),
        ("g_decimallongitude", "double precision"),
        ("g_decimallatitude", "double precision"),
        ("g_coordinateUncertaintyInMeters", "double precision"),
        ("g_geodeticdatum", "varchar(50)"),
        ("g_higerGeography", "varchar(255)"),
        ("g_continent", "varchar(50)"),
        ("g_country", "varchar(50)"),
        ("g_countryCode", "varchar(50)"),
        ("g_stateProvince", "varchar(50)"),
        ("g_gbifid", "varchar(255)"),
        ("g_protocol", "varchar(255)"),
        ("g_identifier", "varchar(50)"),
        ("g_recordedby", "varchar(255)"),
        ("g_identificationid", "varchar(255)"),
        ("g_identifiers", "text"),
        ("g_dateidentified", "text"),
        ("g_modified", "text"),
        ("g_institutioncode", "varchar(50)"),
        ("g_lastinterpreted", "text"),
        ("g_lastparsed", "text"),
        ("g_references", "varchar(255)"),
        ("g_relations", "text"),
        ("g_catalognumber", "varchar(50)"),
        ("g_occurrencedetails", "text"),
        ("g_datasetkey", "varchar(50)"),
        ("g_datasetname", "varchar(255)"),
        ("g_collectioncode", "varchar(50)"),
        ("g_rights", "varchar(255)"),
        ("g_rightsholder", "varchar(255)"),
        ("g_license", "varchar(50)"),
        ("g_publishingorgkey", "varchar(50)"),
        ("g_publishingcountry", "varchar(50)"),
        ("g_lastcrawled", "text"),
        ("g_specificepithet", "varchar(50)"),
        ("g_facts", "text"),
        ("g_issues", "text"),
        ("g_extensions", "text"),
        ("g_language", "varchar(50)"),
    ]

    # maybe no longer required in Python3
    set_output_encoding()
    # Set temporal filter if requested by user
    # Initialize eventDate filter
    eventDate = None
    # Check if date from is compatible (ISO compliant)
    if date_from:
        try:
            parse(date_from)
        except:
            grass.fatal("Invalid invalid start date provided")

        if date_from and not date_to:
            eventDate = "{}".format(date_from)
    # Check if date to is compatible (ISO compliant)
    if date_to:
        try:
            parse(date_to)
        except:
            grass.fatal("Invalid invalid end date provided")
        # Check if date to is after date_from
        if parse(date_from) < parse(date_to):
            eventDate = "{},{}".format(date_from, date_to)
        else:
            grass.fatal(
                "Invalid date range: End date has to be after start date!")
    # Set filter on basisOfRecord if requested by user
    if basisofrecord == "ALL":
        basisOfRecord = None
    else:
        basisOfRecord = basisofrecord
    # Allow also occurrences with spatial issues if requested by user
    hasGeospatialIssue = False
    if hasGeoIssue:
        hasGeospatialIssue = True
    # Allow also occurrences without coordinates if requested by user
    hasCoordinate = True
    if allow_no_geom:
        hasCoordinate = False

    # Set reprojection parameters
    # Set target projection of current LOCATION
    proj_info = grass.parse_command("g.proj", flags="g")
    target_crs = grass.read_command("g.proj", flags="fj").rstrip()
    target = osr.SpatialReference()

    # Prefer EPSG CRS definitions
    if proj_info["epsg"]:
        target.ImportFromEPSG(int(proj_info["epsg"]))
    else:
        target.ImportFromProj4(target_crs)

    # GDAL >= 3 swaps x and y axis, see: github.com/OSGeo/gdal/issues/1546
    if int(gdal_version[0]) >= 3:
        target.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)

    if target_crs == "XY location (unprojected)":
        grass.fatal("Sorry, XY locations are not supported!")

    # Set source projection from GBIF
    source = osr.SpatialReference()
    source.ImportFromEPSG(4326)
    # GDAL >= 3 swaps x and y axis, see: github.com/OSGeo/gdal/issues/1546
    if int(gdal_version[0]) >= 3:
        source.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)

    if target_crs not in latlon_crs:
        transform = osr.CoordinateTransformation(source, target)
        reverse_transform = osr.CoordinateTransformation(target, source)

    # Generate WKT polygon to use for spatial filtering if requested
    if mask:
        if len(mask.split("@")) == 2:
            m = VectorTopo(mask.split("@")[0], mapset=mask.split("@")[1])
        else:
            m = VectorTopo(mask)
        if not m.exist():
            grass.fatal("Could not find vector map <{}>".format(mask))
        m.open("r")
        if not m.is_open():
            grass.fatal("Could not open vector map <{}>".format(mask))

        # Use the map bbox as spatial filter if the map does not contain exactly one area
        if m.number_of("areas") == 1:
            region_pol = [area.to_wkt() for area in m.viter("areas")][0]
        else:
            bbox = (str(m.bbox()).replace("Bbox(", "").replace(
                " ", "").rstrip(")").split(","))
            region_pol = "POLYGON (({0} {1}, {0} {3}, {2} {3}, {2} {1}, {0} {1}))".format(
                bbox[2], bbox[0], bbox[3], bbox[1])
        m.close()
    else:
        # Do not limit import spatially if LOCATION is able to take global data
        if no_region_limit:
            if target_crs not in latlon_crs:
                grass.fatal("Import of data from outside the current region is"
                            "only supported in a WGS84 location!")
            region_pol = None
        else:
            # Limit import spatially to current region
            # if LOCATION is !NOT! able to take global data
            # to avoid projection errors
            region = grass.parse_command("g.region", flags="g")
            region_pol = "POLYGON (({0} {1},{0} {3},{2} {3},{2} {1},{0} {1}))".format(
                region["e"], region["n"], region["w"], region["s"])

    # Do not reproject in latlon LOCATIONS
    if target_crs not in latlon_crs:
        pol = ogr.CreateGeometryFromWkt(region_pol)
        pol.Transform(reverse_transform)
        pol = pol.ExportToWkt()
    else:
        pol = region_pol

    # Create a single output map unless maps for each species are requested
    if (not species_maps and not print_species and not print_species_shell
            and not print_occ_number and not print_species_table):
        mapname = output
        new = Vector(mapname)
        new.open("w", tab_name=mapname, tab_cols=cols)
        cat = 1

    # Import data for each species
    for s in taxa_list:
        # Get the taxon key unless a taxon key was already provided as input
        try:
            key = int(s)
        except:
            try:
                species_match = species.name_backbone(s,
                                                      rank=rank,
                                                      strict=False,
                                                      verbose=True)
                key = species_match["usageKey"]
            except:
                grass.error(
                    "Data request for taxon {} failed. Are you online?".format(
                        s))
                continue

        # Return matching taxon and alternatives and exit
        if print_species:
            print("Matching taxon for {} is:".format(s))
            print("{} {}".format(species_match["scientificName"],
                                 species_match["status"]))
            if "alternatives" in list(species_match.keys()):
                print("Alternative matches might be: {}".format(s))
                for m in species_match["alternatives"]:
                    print("{} {}".format(m["scientificName"], m["status"]))
            else:
                print("No alternatives found for the given taxon")
            continue
        if print_species_shell:
            print("match={}".format(species_match["scientificName"]))
            if "alternatives" in list(species_match.keys()):
                alternatives = []
                for m in species_match["alternatives"]:
                    alternatives.append(m["scientificName"])
                print("alternatives={}".format(",".join(alternatives)))
            continue
        if print_species_table:
            if "alternatives" in list(species_match.keys()):
                if len(species_match["alternatives"]) == 0:
                    print("{0}|{1}|{2}|".format(
                        s, key, species_match["scientificName"]))
                else:
                    alternatives = []
                    for m in species_match["alternatives"]:
                        alternatives.append(m["scientificName"])
                    print("{0}|{1}|{2}|{3}".format(
                        s,
                        key,
                        species_match["scientificName"],
                        ",".join(alternatives),
                    ))
            continue
        try:
            returns_n = occurrences.search(
                taxonKey=key,
                hasGeospatialIssue=hasGeospatialIssue,
                hasCoordinate=hasCoordinate,
                institutionCode=institutionCode,
                basisOfRecord=basisOfRecord,
                recordedBy=recordedby,
                eventDate=eventDate,
                continent=continent,
                country=country,
                geometry=pol,
                limit=1,
            )["count"]
        except:
            grass.error(
                "Data request for taxon {} faild. Are you online?".format(s))
            returns_n = 0

        # Exit if search does not give a return
        # Print only number of returns for the given search and exit
        if print_occ_number:
            print("Found {0} occurrences for taxon {1}...".format(
                returns_n, s))
            continue
        elif returns_n <= 0:
            grass.warning(
                "No occurrences for current search for taxon {0}...".format(s))
            continue
        elif returns_n >= 200000:
            grass.warning(
                "Your search for {1} returns {0} records.\n"
                "Unfortunately, the GBIF search API is limited to 200,000 records per request.\n"
                "The download will be incomplete. Please consider to split up your search."
                .format(returns_n, s))

        # Get the number of chunks to download
        chunks = int(math.ceil(returns_n / float(chunk_size)))
        grass.verbose("Downloading {0} occurrences for taxon {1}...".format(
            returns_n, s))

        # Create a map for each species if requested using map name as suffix
        if species_maps:
            mapname = "{}_{}".format(s.replace(" ", "_"), output)

            new = Vector(mapname)
            new.open("w", tab_name=mapname, tab_cols=cols)
            cat = 0

        # Download the data from GBIF
        for c in range(chunks):
            # Define offset
            offset = c * chunk_size
            # Adjust chunk_size to the hard limit of 200,000 records in GBIF API
            # if necessary
            if offset + chunk_size >= 200000:
                chunk_size = 200000 - offset
            # Get the returns for the next chunk
            returns = occurrences.search(
                taxonKey=key,
                hasGeospatialIssue=hasGeospatialIssue,
                hasCoordinate=hasCoordinate,
                institutionCode=institutionCode,
                basisOfRecord=basisOfRecord,
                recordedBy=recordedby,
                eventDate=eventDate,
                continent=continent,
                country=country,
                geometry=pol,
                limit=chunk_size,
                offset=offset,
            )

            # Write the returned data to map and attribute table
            for res in returns["results"]:
                if target_crs not in latlon_crs:
                    point = ogr.CreateGeometryFromWkt("POINT ({} {})".format(
                        res["decimalLongitude"], res["decimalLatitude"]))
                    point.Transform(transform)
                    x = point.GetX()
                    y = point.GetY()
                else:
                    x = res["decimalLongitude"]
                    y = res["decimalLatitude"]

                point = Point(x, y)

                for k in dwc_keys:
                    if k not in list(res.keys()):
                        res.update({k: None})

                cat = cat + 1
                new.write(
                    point,
                    cat=cat,
                    attrs=(
                        "{}".format(s),
                        res["key"],
                        res["taxonRank"],
                        res["taxonKey"],
                        res["taxonID"],
                        res["scientificName"],
                        res["species"],
                        res["speciesKey"],
                        res["genericName"],
                        res["genus"],
                        res["genusKey"],
                        res["family"],
                        res["familyKey"],
                        res["order"],
                        res["orderKey"],
                        res["class"],
                        res["classKey"],
                        res["phylum"],
                        res["phylumKey"],
                        res["kingdom"],
                        res["kingdomKey"],
                        "{}".format(res["eventDate"])
                        if res["eventDate"] else None,
                        "{}".format(res["verbatimEventDate"])
                        if res["verbatimEventDate"] else None,
                        res["startDayOfYear"],
                        res["endDayOfYear"],
                        res["year"],
                        res["month"],
                        res["day"],
                        res["occurrenceID"],
                        res["occurrenceStatus"],
                        res["occurrenceRemarks"],
                        res["Habitat"],
                        res["basisOfRecord"],
                        res["preparations"],
                        res["sex"],
                        res["type"],
                        res["locality"],
                        res["verbatimLocality"],
                        res["decimalLongitude"],
                        res["decimalLatitude"],
                        res["coordinateUncertaintyInMeters"],
                        res["geodeticDatum"],
                        res["higerGeography"],
                        res["continent"],
                        res["country"],
                        res["countryCode"],
                        res["stateProvince"],
                        res["gbifID"],
                        res["protocol"],
                        res["identifier"],
                        res["recordedBy"],
                        res["identificationID"],
                        ",".join(res["identifiers"]),
                        "{}".format(res["dateIdentified"])
                        if res["dateIdentified"] else None,
                        "{}".format(res["modified"])
                        if res["modified"] else None,
                        res["institutionCode"],
                        "{}".format(res["lastInterpreted"])
                        if res["lastInterpreted"] else None,
                        "{}".format(res["lastParsed"])
                        if res["lastParsed"] else None,
                        res["references"],
                        ",".join(res["relations"]),
                        res["catalogNumber"],
                        "{}".format(res["occurrenceDetails"])
                        if res["occurrenceDetails"] else None,
                        res["datasetKey"],
                        res["datasetName"],
                        res["collectionCode"],
                        res["rights"],
                        res["rightsHolder"],
                        res["license"],
                        res["publishingOrgKey"],
                        res["publishingCountry"],
                        "{}".format(res["lastCrawled"])
                        if res["lastCrawled"] else None,
                        res["specificEpithet"],
                        ",".join(res["facts"]),
                        ",".join(res["issues"]),
                        ",".join(res["extensions"]),
                        res["language"],
                    ),
                )


        # Close the current map if a map for each species is requested
        if species_maps:
            new.table.conn.commit()
            new.close()
            if not no_topo:
                grass.run_command("v.build", map=mapname, option="build")

            # Write history to map
            grass.vector_history(mapname)

    # Close the output map unless a map for each species is requested
    if (not species_maps and not print_species and not print_species_shell
            and not print_occ_number and not print_species_table):
        new.table.conn.commit()
        new.close()
        if not no_topo:
            grass.run_command("v.build", map=mapname, option="build")

        # Write history to map
        grass.vector_history(mapname)
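
The download loop above is plain offset/limit pagination against the GBIF occurrence search, with the chunk size clamped so that offset + limit never crosses the API's hard cap of 200,000 records per search. Isolated, the arithmetic looks like this (the count of 650 is hypothetical):

import math

returns_n = 650  # hypothetical count from the limit=1 probe request
chunk_size = 300
for c in range(int(math.ceil(returns_n / float(chunk_size)))):
    offset = c * chunk_size
    if offset + chunk_size >= 200000:  # GBIF hard limit per search
        chunk_size = 200000 - offset
    # occurrences.search(..., limit=chunk_size, offset=offset)
    print(offset, chunk_size)  # -> (0, 300), (300, 300), (600, 300)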
Example #6
def main():
    in_vector = options["input"].split("@")[0]
    if len(options["input"].split("@")) > 1:
        in_mapset = options["input"].split("@")[1]
    else:
        in_mapset = None
    raster_maps = options["raster"].split(
        ",")  # raster file(s) to extract from
    output = options["output"]
    methods = tuple(options["methods"].split(","))
    # list() so the values survive repeated iteration under Python 3
    percentile = (None if options["percentile"] == "" else list(
        map(float, options["percentile"].split(","))))
    column_prefix = tuple(options["column_prefix"].split(","))
    buffers = options["buffers"].split(",")
    types = options["type"].split(",")
    layer = options["layer"]
    sep = options["separator"]
    update = flags["u"]
    tabulate = flags["t"]
    percent = flags["p"]
    remove = flags["r"]
    use_label = flags["l"]

    empty_buffer_warning = (
        "No data in raster map {} within buffer {} around geometry {}")

    # Do checks using pygrass
    for rmap in raster_maps:
        r_map = RasterAbstractBase(rmap)
        if not r_map.exist():
            grass.fatal("Could not find raster map {}.".format(rmap))

    user_mask = False
    m_map = RasterAbstractBase("MASK", Mapset().name)
    if m_map.exist():
        grass.warning("Current MASK is temporarily renamed.")
        user_mask = True
        unset_mask()

    invect = VectorTopo(in_vector)
    if not invect.exist():
        grass.fatal("Vector file {} does not exist".format(in_vector))

    if output:
        if output == "-":
            out = None
        else:
            out = open(output, "w")

    # Check if input map is in current mapset (and thus editable)
    if in_mapset and str(in_mapset) != str(Mapset()):
        grass.fatal(
            "Input vector map is not in the current mapset and cannot be "
            "modified. Please consider copying it to the current mapset.")

    buffers = []
    for buf in options["buffers"].split(","):
        try:
            b = float(buf)
            if b.is_integer():
                buffers.append(int(b))
            else:
                buffers.append(b)
        except ValueError:
            grass.fatal("Invalid buffer distance: {}".format(buf))
        if b < 0:
            grass.fatal("Negative buffer distance not supported!")

    ### Define column types depending on statistic, map type and
    ### DB backend (SQLite supports only double and not real)
    # int: statistic always produces integer precision
    # double: statistic always produces floating point precision
    # map_type: precision of statistic depends on map type
    int_dict = {
        "number": (0, "int", "n"),
        "number_null": (1, "int", "null_cells"),
        "minimum": (3, "map_type", "min"),
        "maximum": (4, "map_type", "max"),
        "range": (5, "map_type", "range"),
        "average": (6, "double", "mean"),
        "average_abs": (7, "double", "mean_of_abs"),
        "stddev": (8, "double", "stddev"),
        "variance": (9, "double", "variance"),
        "coeff_var": (10, "double", "coeff_var"),
        "sum": (11, "map_type", "sum"),
        "first_quartile": (12, "map_type", "first_quartile"),
        "median": (13, "map_type", "median"),
        "third_quartile": (14, "map_type", "third_quartile"),
        "percentile": (15, "map_type", "percentile"),
    }
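    # e.g. methods=("median",) with column_prefix=("elev",) and buffer 100
    # yields a column "elev_median_b100" typed after the raster map
    # ("map_type"), while "average" yields "elev_mean_b100" typed "double".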

    if len(raster_maps) != len(column_prefix):
        grass.fatal(
            "Number of maps and number of column prefixes has to be equal!")

    # Generate list of required column names and types
    col_names = []
    valid_labels = []
    col_types = []
    for p in column_prefix:
        rmaptype, val_lab, rcats = raster_type(
            raster_maps[column_prefix.index(p)], tabulate, use_label)
        valid_labels.append(val_lab)

        for b in buffers:
            b_str = str(b).replace(".", "_")
            if tabulate:
                if rmaptype == "double precision":
                    grass.fatal(
                        "{} has floating point precision. Can only tabulate integer maps"
                        .format(raster_maps[column_prefix.index(p)]))
                col_names.append("{}_{}_b{}".format(p, "ncats", b_str))
                col_types.append("int")
                col_names.append("{}_{}_b{}".format(p, "mode", b_str))
                col_types.append("int")
                col_names.append("{}_{}_b{}".format(p, "null", b_str))
                col_types.append("double precision")
                col_names.append("{}_{}_b{}".format(p, "area_tot", b_str))
                col_types.append("double precision")

                for rcat in rcats:
                    if use_label and valid_labels:
                        rcat = rcat[0].replace(" ", "_")
                    else:
                        rcat = rcat[1]
                    col_names.append("{}_{}_b{}".format(p, rcat, b_str))
                    col_types.append("double precision")
            else:
                for m in methods:
                    col_names.append("{}_{}_b{}".format(
                        p, int_dict[m][2], b_str))
                    col_types.append(rmaptype if int_dict[m][1] ==
                                     "map_type" else int_dict[m][1])
                if percentile:
                    for perc in percentile:
                        col_names.append("{}_percentile_{}_b{}".format(
                            p,
                            int(perc) if (perc).is_integer() else perc, b_str))
                        col_types.append(rmaptype if int_dict[m][1] ==
                                         "map_type" else int_dict[m][1])

    # Open input vector map
    in_vect = VectorTopo(in_vector, layer=layer)
    in_vect.open(mode="r")

    # Register the temporary map name for cleanup
    # (tmp_map and TMP_MAPS are expected to come from module scope)
    global TMP_MAPS
    TMP_MAPS.append(tmp_map)

    # Setup stats collectors
    if tabulate:
        # Collector for raster category statistics
        stats = Module("r.stats", run_=False, stdout_=PIPE)
        stats.inputs.sort = "desc"
        stats.inputs.null_value = "null"
        stats.flags.quiet = True
        stats.flags.l = True
        if percent:
            stats.flags.p = True
            stats.flags.n = True
        else:
            stats.flags.a = True
    else:
        # Collector for univariate statistics
        univar = Module("r.univar", run_=False, stdout_=PIPE)
        univar.inputs.separator = sep
        univar.flags.g = True
        univar.flags.quiet = True

        # Add extended statistics if requested
        if set(methods).intersection(
                set(["first_quartile", "median", "third_quartile"])):
            univar.flags.e = True

        if percentile is not None:
            univar.flags.e = True
            univar.inputs.percentile = percentile

    # Check if attribute table exists
    if not output:
        if not in_vect.table:
            grass.fatal(
                "No attribute table found for vector map {}".format(in_vect))

        # Modify table as needed
        tab = in_vect.table
        tab_name = tab.name
        tab_cols = tab.columns

        # Add required columns
        existing_cols = list(set(tab_cols.names()).intersection(col_names))
        if len(existing_cols) > 0:
            if not update:
                in_vect.close()
                grass.fatal(
                    "Column(s) {} already exist! Please use the u-flag \
                            if you want to update values in those columns".
                    format(",".join(existing_cols)))
            else:
                grass.warning("Column(s) {} already exist!".format(
                    ",".join(existing_cols)))
        for e in existing_cols:
            idx = col_names.index(e)
            del col_names[idx]
            del col_types[idx]
        tab_cols.add(col_names, col_types)

        conn = tab.conn
        cur = conn.cursor()

        sql_str_start = "UPDATE {} SET ".format(tab_name)

    elif output == "-":
        print("cat{0}raster_map{0}buffer{0}statistic{0}value".format(sep))
    else:
        out.write("cat{0}raster_map{0}buffer{0}statistic{0}value{1}".format(
            sep, os.linesep))

    # Get computational region
    grass.use_temp_region()
    r = Region()
    r.read()

    # Adjust region extent to buffer around geometry
    # reg = deepcopy(r)

    # Create iterator for geometries of all selected types
    geoms = chain()
    geoms_n = 0
    n_geom = 1
    for geom_type in types:
        geoms_n += in_vect.number_of(geom_type)
        if in_vect.number_of(geom_type) > 0:
            # chain with the previous iterator so earlier types are kept
            geoms = chain(geoms, in_vect.viter(geom_type))

    # Loop over geometries
    for geom in geoms:
        # Get cat
        cat = geom.cat

        # Add where clause to UPDATE statement
        sql_str_end = " WHERE cat = {};".format(cat)

        # Loop over user-provided buffer distances
        for buf in buffers:
            b_str = str(buf).replace(".", "_")
            # Buffer geometry
            if buf <= 0:
                buffer_geom = geom
            else:
                buffer_geom = geom.buffer(buf)
            # Create temporary vector map with buffered geometry
            tmp_vect = VectorTopo(tmp_map, quiet=True)
            tmp_vect.open(mode="w")
            tmp_vect.write(Boundary(points=buffer_geom[0].to_list()))
            # , c_cats=int(cat), set_cats=True
            if callable(buffer_geom[1]):
                tmp_vect.write(Centroid(x=buffer_geom[1]().x,
                                        y=buffer_geom[1]().y),
                               cat=int(cat))
            else:
                tmp_vect.write(Centroid(x=buffer_geom[1].x,
                                        y=buffer_geom[1].y),
                               cat=int(cat))

            #################################################
            # How to silence VectorTopo???
            #################################################

            # Save current stdout
            # original = sys.stdout

            # f = open(os.devnull, 'w')
            # with open('output.txt', 'w') as f:
            # sys.stdout = io.BytesIO()
            # sys.stdout.fileno() = os.devnull
            # sys.stderr = f
            # os.environ.update(dict(GRASS_VERBOSE='0'))
            tmp_vect.close(build=False)
            grass.run_command("v.build", map=tmp_map, quiet=True)
            # os.environ.update(dict(GRASS_VERBOSE='1'))

            # reg = Region()
            # reg.read()
            # r.from_vect(tmp_map)
            r = align_current(r, buffer_geom[0].bbox())
            r.write()

            # Check if the following is needed
            # needed specially with r.stats -p
            # grass.run_command('g.region', vector=tmp_map, flags='a')

            # Create a MASK from buffered geometry
            if user_mask:
                grass.run_command(
                    "v.to.rast",
                    input=tmp_map,
                    output=tmp_map,
                    use="val",
                    value=int(cat),
                    quiet=True,
                )
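                # The expression below builds the new MASK as the
                # intersection of the rasterized buffer and the user's
                # original MASK (assumed to have been renamed to
                # <tmp_map>_MASK by unset_mask() earlier).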
                mc_expression = (
                    "MASK=if(!isnull({0}) && !isnull({0}_MASK), {1}, null())".
                    format(tmp_map, cat))
                grass.run_command("r.mapcalc",
                                  expression=mc_expression,
                                  quiet=True)
            else:
                grass.run_command(
                    "v.to.rast",
                    input=tmp_map,
                    output="MASK",
                    use="val",
                    value=int(cat),
                    quiet=True,
                )

            # reg.write()

            updates = []
            # Compute statistics for every raster map
            for rm, rmap in enumerate(raster_maps):
                # rmap = raster_maps[rm]
                prefix = column_prefix[rm]

                if tabulate:
                    # Get statistics on occurrence of raster categories within buffer
                    stats.inputs.input = rmap
                    stats.run()
                    t_stats = (stats.outputs["stdout"].value.rstrip(
                        os.linesep).replace("  ", " ").replace(
                            "no data", "no_data").replace(
                                " ",
                                "_b{} = ".format(b_str)).split(os.linesep))
                    if t_stats == [""]:
                        grass.warning(
                            empty_buffer_warning.format(rmap, buf, cat))
                        continue
                    if (t_stats[0].split(
                            "_b{} = ".format(b_str))[0].split("_")[-1] !=
                            "null"):
                        mode = (t_stats[0].split(
                            "_b{} = ".format(b_str))[0].split("_")[-1])
                    elif len(t_stats) == 1:
                        mode = "NULL"
                    else:
                        mode = (t_stats[1].split(
                            "_b{} = ".format(b_str))[0].split("_")[-1])

                    if not output:
                        updates.append("\t{}_{}_b{} = {}".format(
                            prefix, "ncats", b_str, len(t_stats)))
                        updates.append("\t{}_{}_b{} = {}".format(
                            prefix, "mode", b_str, mode))

                        area_tot = 0
                        for l in t_stats:
                            # check if raster maps has category or not
                            if len(l.split("=")) == 2:
                                updates.append("\t{}_{}".format(
                                    prefix, l.rstrip("%")))
                            elif not l.startswith("null"):
                                vals = l.split("=")
                                updates.append("\t{}_{} = {}".format(
                                    prefix,
                                    vals[-2].strip()
                                    if valid_labels[rm] else vals[0].strip(),
                                    vals[-1].strip().rstrip("%"),
                                ))
                            if not l.startswith("null"):
                                area_tot += float(
                                    l.rstrip("%").split("= ")[-1])
                        if not percent:
                            updates.append("\t{}_{}_b{} = {}".format(
                                prefix, "area_tot", b_str, area_tot))

                    else:
                        out_str = "{1}{0}{2}{0}{3}{0}{4}{0}{5}{6}".format(
                            sep, cat, prefix, buf, "ncats", len(t_stats),
                            os.linesep)
                        out_str += "{1}{0}{2}{0}{3}{0}{4}{0}{5}{6}".format(
                            sep, cat, prefix, buf, "mode", mode, os.linesep)
                        area_tot = 0
                        for l in t_stats:
                            rcat = (l.split("= ")[1].rstrip(
                                "_b{} = ".format(b_str))
                                    if valid_labels[rm] else l.split("_")[0])
                            area = l.split("= ")[-1]
                            out_str += "{1}{0}{2}{0}{3}{0}{4}{0}{5}{6}".format(
                                sep,
                                cat,
                                prefix,
                                buf,
                                "area {}".format(rcat),
                                area,
                                os.linesep,
                            )
                            if rcat != "null":
                                area_tot = area_tot + float(
                                    l.rstrip("%").split("= ")[-1])
                        if not percent:
                            out_str += "{1}{0}{2}{0}{3}{0}{4}{0}{5}{6}".format(
                                sep,
                                cat,
                                prefix,
                                buf,
                                "area total",
                                area_tot,
                                os.linesep,
                            )

                        if output == "-":
                            print(out_str.rstrip(os.linesep))
                        else:
                            out.write(out_str)

                else:
                    # Get univariate statistics within buffer
                    univar.inputs.map = rmap
                    univar.run()
                    u_stats = (univar.outputs["stdout"].value.rstrip(
                        os.linesep).replace(
                            "=", "_b{} = ".format(b_str)).split(os.linesep))

                    # Test if u_stats is empty and give warning
                    # Needs to be adjusted to number of requested stats?
                    if ((percentile and len(u_stats) < 14)
                            or (univar.flags.e and len(u_stats) < 13)
                            or len(u_stats) < 12):
                        grass.warning(
                            empty_buffer_warning.format(rmap, buf, cat))
                        break
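                    # With -g, r.univar emits one key=value line per
                    # statistic: 12 base lines (n ... sum), indices 12-14
                    # add first_quartile, median and third_quartile with -e,
                    # and requested percentiles follow from index 15 on;
                    # hence the length checks above and the int_dict indices.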

                    # Extract statistics for selected methods
                    for m in methods:
                        if not output:
                            # Add to list of UPDATE statements
                            updates.append("\t{}_{}".format(
                                prefix,
                                u_stats[int_dict[m][0]] if is_number(
                                    u_stats[int_dict[m][0]].split(" = ")[1])
                                else " = ".join([
                                    u_stats[int_dict[m][0]].split(" = ")[0],
                                    "NULL",
                                ]),
                            ))
                        else:
                            out_str = "{1}{0}{2}{0}{3}{0}{4}{0}{5}".format(
                                sep,
                                cat,
                                prefix,
                                buf,
                                m,
                                u_stats[int_dict[m][0]].split("= ")[1],
                            )
                            if output == "-":
                                print(out_str)
                            else:
                                out.write("{}{}".format(out_str, os.linesep))

                    if percentile:
                        perc_count = 0
                        for perc in percentile:
                            if not output:
                                updates.append(
                                    "\t{}_percentile_{}_b{} = {}".format(
                                        prefix,
                                        int(perc) if
                                        (perc).is_integer() else perc,
                                        b_str,
                                        u_stats[15 +
                                                perc_count].split("= ")[1],
                                    ))
                            else:
                                out_str = "{1}{0}{2}{0}{3}{0}{4}{0}{5}".format(
                                    sep,
                                    cat,
                                    prefix,
                                    buf,
                                    "percentile_{}".format(
                                        int(perc) if (
                                            perc).is_integer() else perc),
                                    u_stats[15 + perc_count].split("= ")[1],
                                )
                                if output == "-":
                                    print(out_str)
                                else:
                                    out.write("{}{}".format(
                                        out_str, os.linesep))
                            perc_count = perc_count + 1

            if not output and len(updates) > 0:
                cur.execute("{}{}{}".format(sql_str_start, ",\n".join(updates),
                                            sql_str_end))

            # Remove temporary maps
            # , stderr=os.devnull, stdout_=os.devnull)
            grass.run_command("g.remove",
                              flags="f",
                              type="raster",
                              name="MASK",
                              quiet=True)
            grass.run_command("g.remove",
                              flags="f",
                              type="vector",
                              name=tmp_map,
                              quiet=True)

        # Give progress information
        grass.percent(n_geom, geoms_n, 1)
        n_geom = n_geom + 1

        if not output:
            conn.commit()

    # Close cursor and DB connection
    if not output:
        cur.close()
        conn.close()
        # Update history
        grass.vector.vector_history(in_vector)
    elif output != "-":
        # write results to file
        out.close()

    if remove and not output:
        dropcols = []
        selectnum = "select count({}) from {}"
        for i in col_names:
            thisrow = grass.read_command("db.select",
                                         flags="c",
                                         sql=selectnum.format(i, in_vector))
            if int(thisrow) == 0:
                dropcols.append(i)
        grass.debug("Columns to delete: {}".format(", ".join(dropcols)),
                    debug=2)
        if dropcols:
            grass.run_command("v.db.dropcolumn",
                              map=in_vector,
                              columns=dropcols)
Ejemplo n.º 7
0
def main():
    in_vector = options['input'].split('@')[0]
    if len(options['input'].split('@')) > 1:
        in_mapset = options['input'].split('@')[1]
    else:
        in_mapset = None
    raster_maps = options['raster'].split(',')   # raster file(s) to extract from
    output = options['output']
    methods = tuple(options['methods'].split(','))
    percentile = None if options['percentile'] == '' else list(map(float, options['percentile'].split(',')))
    column_prefix = tuple(options['column_prefix'].split(','))
    buffers = options['buffers'].split(',')
    types = options['type'].split(',')
    layer = options['layer']
    sep = options['separator']
    update = flags['u']
    tabulate = flags['t']
    percent = flags['p']
    remove = flags['r']
    use_label = False

    empty_buffer_warning = 'No data in raster map {} within buffer {} around geometry {}'

    # Do checks using pygrass
    for rmap in raster_maps:
        r_map = RasterAbstractBase(rmap)
        if not r_map.exist():
            grass.fatal('Could not find raster map {}.'.format(rmap))

    user_mask = False
    m_map = RasterAbstractBase('MASK', Mapset().name)
    if m_map.exist():
        grass.warning("Current MASK is temporarily renamed.")
        user_mask = True
        unset_mask()

    invect = VectorTopo(in_vector)
    if not invect.exist():
        grass.fatal("Vector file {} does not exist".format(in_vector))

    if output:
        if output == '-':
            out = None
        else:
            out = open(output, 'w')

    # Check if input map is in current mapset (and thus editable)
    if in_mapset and str(in_mapset) != str(Mapset()):
        grass.fatal("Input vector map is not in current mapset and cannot be modified. "
                    "Please consider copying it to current mapset.")

    buffers = []
    for buf in options['buffers'].split(','):
        try:
            b = float(buf)
            if b.is_integer():
                buffers.append(int(b))
            else:
                buffers.append(b)
        except ValueError:
            grass.fatal('Buffer distance {} is not a number'.format(buf))
        if b < 0:
            grass.fatal("Negative buffer distance not supported!")

    ### Define column types depending on statistic, map type and
    ### DB backend (SQLite supports only double and not real)
    # int: statistic always produces integer precision
    # double: statistic always produces floating point precision
    # map_type: precision of statistic depends on map type
    int_dict = {'number': (0, 'int', 'n'),
                'number_null': (1, 'int', 'null_cells'),
                'minimum': (3, 'map_type', 'min'),
                'maximum': (4, 'map_type', 'max'),
                'range': (5, 'map_type', 'range'),
                'average': (6, 'double', 'mean'),
                'average_abs': (7, 'double', 'mean_of_abs'),
                'stddev': (8, 'double', 'stddev'),
                'variance': (9, 'double', 'variance'),
                'coeff_var': (10, 'double', 'coeff_var'),
                'sum': (11, 'map_type', 'sum'),
                'first_quartile': (12, 'map_type', 'first_quartile'),
                'median': (13, 'map_type', 'median'),
                'third_quartile': (14, 'map_type', 'third_quartile'),
                'percentile': (15, 'map_type', 'percentile')}
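    # Example: int_dict['median'] == (13, 'map_type', 'median') means the
    # value is read from line index 13 of the `r.univar -ge` output and the
    # column type follows the raster map type.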

    if len(raster_maps) != len(column_prefix):
        grass.fatal('Number of maps and number of column prefixes have to be equal!')

    # Generate list of required column names and types
    col_names = []
    col_types = []
    for p in column_prefix:
        rmaptype, rcats = raster_type(raster_maps[column_prefix.index(p)], tabulate, use_label)
        for b in buffers:
            b_str = str(b).replace('.', '_')
            if tabulate:
                if rmaptype == 'double precision':
                    grass.fatal('{} has floating point precision. Can only tabulate integer maps'.format(raster_maps[column_prefix.index(p)]))
                col_names.append('{}_{}_b{}'.format(p, 'ncats', b_str))
                col_types.append('int')
                col_names.append('{}_{}_b{}'.format(p, 'mode', b_str))
                col_types.append('int')
                col_names.append('{}_{}_b{}'.format(p, 'null', b_str))
                col_types.append('double precision')
                col_names.append('{}_{}_b{}'.format(p, 'area_tot', b_str))
                col_types.append('double precision')
                for rcat in rcats:
                    if use_label:
                        rcat = rcat[1].replace(" ", "_")
                    else:
                        rcat = rcat[0]
                    col_names.append('{}_{}_b{}'.format(p, rcat, b_str))
                    col_types.append('double precision')
            else:
                for m in methods:
                    col_names.append('{}_{}_b{}'.format(p, int_dict[m][2], b_str))
                    col_types.append(rmaptype if int_dict[m][1] == 'map_type' else int_dict[m][1])
                if percentile:
                    for perc in percentile:
                        col_names.append('{}_percentile_{}_b{}'.format(p,
                                                                       int(perc) if (perc).is_integer() else perc,
                                                                       b_str))
                        col_types.append(rmaptype if int_dict['percentile'][1] == 'map_type' else int_dict['percentile'][1])

    # Open input vector map
    in_vect = VectorTopo(in_vector, layer=layer)
    in_vect.open(mode='r')

    # Register temporary map for cleanup (tmp_map is assumed to be defined
    # at module level)
    TMP_MAPS.append(tmp_map)

    # Setup stats collectors
    if tabulate:
        # Collector for raster category statistics
        stats = Module('r.stats', run_=False, stdout_=PIPE)
        stats.inputs.sort = 'desc'
        stats.inputs.null_value = 'null'
        stats.flags.quiet = True
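        # -p reports percentages (-n additionally suppresses the no-data
        # category), while -a reports absolute areas in square meters.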
        if percent:
            stats.flags.p = True
            stats.flags.n = True
        else:
            stats.flags.a = True
    else:
        # Collector for univariate statistics
        univar = Module('r.univar', run_=False, stdout_=PIPE)
        univar.inputs.separator = sep
        univar.flags.g = True
        univar.flags.quiet = True

        # Add extended statistics if requested
        if set(methods).intersection(set(['first_quartile',
                                          'median', 'third_quartile'])):
            univar.flags.e = True

        if percentile is not None:
            univar.flags.e = True
            univar.inputs.percentile = percentile

    # Check if attribute table exists
    if not output:
        if not in_vect.table:
            grass.fatal('No attribute table found for vector map {}'.format(in_vect))

        # Modify table as needed
        tab = in_vect.table
        tab_name = tab.name
        tab_cols = tab.columns

        # Add required columns
        existing_cols = list(set(tab_cols.names()).intersection(col_names))
        if len(existing_cols) > 0:
            if not update:
                grass.fatal('Column(s) {} already exist! Please use the u-flag '
                            'if you want to update values in those columns'.format(','.join(existing_cols)))
            else:
                grass.warning('Column(s) {} already exist!'.format(','.join(existing_cols)))
        for e in existing_cols:
            idx = col_names.index(e)
            del col_names[idx]
            del col_types[idx]
        tab_cols.add(col_names, col_types)

        conn = tab.conn
        cur = conn.cursor()

        sql_str_start = 'UPDATE {} SET '.format(tab_name)

    elif output == '-':
        print('cat{0}raster_map{0}buffer{0}statistic{0}value'.format(sep))
    else:
        out.write('cat{0}raster_map{0}buffer{0}statistic{0}value{1}'.format(sep, os.linesep))


    # Get computational region
    grass.use_temp_region()
    r = Region()
    r.read()

    # Adjust region extent to buffer around geometry
    #reg = deepcopy(r)

    # Create iterator for geometries of all selected types
    geoms = chain()
    geoms_n = 0
    n_geom = 1
    for geom_type in types:
        geoms_n += in_vect.number_of(geom_type)
        if in_vect.number_of(geom_type) > 0:
            geoms = chain(geoms, in_vect.viter(geom_type))

    # Loop over geometries
    for geom in geoms:
        # Get cat
        cat = geom.cat

        # Add where clause to UPDATE statement
        sql_str_end = ' WHERE cat = {};'.format(cat)

        # Loop over user-provided buffer distances
        for buf in buffers:
            b_str = str(buf).replace('.', '_')
            # Buffer geometry
            if buf <= 0:
                buffer_geom = geom
            else:
                buffer_geom = geom.buffer(buf)
            # Create temporary vector map with buffered geometry
            tmp_vect = VectorTopo(tmp_map, quiet=True)
            tmp_vect.open(mode='w')
            #print(int(cat))
            tmp_vect.write(Boundary(points=buffer_geom[0].to_list()))
            # , c_cats=int(cat), set_cats=True
            tmp_vect.write(Centroid(x=buffer_geom[1].x,
                                    y=buffer_geom[1].y), cat=int(cat))

            #################################################
            # How to silence VectorTopo???
            #################################################

            # Save current stdout
            #original = sys.stdout

            #f = open(os.devnull, 'w')
            #with open('output.txt', 'w') as f:
            #sys.stdout = io.BytesIO()
            #sys.stdout.fileno() = os.devnull
            #sys.stderr = f
            #os.environ.update(dict(GRASS_VERBOSE='0'))
            tmp_vect.close(build=False)
            grass.run_command('v.build', map=tmp_map, quiet=True)
            #os.environ.update(dict(GRASS_VERBOSE='1'))

            #reg = Region()
            #reg.read()
            #r.from_vect(tmp_map)
            r = align_current(r, buffer_geom[0].bbox())
            r.write()

            # Check if the following is needed
            # needed specially with r.stats -p
            #grass.run_command('g.region', vector=tmp_map, flags='a')

            # Create a MASK from buffered geometry
            if user_mask:
                grass.run_command('v.to.rast', input=tmp_map,
                                  output=tmp_map, use='val',
                                  value=int(cat), quiet=True)
                mc_expression = "MASK=if(!isnull({0}) && !isnull({0}_MASK), {1}, null())".format(tmp_map, cat)
                grass.run_command('r.mapcalc', expression=mc_expression, quiet=True)
            else:
                grass.run_command('v.to.rast', input=tmp_map,
                                  output='MASK', use='val',
                                  value=int(cat), quiet=True)

            #reg.write()

            updates = []
            # Compute statistics for every raster map
            for rm in range(len(raster_maps)):
                rmap = raster_maps[rm]
                prefix = column_prefix[rm]

                if tabulate:
                    # Get statistics on occurrence of raster categories within buffer
                    stats.inputs.input = rmap
                    stats.run()
                    t_stats = stats.outputs['stdout'].value.rstrip(os.linesep).replace(' ', '_b{} = '.format(b_str)).split(os.linesep)

                    if t_stats[0].split('_b{} = '.format(b_str))[0].split('_')[-1] != 'null':
                        mode = t_stats[0].split('_b{} = '.format(b_str))[0].split('_')[-1]
                    elif len(t_stats) == 1:
                        mode = 'NULL'
                    else:
                        mode = t_stats[1].split('_b{} = '.format(b_str))[0].split('_')[-1]

                    if not output:
                        updates.append('\t{}_{}_b{} = {}'.format(prefix, 'ncats', b_str, len(t_stats)))
                        updates.append('\t{}_{}_b{} = {}'.format(prefix, 'mode', b_str, mode))

                        area_tot = 0
                        for l in t_stats:
                            updates.append('\t{}_{}'.format(prefix, l.rstrip('%')))
                            if l.split('_b{} ='.format(b_str))[0].split('_')[-1] != 'null':
                                area_tot = area_tot + float(l.rstrip('%').split('= ')[1])
                        if not percent:
                            updates.append('\t{}_{}_b{} = {}'.format(prefix, 'area_tot', b_str, area_tot))

                    else:
                        out_str = '{1}{0}{2}{0}{3}{0}{4}{0}{5}{6}'.format(sep, cat, prefix, buf, 'ncats', len(t_stats), os.linesep)
                        out_str += '{1}{0}{2}{0}{3}{0}{4}{0}{5}{6}'.format(sep, cat, prefix, buf, 'mode', mode, os.linesep)
                        area_tot = 0
                        if not t_stats[0]:
                            grass.warning(empty_buffer_warning.format(rmap, buf, cat))
                            continue
                        for l in t_stats:
                            rcat = l.split('_b{} ='.format(b_str))[0].split('_')[-1]
                            area = l.split('= ')[1]
                            out_str += '{1}{0}{2}{0}{3}{0}{4}{0}{5}{6}'.format(sep, cat, prefix, buf, 'area {}'.format(rcat), area, os.linesep)
                            if rcat != 'null':
                                area_tot = area_tot + float(l.rstrip('%').split('= ')[1])
                        out_str += '{1}{0}{2}{0}{3}{0}{4}{0}{5}{6}'.format(sep, cat, prefix, buf, 'area_tot', area_tot, os.linesep)

                        if output == '-':
                            print(out_str.rstrip(os.linesep))
                        else:
                            out.write(out_str)

                else:
                    # Get univariate statistics within buffer
                    univar.inputs.map = rmap
                    univar.run()
                    u_stats = univar.outputs['stdout'].value.rstrip(os.linesep).replace('=', '_b{} = '.format(b_str)).split(os.linesep)

                    # Test if u_stats is empty and give warning
                    # Needs to be adjusted to number of requested stats?
                    if (percentile and len(u_stats) < 14) or (univar.flags.e and len(u_stats) < 13) or len(u_stats) < 12:
                        grass.warning(empty_buffer_warning.format(rmap, buf, cat))
                        break

                    # Extract statistics for selected methods
                    for m in methods:
                        if not output:
                            # Add to list of UPDATE statements
                            updates.append('\t{}_{}'.format(
                                prefix, u_stats[int_dict[m][0]]))
                        else:
                            out_str = '{1}{0}{2}{0}{3}{0}{4}{0}{5}'.format(sep, cat, prefix, buf, m, u_stats[int_dict[m][0]].split('= ')[1])
                            if output == '-':
                                print(out_str)
                            else:
                                out.write("{}{}".format(out_str, os.linesep))

                    if percentile:
                        perc_count = 0
                        for perc in percentile:
                            if not output:
                                updates.append('\t{}_percentile_{}_b{} = {}'.format(
                                    prefix,
                                    int(perc) if (perc).is_integer() else perc,
                                    b_str, u_stats[15+perc_count].split('= ')[1]))
                            else:
                                out_str = '{1}{0}{2}{0}{3}{0}{4}{0}{5}'.format(sep, cat, prefix, buf, 'percentile_{}'.format(int(perc) if (perc).is_integer() else perc), u_stats[15+perc_count].split('= ')[1])
                                if output == '-':
                                    print(out_str)
                                else:
                                    out.write("{}{}".format(out_str, os.linesep))
                            perc_count = perc_count + 1

            if not output and len(updates) > 0:
                cur.execute('{}{}{}'.format(sql_str_start,
                                            ',\n'.join(updates),
                                            sql_str_end))

            # Remove temporary maps
            #, stderr=os.devnull, stdout_=os.devnull)
            grass.run_command('g.remove', flags='f', type='raster',
                              name='MASK', quiet=True)
            grass.run_command('g.remove', flags='f', type='vector',
                              name=tmp_map, quiet=True)

        # Give progress information
        grass.percent(n_geom, geoms_n, 1)
        n_geom = n_geom + 1

        if not output:
            conn.commit()

    # Close cursor and DB connection
    if not output:
        cur.close()
        conn.close()
        # Update history
        grass.vector.vector_history(in_vector)
    elif output != "-":
        # write results to file
        out.close()

    if remove and not output:
        dropcols = []
        selectnum = 'select count({}) from {}'
        for i in col_names:
            thisrow = grass.read_command('db.select', flags='c',
                                         sql=selectnum.format(i, in_vector))
            if int(thisrow) == 0:
                dropcols.append(i)
        grass.debug("Columns to delete: {}".format(', '.join(dropcols)),
                    debug=2)
        if dropcols:
            grass.run_command('v.db.dropcolumn', map=in_vector,
                              columns=dropcols)
Ejemplo n.º 8
0
def main():
    dem = options['dem']
    TR = options['time']  #TODO Time of concentration
    outlet = options['outlets']
    outlets = outlet.split(',')
    cleanTemporary = options['clean']
    try:
        TR = int(TR)
    except ValueError:
        print('TR is not a number')
        sys.exit()

    if cleanTemporary != 'no':
        grass.run_command(
            'g.remove',
            flags='f',
            type='raster',
            name=
            'main_stream,basin,circle,drainage,horton,raster_streams,slope_drain_into'
        )

        grass.run_command('g.remove',
                          flags='f',
                          type='vector',
                          name='main_stream,nodes,outlet')
        grass.run_command('g.remove',
                          type='vector',
                          pattern='main_stream*',
                          flags='f')

    grass.use_temp_region()
    #get region in order to estimate the threshold as a fraction (1/300) of total cells
    grass.run_command('g.region', raster=dem)

    regione = grass.region()
    threshold = float(regione['cells']) / 300
    #stream and drainage determination
    grass.run_command('r.watershed',
                      elevation=dem,
                      threshold=700,
                      stream='raster_streams',
                      drainage='drainage',
                      overwrite=True,
                      flags='s')

    #the radius is a little larger than the current resolution
    radius = regione['nsres'] * 1.4
    grass.run_command('r.circle',
                      output='circle',
                      coordinate=outlet,
                      max=radius,
                      overwrite=True)
    #get the distances and take the shortest distance
    distances = grass.read_command('r.distance', map='circle,raster_streams')
    list_dist = distances.split('\n')
    list_dist.remove('')
    list_tuple = []
    for distance in list_dist:
        dist = distance.split(':')
        my_tupla = dist[0], dist[1], float(
            dist[2]), dist[3], dist[4], dist[5], dist[6]
        list_tuple.append(my_tupla)
    tuple_orderedByDistance = sorted(list_tuple,
                                     key=lambda distanza: distanza[2])
    del (distances, list_tuple, list_dist)
    #calculate the basin and read its statistics
    outlet = tuple_orderedByDistance[0][-2:]
    xoutlet = float(outlet[0])
    youtlet = float(outlet[1])
    grass.run_command('r.water.outlet',
                      input='drainage',
                      output='basin',
                      coordinates=str(xoutlet) + ',' + str(youtlet),
                      overwrite=True)
    statistics = grass.read_command('r.univar', map=dem, zones='basin')
    main_stat = statistics.splitlines()[-9:]

    #order the stream network
    grass.run_command('r.mask', raster='basin')
    grass.run_command('r.stream.order',
                      stream_rast='raster_streams',
                      direction='drainage',
                      elevation=dem,
                      horton='horton',
                      overwrite=True)
    stream_stat = grass.read_command('r.stream.stats',
                                     stream_rast='horton',
                                     direction='drainage',
                                     elevation=dem,
                                     flags='o')
    network_statistics = stream_stat.split('\n')
    network_statistics.remove('')
    #get the max order
    network_statistics[-1].split()
    total_length = float(network_statistics[-1].split(',')[2])
    area_basin = float(network_statistics[-1].split(',')[3])
    #area_basin in km2
    area_basin_Ha = area_basin * 100
    mean_elev = float(main_stat[3].split(':')[-1])
    min_elev = float(main_stat[0].split(':')[-1])
    max_elev = float(main_stat[1].split(':')[-1])
    deltaH = max_elev - min_elev
    average_slope = float(network_statistics[-1].split(',')[4])
    grass.run_command('r.mask', flags='r')

    TcGiandotti = (4 * np.sqrt(area_basin) +
                   1.5 * total_length) / (0.8 * np.sqrt(mean_elev - min_elev))

    TcKirpich = 0.945 * (total_length**3. / deltaH)**0.385
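    # Both empirical formulas above yield the time of concentration in hours,
    # assuming area_basin in km2, total_length in km and elevations in m:
    # Giandotti: Tc = (4*sqrt(A) + 1.5*L) / (0.8*sqrt(Hmean - Hmin))
    # Kirpich:   Tc = 0.945 * (L^3 / deltaH)**0.385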

    if area_basin_Ha > 1000:  #TODO check the references
        corrivazione = TcGiandotti
        grass.info('using Giandotti')
        grass.info(str(TcGiandotti))
        formula = 'Giandotti'
    else:
        formula = 'Kirpich'
        corrivazione = TcKirpich
        grass.info('using Kirpich')
        grass.info(str(TcKirpich))
    if corrivazione < 24:
        aPar = 'a24@PERMANENT'
        bPar = 'b24@PERMANENT'
        kPar = 'k24@PERMANENT'
    else:
        aPar = 'a15@PERMANENT'
        bPar = 'b15@PERMANENT'
        kPar = 'k15@PERMANENT'
    CNmap = 'CN@PERMANENT'

    aStat = grass.read_command('r.univar', map=aPar, zones='basin')
    aMain_stat = aStat.splitlines()[12].split(':')[-1]
    aMain_stat = float(aMain_stat)
    bStat = grass.read_command('r.univar', map=bPar, zones='basin')
    bMain_stat = bStat.splitlines()[12].split(':')[-1]
    bMain_stat = float(bMain_stat)
    kStat = grass.read_command('r.univar', map=kPar, zones='basin')
    kMain_stat = kStat.splitlines()[12].split(':')[-1]
    kMain_stat = float(kMain_stat)
    CNstat = grass.read_command('r.univar', map=CNmap, zones='basin')
    CN = CNstat.splitlines()[12].split(':')[-1]
    CN = float(CN)

    g.message('area basin in km2: ')
    print(area_basin)
    print('mean elev: ')
    print(mean_elev - min_elev)
    print('delta H:')
    print(deltaH)
    print('total reach length: ')
    print(total_length)
    print('a mean:')
    print(aMain_stat)
    print('\n b mean: ')
    print(bMain_stat)
    print('\n k mean: ')
    print(kMain_stat)
    print('CN mean:')
    print(CN)

    ##### ------------------------- ##### modification for verification, to be removed
    # ~ corrivazione=3.
    # ~ aMain_stat=32.5
    # ~ bMain_stat=0.33
    # ~ kMain_stat=0.42
    # ~ CN=91.
    # ~ area_basin=61.5
    # ~ area_basin_Ha=area_basin*100
    CN = 70.12 / 82.63 * CN
    #CN = 78.6
    #####--------------------------#####

    f_K_T = 1 - kMain_stat * (0.45 + 0.799 * np.log(-np.log(1 - 1. / TR)))
    print('f(k,T): ')
    print(f_K_T)

    h = f_K_T * aMain_stat * corrivazione**bMain_stat
    print('\n h main:')
    print(h)
    X1 = 100 * corrivazione / (0.236 + 0.062 * corrivazione)
    X2 = 0.003 * corrivazione + 0.0234
    Pa = 100 - area_basin_Ha / (X1 + X2 * area_basin_Ha)
    Ha = h * Pa / 100
    S1 = (1000. / CN) - 10
    Pn = (Ha - 5.08 * S1)**2 / (Ha + 20.32 * S1)
    Qc = (1 / 360.) * Pn * area_basin_Ha / corrivazione
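    # Pn implements the SCS Curve Number relation Pn = (P - 0.2*S)^2 /
    # (P + 0.8*S): S1 = 1000/CN - 10 is the potential retention in inches,
    # and 5.08 = 0.2 * 25.4, 20.32 = 0.8 * 25.4 convert it to millimetres.
    # Qc converts Pn (mm) over area_basin_Ha hectares falling in
    # corrivazione hours to m3/s: 1 mm on 1 ha = 10 m3, and 10/3600 = 1/360.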

    print('discharge: ')
    print(Qc)

    #print table.columns.types()
    #[u'INTEGER', u'TEXT', u'integer', u'double precision']
    '''
    ------------------------------
    START CALCULATION OF LOCAL UPSTREAM SLOPE
    ------------------------------
    '''
    #offsets for moving windows
    offsets = [
        d for j in range(1, 1 + 1) for i in [j, -j]
        for d in [(i, 0), (0, i), (i, i), (i, -i)]
    ]
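    # With a single ring (j = 1) this expands to the 8 neighbours:
    # [(1, 0), (0, 1), (1, 1), (1, -1), (-1, 0), (0, -1), (-1, -1), (-1, 1)]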
    #rename dtm as elevation for future calculation if it does not exist yet
    elev_renamed = False
    if not grass.find_file('elevation', element='cell')['name']:
        grass.run_command('g.rename', raster="%s,elevation" % dem)
        elev_renamed = True

    #define drainage direction
    drainage_incoming = [2, 4, 3, 1, 6, 8, 7, 5]
    drainage_outcoming = []
    diag_dist = (regione['nsres']**2 + regione['ewres']**2)**0.5
    # [(1, 0), (0, 1), (1, 1), (1, -1), (-1, 0), (0, -1), (-1, -1), (-1, 1),
    cell_dists = [
        regione['nsres'], regione['ewres'], diag_dist, diag_dist,
        regione['nsres'], regione['ewres'], diag_dist, diag_dist
    ]
    # define the calculation term
    terms = [
        "(drainage[%d,%d] == %d && not(isnull(raster_streams[0,0])) && not(isnull(raster_streams[%d,%d])) )"
        % ((offsets[j] + tuple([drainage_incoming[j]]) + offsets[j]))
        for j in range(len(drainage_incoming))
    ]
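    # For example terms[0] (offset (1, 0), incoming direction 2) expands to
    # "(drainage[1,0] == 2 && not(isnull(raster_streams[0,0]))
    #   && not(isnull(raster_streams[1,0])) )"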

    #define the operation expression
    terms_calc = [
        "(elevation[%d,%d] - elevation) * %s" % (offsets[j] + (terms[j], ))
        for j in range(len(terms))
    ]

    terms_calc_slope = [
        "( (elevation[%d,%d] - elevation)/%10.4f ) * %s" %
        (offsets[j] + (cell_dists[j], ) + (terms[j], ))
        for j in range(len(terms))
    ]

    expr = "num_cells_drain_into = (%s)" % " + ".join(terms)
    expr1 = "elevation_percentile4 = if(isnull(raster_streams),null(),(%s))" % " + ".join(
        terms)
    expr2 = "elevdiff_drain_into = %s" % " + ".join(terms_calc)
    expr3 = "slope_drain_into = %s" % " + ".join(terms_calc_slope)

    # do the r.mapcalc calculation with the moving window
    # exclude the num_cell_calculation_into
    #grass.mapcalc( expr )
    #print expr2
    #grass.mapcalc(  expr2 , overwrite=True)
    #print expr3
    #grass.mapcalc(  expr3 , overwrite=True)
    '''
    ------------------------------
    START CALCULATION OF 2KM UPSTREAM SLOPE
    ------------------------------
    '''
    #create an outlet vector
    new = VectorTopo('outlet')
    COLS = [(u'cat', 'INTEGER PRIMARY KEY')]
    new.open('w', tab_name='outlet', tab_cols=COLS)
    new.write(
        Point(xoutlet, youtlet),
        cat=1,
    )
    new.table.conn.commit()
    new.table.execute().fetchall()
    new.close()

    new = VectorTopo('output')
    COLS = [(u'cat', 'INTEGER PRIMARY KEY'),
            (u'discharge', u'double precision')]

    if new.exist():
        g.message('The vector exists: it will be renamed to')
        new_name = 'output' + str(datetime.datetime.now().time())[:8].replace(
            ':', '_')
        grass.info(new_name)
        new = VectorTopo(new_name)
        new.open('w', tab_name=new_name, tab_cols=COLS)
    else:
        new.open('w', tab_name='output', tab_cols=COLS)

    new.write(Point(xoutlet, youtlet), cat=1, attrs=(float(Qc), ))
    new.table.conn.commit()
    new.table.execute().fetchall()
    new.close()
    mean_elev_abs = mean_elev - min_elev
    FirsPage(formula=formula,
             xoutlet=xoutlet,
             youtlet=youtlet,
             Tc=corrivazione,
             AreaBasin=area_basin_Ha,
             ChLen=total_length * 1000.,
             MeanElev=mean_elev_abs,
             a=aMain_stat,
             b=bMain_stat,
             k=kMain_stat,
             TR=TR,
             f=f_K_T,
             h=h,
             h_ar=Ha,
             x1=X1,
             x2=X2,
             Pa=Pa,
             CN=CN,
             S1=S1,
             Pn=Pn,
             Qc=Qc,
             dropElev=deltaH)

    #cleaning part
    if elev_renamed:
        grass.run_command('g.rename', raster='elevation,%s' % dem)
    grass.del_temp_region()
    grass.run_command('r.to.vect',
                      input='basin',
                      output='basin1',
                      type='area',
                      overwrite=True)
    if cleanTemporary != 'no':
        grass.run_command(
            'g.remove',
            flags='f',
            type='raster',
            name=
            'main_stream,basin,circle,drainage,horton,raster_streams,slope_drain_into'
        )

        grass.run_command('g.remove',
                          flags='f',
                          type='vector',
                          name='main_stream,nodes,outlet')
        grass.run_command('g.remove',
                          type='vector',
                          pattern='main_stream*',
                          flags='f')
Ejemplo n.º 9
0
def create_maps(parsed_obs, offering, secondsGranularity, resolution):
    """
    Create raster maps representing offerings, observed props and procedures
    :param parsed_obs: Observations for a given offering in geoJSON format
    :param offering: A collection of sensors used to conveniently group them up
    :param secondsGranularity: Granularity in seconds
    :param resolution: 2D grid resolution for rasterization
    """

    timestampPattern = '%Y-%m-%dT%H:%M:%S'  # TODO: Timezone
    startTime = options['event_time'].split('+')[0]
    epochS = int(time.mktime(time.strptime(startTime, timestampPattern)))
    endTime = options['event_time'].split('+')[1].split('/')[1]
    epochE = int(time.mktime(time.strptime(endTime, timestampPattern)))
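    # Assuming an event_time such as
    # '2009-01-01T00:00:00+01:00/2009-01-02T00:00:00+01:00', the splits above
    # yield startTime = '2009-01-01T00:00:00' and
    # endTime = '2009-01-02T00:00:00' (timezone offsets are dropped, see TODO).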

    for key, observation in parsed_obs.items():
        print('Creating raster maps for offering '
              '{}, observed property {}'.format(offering, key))

        data = json.loads(observation)

        cols = [(u'cat', 'INTEGER PRIMARY KEY'), (u'name', 'VARCHAR'),
                (u'value', 'DOUBLE')]

        geometries = dict()
        intervals = {}
        for secondsStamp in range(epochS, epochE + 1, secondsGranularity):
            intervals.update({secondsStamp: dict()})

        timestampPattern = 't%Y%m%dT%H%M%S'  # TODO: Timezone

        for a in data['features']:
            name = a['properties']['name']
            geometries.update({name: a['geometry']['coordinates']})

            for timestamp, value in a['properties'].items():
                if timestamp != 'name':
                    observationStartTime = timestamp[:-4]
                    secondsTimestamp = int(
                        time.mktime(
                            time.strptime(observationStartTime,
                                          timestampPattern)))
                    for interval in intervals.keys():
                        if interval <= secondsTimestamp < (
                                interval + secondsGranularity):
                            if name in intervals[interval].keys():
                                intervals[interval][name].append(float(value))
                            else:
                                intervals[interval].update(
                                    {name: [float(value)]})
                            break

        for interval in intervals.keys():
            if len(intervals[interval]) != 0:
                timestamp = datetime.datetime.fromtimestamp(interval).strftime(
                    't%Y%m%dT%H%M%S')

                tableName = '{}_{}_{}_{}'.format(options['output'], offering,
                                                 key, timestamp)
                if ':' in tableName:
                    tableName = '_'.join(tableName.split(':'))
                if '-' in tableName:
                    tableName = '_'.join(tableName.split('-'))
                if '.' in tableName:
                    tableName = '_'.join(tableName.split('.'))

                new = VectorTopo(tableName)
                if overwrite() is True:
                    try:
                        new.remove()
                    except Exception:
                        pass

                new.open(mode='w',
                         layer=1,
                         tab_name=tableName,
                         link_name=tableName,
                         tab_cols=cols,
                         overwrite=True)
                i = 0
                for procedure, values in intervals[interval].items():
                    if new.exist() is False:
                        i = 1
                    else:
                        i += 1

                    if options['method'] == 'average':
                        value = sum(values) / len(values)
                    elif options['method'] == 'sum':
                        value = sum(values)
                    # TODO: Other aggregations methods

                    new.write(Point(*geometries[procedure]),
                              cat=i,
                              attrs=(
                                  procedure,
                                  value,
                              ))

                new.table.conn.commit()

                new.close(build=False)
                run_command('v.build', quiet=True, map=tableName)

                if options['bbox'] == '':
                    run_command('g.region', vector=tableName, res=resolution)

                run_command('v.to.rast',
                            input=tableName,
                            output=tableName,
                            use='attr',
                            attribute_column='value',
                            layer=1,
                            label_column='name',
                            type='point',
                            quiet=True)

                if flags['k'] is False:
                    run_command('g.remove',
                                flags='f',
                                type='vector',
                                name=tableName,
                                quiet=True)
Ejemplo n.º 10
0
def main():
	punti=options['points']
	dem=options['dem']
	TR=options['time'] 
	outlet=options['outlets']
	outlets = outlet.split(',')
	try:
		TR=int(TR)
	except ValueError:
		print('TR is not a number')
		sys.exit()
	grass.use_temp_region()
	elev_renamed = False
	
	#test to access points
	new_points = VectorTopo(punti)
	if new_points.exist():
		points_str=grass.read_command('v.to.db', map=punti,type='point',option='coor',flags='p')
		points_list = points_str.split('\n')
		points_list.remove('')
		
	else:
		print('File %s does not exist' % punti)
		sys.exit()
	
	#get region in order to estimate the threshold as 1/1000 of total cells
	grass.run_command('g.region',raster=dem)
	
	regione=grass.region()
	thrshold=float(regione['cells'])/300
	#stream and drainage determination
	grass.run_command('r.watershed', elevation=dem, threshold=500, stream='raster_streams', drainage='drainage',overwrite=True,flags='s')
	
	#the radius is little more than the current resolution
	radius=regione['nsres']*1.4
	
	output_points = []
		
	
	'''
	START CYCLE OVER EACH OUTLET IN THE LIST
	'''

	category=1
	for outlet in points_list[1:]:
		outlet = outlet.split('|')[1:-1]
		print(', '.join(outlet))
		grass.run_command('g.region',raster=dem)
		grass.run_command('r.circle', output='circle', coordinate=','.join(outlet), max=radius,overwrite=True) 
		#get the distances and take the shortest distance
		distances=grass.read_command('r.distance', map='circle,raster_streams')
		list_dist=distances.split('\n')
		list_dist.remove('')
		list_tuple=[]
		for distance in list_dist:
			dist=distance.split(':')
			my_tupla=dist[0],dist[1],float(dist[2]),dist[3],dist[4],dist[5],dist[6]
			list_tuple.append(my_tupla)
		tuple_orderedByDistance=sorted(list_tuple, key=lambda distanza: distanza[2])
		del(distances,list_tuple,list_dist)
		#calculate the basin and read its statistics
		outlet=tuple_orderedByDistance[0][-2:]
		xoutlet=float(outlet[0])
		youtlet=float(outlet[1])
		grass.run_command('r.water.outlet',input='drainage',output='basin',coordinates=str(xoutlet)+','+str(youtlet) , overwrite=True)
		statistics=grass.read_command('r.univar',map=dem, zones='basin')
		main_stat=statistics.splitlines()[-9:]
		
		
		#order the stream network
		grass.run_command('r.mask',raster='basin')
		grass.run_command('r.stream.order',stream_rast='raster_streams', direction='drainage', elevation=dem,horton='horton',overwrite=True)
		stream_stat=grass.read_command('r.stream.stats', stream_rast='horton', direction='drainage', elevation=dem,flags='o')
		network_statistics=stream_stat.split('\n')
		network_statistics.remove('')
		#get the max order
		network_statistics[-1].split()
		total_length=float(network_statistics[-1].split(',')[2])
		area_basin=float(network_statistics[-1].split(',')[3])
		area_basin_Ha=area_basin*100
		mean_elev=float(main_stat[3].split(':')[-1])
		min_elev=float(main_stat[0].split(':')[-1])
		max_elev=float(main_stat[1].split(':')[-1])
		deltaH=max_elev-min_elev
		average_slope=float(network_statistics[-1].split(',')[4])
		grass.run_command('r.mask',flags='r')
		
		TcGiandotti=(4*np.sqrt(area_basin)+1.5*total_length)/(0.8*np.sqrt(mean_elev-min_elev))			
		TcKirpich=0.945*(total_length**3./deltaH)**0.385
		
		if area_basin_Ha > 1000: #TODO check the references
			corrivazione = TcGiandotti
			grass.info('using Giandotti')
			grass.info(str(TcGiandotti))
			formula = 'Giandotti'
		else:
			corrivazione = TcKirpich
			formula = 'Kirpich'
			grass.info('using Kirpich')
			grass.info(str(TcKirpich))
		if corrivazione < 24:
			aPar='a24@PERMANENT'
			bPar='b24@PERMANENT'
			kPar='k24@PERMANENT'
		else:
			aPar='a15@PERMANENT'
			bPar='b15@PERMANENT'
			kPar='k15@PERMANENT'
		CNmap = 'CN@PERMANENT'
		
		aStat=grass.read_command('r.univar',map=aPar, zones='basin')
		aMain_stat=aStat.splitlines()[12].split(':')[-1]	
		aMain_stat=float(aMain_stat)
		bStat=grass.read_command('r.univar',map=bPar, zones='basin')
		bMain_stat=bStat.splitlines()[12].split(':')[-1]	
		bMain_stat=float(bMain_stat)
		kStat=grass.read_command('r.univar',map=kPar, zones='basin')
		kMain_stat=kStat.splitlines()[12].split(':')[-1]	
		kMain_stat=float(kMain_stat)
		CNstat = grass.read_command('r.univar',map=CNmap, zones='basin')
		CN=CNstat.splitlines()[12].split(':')[-1]
		CN=float(CN)
		
		g.message('area basin in km2: ')
		print(area_basin)
		print('mean elev: ')
		print(mean_elev-min_elev)
		print('delta H:')
		print(deltaH)
		print('total reach length: ')
		print(total_length)
		print('a mean:')
		print(aMain_stat)
		print('\n b mean: ')
		print(bMain_stat)
		print('\n k mean: ')
		print(kMain_stat)
		print('CN mean:')
		CN = 70.12/82.63 * CN
		print(CN)
		
		f_K_T = 1-kMain_stat*(0.45+0.799*np.log(-np.log(1-1./TR)))
		print('f(k,T): ')
		print(f_K_T)

		h=f_K_T*aMain_stat*corrivazione**bMain_stat
		print('\n h main:')
		print(h)
		X1 = 100*corrivazione/(0.236+0.062*corrivazione)
		X2 = 0.003*corrivazione+0.0234
		Pa = 100 - area_basin_Ha/(X1+X2*area_basin_Ha)
		Ha = h*Pa/100
		S1 = (1000./CN)-10
		Pn = (Ha-5.08*S1)**2/(Ha+20.32*S1)
		Qc = (1/360.)*Pn*area_basin_Ha/corrivazione

		print('discharge: ')
		print(Qc)
		
		#print table.columns.types()
		#[u'INTEGER', u'TEXT', u'integer', u'double precision']
		
		
		
		'''
		------------------------------
		START CALCULATION OF LOCAL UPSTREAM SLOPE
		------------------------------
		'''
		#offsets for moving windows
		offsets = [d
			   for j in range(1,1+1)
			   for i in [j,-j]
			   for d in [(i,0),(0,i),(i,i),(i,-i)]]
		#rename dtm as elevation for future calculation if it does not exist yet
		if not grass.find_file('elevation', element='cell')['name']:
			grass.run_command('g.rename',raster="%s,elevation" % dem)
			elev_renamed=True
		
		#define drainage direction
		drainage_incoming = [2,4,3,1,6,8,7,5]
		drainage_outcoming = []
		diag_dist= (regione['nsres']**2+regione['ewres']**2)**0.5
		# [(1, 0), (0, 1), (1, 1), (1, -1), (-1, 0), (0, -1), (-1, -1), (-1, 1), 
		cell_dists = [regione['nsres'], 
						 regione['ewres'],
						 diag_dist,
						 diag_dist,
						 regione['nsres'],
						 regione['ewres'],
						 diag_dist,
						 diag_dist
						]
		# define the calculation term
		terms = ["(drainage[%d,%d] == %d && not(isnull(raster_streams[0,0])) && not(isnull(raster_streams[%d,%d])) )"
					 % ((offsets[j]+tuple([drainage_incoming[j]])+offsets[j]))
					for j in range(len(drainage_incoming))]
		
		   
		   
		 #define the operation expression
		terms_calc = [ "(elevation[%d,%d] - elevation) * %s" 
					% (offsets[j]+(terms[j],) ) for j in range(len(terms))]
		
		terms_calc_slope = [ "( (elevation[%d,%d] - elevation)/%10.4f ) * %s" 
					% (offsets[j]+(cell_dists[j],)+(terms[j],)) for j in range(len(terms))]
		
		expr = "num_cells_drain_into = (%s)" % " + ".join(terms)
		expr1 = "elevation_percentile4 = if(isnull(raster_streams),null(),(%s))" % " + ".join(terms)
		expr2 = "elevdiff_drain_into = %s" % " + ".join(terms_calc)
		expr3 = "slope_drain_into = %s" % " + ".join(terms_calc_slope)       
		
		# do the r.mapcalc calculation with the moving window
		# exclude the num_cell_calculation_into
		#grass.mapcalc( expr )
		#print expr2
		#grass.mapcalc(  expr2 , overwrite=True)
		#print expr3
		grass.mapcalc(  expr3 , overwrite=True)    
		
		
		'''
		------------------------------
		START CALCULATION OF 2KM UPSTREAM SLOPE
		------------------------------
		'''
		#create an outlet vector
		new_outlet = VectorTopo('outlet')
		COLS = [(u'cat',       'INTEGER PRIMARY KEY')]
		new_outlet.open('w', tab_name='outlet', tab_cols=COLS)
		new_outlet.write(Point( xoutlet , youtlet ), cat=1, )
		new_outlet.table.conn.commit()
		new_outlet.table.execute().fetchall()
		new_outlet.close()
		
		#find local main channel
		horton_order=grass.raster_what('horton', [[ xoutlet , youtlet ]])
		horton_order = int( horton_order[0]['horton']['value'] )
		print "Horton order for main channel:"
		print horton_order
		grass.run_command('g.region', zoom='horton')	
		grass.mapcalc( "main_stream = if((horton == %d),1,null())" % horton_order, overwrite=True )
		grass.run_command('r.to.vect', input='main_stream', output='main_stream', type='line',overwrite=True)
		grass.run_command('v.build.polylines', overwrite=True, input='main_stream', output='main_stream_poly', cats='first')
		
		#network analysis on main channel
		grass.run_command('v.net',input='main_stream_poly', points='outlet', output='main_stream_connected', operation='connect', threshold=radius*3,overwrite=True)
		grass.run_command('v.net.iso', input='main_stream_connected',output='main_stream_iso', center_cats=1, costs='100,200,400',overwrite=True)
		report=grass.read_command('v.category', input='main_stream_iso', option='report',type='line')
		min_max = report.split('\n')[3].split()[-2:]
		min_cat = int (min_max[0] )
		max_cat = int (min_max[1] )
		
		elev_outlet = grass.raster_what('elevation', [[ xoutlet , youtlet ]])
		elev_outlet = float( elev_outlet[0]['elevation']['value'] )
		
		drops = []
		for i in range(min_cat,max_cat):
			grass.run_command('v.extract',input='main_stream_iso' ,type='line', 
				cats=i, output='main_stream_%s' % i,overwrite=True)
			grass.run_command('v.to.points', input='main_stream_%s' % i,type='line',
				output='nodes',use='node',overwrite=True)
			points=grass.read_command('v.to.db',flags='p', map='nodes', type='point', 
					option='coor', columns='x,y', layer=2) 
			points=points.split('\n')[1:]
			points.remove('')
			
			elevations_drops = []
			print(points)
			for point in points:
				xpoint = float ( point.split('|')[1] )
				ypoint = float( point.split('|')[2] )
				elev = grass.raster_what('elevation', [[ xpoint , ypoint ]])
				elev = float( elev[0]['elevation']['value'] )
				elevations_drops.append(elev-elev_outlet)
			
			elevations_drops.sort(reverse=True)
			drops.append(elevations_drops[0])
		
		print('list of drops:')
		print(drops)
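		# drops[i] is the largest elevation gain relative to the outlet among
		# the end nodes of the channel segments inside the 100, 200 and 400 m
		# iso-distance bands from v.net.iso above; divided by the band
		# distance they become the upslope_100/200/400 attributes below.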
			
		
		
		
		#sample the raster slope in the outlets
		slope_query=grass.raster_what('slope_drain_into', [[ xoutlet , youtlet ]])
		slope = slope_query[0]['slope_drain_into']['value']
		if slope == '0':
			slope = 1./10000
		else:
			slope = float( slope )
		
		
		
		dict_output = {
						'xoutlet':xoutlet,
						'youtlet':youtlet,
						'cat':category,
						'attrs':(Qc, slope, 9810.0*Qc*slope,total_length,elev_outlet,drops[0],drops[1],drops[2],drops[0]/100.,drops[1]/200.,drops[2]/400.,)
						}
		
		output_points.append(dict_output)
		print(category)
		category += 1
		print(category)
		
			
	#cleaning part
	if elev_renamed:
		grass.run_command('g.rename',raster='elevation,%s' % dem)
	grass.del_temp_region()
	grass.run_command('g.remove',flags='f', type='raster', name='main_stream,basin,circle,drainage,horton,raster_streams,slope_drain_into')
	
	grass.run_command('g.remove',flags='f', type='vector', name='main_stream,nodes,outlet')
	grass.run_command('g.remove',type='vector',pattern='main_stream*',flags='f')

	#creation of output data container
	print(output_points)
	new = VectorTopo('output')
	COLS = [(u'cat', 'INTEGER PRIMARY KEY'),
	        (u'discharge', u'double precision'),
	        (u'local_upslope', u'double precision'),
	        (u'TSP_local', u'double precision'),
	        (u'ch_len', u'double precision'),
	        (u'elev', u'double precision'),
	        (u'drop100', u'double precision'),
	        (u'drop200', u'double precision'),
	        (u'drop400', u'double precision'),
	        (u'upslope_100', u'double precision'),
	        (u'upslope_200', u'double precision'),
	        (u'upslope_400', u'double precision')]

	new.open('w', tab_name='output', tab_cols=COLS)

	for elem in output_points:
		new.write(
			Point(elem['xoutlet'], elem['youtlet']),
			cat=elem['cat'],
			attrs=elem['attrs'],
		)
	new.table.conn.commit()
	new.close()
Ejemplo n.º 11
0
def main():

    try:
        from pygbif import occurrences
        from pygbif import species
    except ImportError:
        grass.fatal(_("Cannot import pygbif (https://github.com/sckott/pygbif)"
                      " library."
                      " Please install it (pip install pygbif)"
                      " or ensure that it is on path"
                      " (use PYTHONPATH variable)."))

    # Parse input options
    output = options['output']
    mask = options['mask']
    species_maps = flags['i']
    no_region_limit = flags['r']
    no_topo = flags['b']
    print_species = flags['p']
    print_species_table = flags['t']
    print_species_shell = flags['g']
    print_occ_number = flags['o']
    allow_no_geom = flags['n']
    hasGeoIssue = flags['s']
    taxa_list = options['taxa'].split(',')
    institutionCode = options['institutioncode']
    basisofrecord = options['basisofrecord']
    recordedby = options['recordedby'].split(',')
    date_from = options['date_from']
    date_to = options['date_to']
    country = options['country']
    continent = options['continent']
    rank = options['rank']

    # Define static variables
    # Initialize the category counter
    cat = 0
    # Number of occurrences to fetch in one request
    chunk_size = 300
    # lat/lon proj string
    latlon_crs = ['+proj=longlat +no_defs +a=6378137 +rf=298.257223563 +towgs84=0.000,0.000,0.000',
                  '+proj=longlat +no_defs +a=6378137 +rf=298.257223563 +towgs84=0,0,0,0,0,0,0']
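    # These are the two PROJ.4 strings g.proj may report for a WGS84 lat/lon
    # location; they are used below to decide whether the occurrences and the
    # spatial filter polygon need to be reprojected.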
    # List of attributes available in Darwin Core.
    # Not all attributes are returned in each request, so the presence of
    # each DWC key in the dictionary returned by pygbif is checked against
    # this list to avoid KeyErrors.
    # The number of keys in this list has to match the number of columns in
    # the attribute table and the number of attributes written per occurrence.
    dwc_keys = ['key', 'taxonRank', 'taxonKey', 'taxonID', 'scientificName',
                'species', 'speciesKey', 'genericName', 'genus', 'genusKey',
                'family', 'familyKey', 'order', 'orderKey', 'class',
                'classKey', 'phylum', 'phylumKey', 'kingdom', 'kingdomKey',
                'eventDate', 'verbatimEventDate', 'startDayOfYear',
                'endDayOfYear', 'year', 'month', 'day', 'occurrenceID',
                'occurrenceStatus', 'occurrenceRemarks', 'habitat',
                'basisOfRecord', 'preparations', 'sex', 'type', 'locality',
                'verbatimLocality', 'decimalLongitude', 'decimalLatitude',
                'geodeticDatum', 'higherGeography', 'continent', 'country',
                'countryCode', 'stateProvince', 'gbifID', 'protocol',
                'identifier', 'recordedBy', 'identificationID', 'identifiers',
                'dateIdentified', 'modified', 'institutionCode',
                'lastInterpreted', 'lastParsed', 'references', 'relations',
                'catalogNumber', 'occurrenceDetails', 'datasetKey',
                'datasetName', 'collectionCode', 'rights', 'rightsHolder',
                'license', 'publishingOrgKey', 'publishingCountry',
                'lastCrawled', 'specificEpithet', 'facts', 'issues',
                'extensions', 'language']
    # Define columns for attribute table
    cols = [('cat',       'INTEGER PRIMARY KEY'),
            ('g_search',       'varchar(100)'),
            ('g_key',       'integer'),
            ('g_taxonrank',       'varchar(50)'),
            ('g_taxonkey',       'integer'),
            ('g_taxonid',       'varchar(50)'),
            ('g_scientificname',       'varchar(255)'),
            ('g_species',       'varchar(255)'),
            ('g_specieskey',       'integer'),
            ('g_genericname',       'varchar(255)'),
            ('g_genus',       'varchar(50)'),
            ('g_genuskey',       'integer'),
            ('g_family',       'varchar(50)'),
            ('g_familykey',       'integer'),
            ('g_order',       'varchar(50)'),
            ('g_orderkey',       'integer'),
            ('g_class',       'varchar(50)'),
            ('g_classkey',       'integer'),
            ('g_phylum',       'varchar(50)'),
            ('g_phylumkey',       'integer'),
            ('g_kingdom',       'varchar(50)'),
            ('g_kingdomkey',       'integer'),
            ('g_eventdate',       'text'),
            ('g_verbatimeventdate',       'varchar(50)'),
            ('g_startDayOfYear',       'integer'),
            ('g_endDayOfYear',       'integer'),
            ('g_year',       'integer'),
            ('g_month',       'integer'),
            ('g_day',       'integer'),
            ('g_occurrenceid',       'varchar(255)'),
            ('g_occurrenceStatus',       'varchar(50)'),
            ('g_occurrenceRemarks',       'varchar(50)'),
            ('g_habitat',       'varchar(50)'),
            ('g_basisofrecord',       'varchar(50)'),
            ('g_preparations',       'varchar(50)'),
            ('g_sex',       'varchar(50)'),
            ('g_type',       'varchar(50)'),
            ('g_locality',       'varchar(255)'),
            ('g_verbatimlocality',       'varchar(255)'),
            ('g_decimallongitude',       'double precision'),
            ('g_decimallatitude',       'double precision'),
            ('g_geodeticdatum',       'varchar(50)'),
            ('g_higherGeography',       'varchar(255)'),
            ('g_continent',       'varchar(50)'),
            ('g_country',       'varchar(50)'),
            ('g_countryCode',       'varchar(50)'),
            ('g_stateProvince',       'varchar(50)'),
            ('g_gbifid',       'varchar(255)'),
            ('g_protocol',       'varchar(255)'),
            ('g_identifier',       'varchar(50)'),
            ('g_recordedby',       'varchar(255)'),
            ('g_identificationid',       'varchar(255)'),
            ('g_identifiers',       'text'),
            ('g_dateidentified',       'text'),
            ('g_modified',       'text'),
            ('g_institutioncode',       'varchar(50)'),
            ('g_lastinterpreted',       'text'),
            ('g_lastparsed',       'text'),
            ('g_references',       'varchar(255)'),
            ('g_relations',       'text'),
            ('g_catalognumber',       'varchar(50)'),
            ('g_occurrencedetails',       'text'),
            ('g_datasetkey',       'varchar(50)'),
            ('g_datasetname',       'varchar(255)'),
            ('g_collectioncode',       'varchar(50)'),
            ('g_rights',       'varchar(255)'),
            ('g_rightsholder',       'varchar(255)'),
            ('g_license',       'varchar(50)'),
            ('g_publishingorgkey',       'varchar(50)'),
            ('g_publishingcountry',       'varchar(50)'),
            ('g_lastcrawled',       'text'),
            ('g_specificepithet',       'varchar(50)'),
            ('g_facts',       'text'),
            ('g_issues',       'text'),
            ('g_extensions',       'text'),
            ('g_language',       'varchar(50)')]
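    # NOTE: apart from the leading 'cat' and 'g_search' columns, the column
    # order above must match the order of dwc_keys and of the attrs tuple
    # written for each occurrence below.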

    set_output_encoding()
    # Set temporal filter if requested by user
    # Initialize eventDate filter
    eventDate = None
    # Check if date from is compatible (ISO compliant)
    if date_from:
        try:
            parse(date_from)
        except:
            grass.fatal("Invalid invalid start date provided")

        if not date_to:
            eventDate = '{}'.format(date_from)
    # Check if date to is compatible (ISO compliant)
    if date_to:
        try:
            parse(date_to)
        except:
            grass.fatal("Invalid invalid end date provided")
        # Check if date to is after date_from
        if parse(date_from) < parse(date_to):
            eventDate = '{},{}'.format(date_from, date_to)
        else:
            grass.fatal("Invalid date range: End date has to be after start date!")
    # Set filter on basisOfRecord if requested by user
    if basisofrecord == 'ALL':
        basisOfRecord = None
    else:
        basisOfRecord = basisofrecord
    # Allow also occurrences with spatial issues if requested by user
    hasGeospatialIssue = False
    if hasGeoIssue:
        hasGeospatialIssue = True
    # Allow also occurrences without coordinates if requested by user
    hasCoordinate = True
    if allow_no_geom:
        hasCoordinate = False

    # Set reprojection parameters
    # Set target projection of current LOCATION
    target_crs = grass.read_command('g.proj', flags='fj').rstrip(os.linesep)
    target = osr.SpatialReference()
    target.ImportFromProj4(target_crs)
    if target_crs == 'XY location (unprojected)':
        grass.fatal("Sorry, XY locations are not supported!")

    # Set source projection from GBIF
    source = osr.SpatialReference()
    source.ImportFromEPSG(4326)
    if target_crs not in latlon_crs:
        transform = osr.CoordinateTransformation(source, target)
        reverse_transform = osr.CoordinateTransformation(target, source)
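        # 'transform' converts GBIF coordinates (EPSG:4326) into the
        # location's CRS for writing points; 'reverse_transform' projects the
        # spatial filter polygon back into lat/lon for the GBIF API.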

    # Generate WKT polygon to use for spatial filtering if requested
    if mask:
        if len(mask.split('@')) == 2:
            m = VectorTopo(mask.split('@')[0], mapset=mask.split('@')[1])
        else:
            m = VectorTopo(mask)
        if not m.exist():
            grass.fatal('Could not find vector map <{}>'.format(mask))
        m.open('r')
        if not m.is_open():
            grass.fatal('Could not open vector map <{}>'.format(mask))

        # Use the polygon of the single area as spatial filter if the map
        # contains exactly one area; otherwise fall back to the map's Bbox
        if m.number_of('areas') == 1:
            region_pol = [area.to_wkt() for area in m.viter("areas")][0]
        else:
            bbox = str(m.bbox()).replace('Bbox(', '').replace(' ', '').rstrip(')').split(',')
            region_pol = 'POLYGON(({0} {1}, {0} {3}, {2} {3}, {2} {1}, {0} {1}))'.format(bbox[2],
                         bbox[0], bbox[3], bbox[1])
        m.close()
    else:
        # Do not limit import spatially if LOCATION is able to take global data
        if no_region_limit:
            if target_crs not in latlon_crs:
                grass.fatal('Import of data from outside the current region is '
                            'only supported in a WGS84 location!')
            region_pol = None
        else:
            # Limit import spatially to the current region if the LOCATION
            # cannot take global data, to avoid projection errors
            region = grass.parse_command('g.region', flags='g')
            region_pol = 'POLYGON(({0} {1}, {0} {3}, {2} {3}, {2} {1}, {0} {1}))'.format(region['e'],
                         region['n'], region['w'], region['s'])
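    # Both POLYGON strings above describe a closed rectangle in x y order
    # (e n, e s, w s, w n, back to e n); if the location is not lat/lon the
    # ring is reprojected to WGS84 below before being sent to GBIF.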

    # Do not reproject in latlon LOCATIONS
    if target_crs not in latlon_crs:
        pol = ogr.CreateGeometryFromWkt(region_pol)
        pol.Transform(reverse_transform)
        pol = pol.ExportToWkt()
    else:
        pol = region_pol

    # Create a single output map unless one map per species (or a
    # print-only mode) is requested
    if not species_maps and not print_species and not print_species_shell and not print_occ_number and not print_species_table:
        mapname = output
        new = Vector(mapname)
        new.open('w', tab_name=mapname, tab_cols=cols)
        cat = 1

    # Import data for each species
    for s in taxa_list:
        # Get the taxon key if the input is not already a numeric key
        try:
            key = int(s)
        except:
            try:
                species_match = species.name_backbone(s, rank=rank,
                                                      strict=False,
                                                      verbose=True)
                key = species_match['usageKey']
            except:
                grass.error('Data request for taxon {} failed. Are you online?'.format(s))
                continue

        # Return matching taxon and alternatives and exit
        if print_species:
            print('Matching taxon for {} is:'.format(s))
            print('{} {}'.format(species_match['scientificName'], species_match['status']))
            if 'alternatives' in list(species_match.keys()):
                print('Alternative matches might be:')
                for m in species_match['alternatives']:
                    print('{} {}'.format(m['scientificName'], m['status']))
            else:
                print('No alternatives found for the given taxon')
            continue
        if print_species_shell:
            print('match={}'.format(species_match['scientificName']))
            if 'alternatives' in list(species_match.keys()):
                alternatives = []
                for m in species_match['alternatives']:
                    alternatives.append(m['scientificName'])
                print('alternatives={}'.format(','.join(alternatives)))
            continue
        if print_species_table:
            if 'alternatives' in list(species_match.keys()):
                if len(species_match['alternatives']) == 0:
                    print('{0}|{1}|{2}|'.format(s, key, species_match['scientificName']))
                else:
                    alternatives = []
                    for m in species_match['alternatives']:
                        alternatives.append(m['scientificName'])
                    print('{0}|{1}|{2}|{3}'.format(s, key, species_match['scientificName'],
                                                    ','.join(alternatives)))
            continue
        try:
            returns_n = occurrences.search(taxonKey=key,
                                           hasGeospatialIssue=hasGeospatialIssue,
                                           hasCoordinate=hasCoordinate,
                                           institutionCode=institutionCode,
                                           basisOfRecord=basisOfRecord,
                                           recordedBy=recordedby,
                                           eventDate=eventDate,
                                           continent=continent,
                                           country=country,
                                           geometry=pol,
                                           limit=1)['count']
        except:
            grass.error('Data request for taxon {} failed. Are you online?'.format(s))
            returns_n = 0

        # Exit if search does not give a return
        # Print only number of returns for the given search and exit
        if print_occ_number:
            grass.message('Found {0} occurrences for taxon {1}...'.format(returns_n, s))
            continue
        elif returns_n <= 0:
            grass.warning('No occurrences for current search for taxon {0}...'.format(s))
            continue
        elif returns_n >= 200000:
            grass.warning('Your search for {1} returns {0} records.\n'
                          'Unfortunately, the GBIF search API is limited to 200,000 records per request.\n'
                          'The download will be incomplete. Please consider splitting up your search.'.format(returns_n, s))

        # Get the number of chunks to download
        chunks = int(math.ceil(returns_n / float(chunk_size)))
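        # Worked example of the pagination: with returns_n = 750 and
        # chunk_size = 300, chunks = ceil(750/300) = 3 requests at offsets
        # 0, 300 and 600; near the API hard limit, chunk_size is shrunk
        # below so that no request reads past record 200,000.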
        grass.verbose('Downloading {0} occurrences for taxon {1}...'.format(returns_n, s))

        # Create a map for each species if requested using map name as suffix
        if species_maps:
            mapname = '{}_{}'.format(s.replace(' ', '_'), output)

            new = Vector(mapname)
            new.open('w', tab_name=mapname, tab_cols=cols)
            cat = 0

        # Download the data from GBIF
        for c in range(chunks):
            # Define offset
            offset = c * chunk_size
            # Adjust chunk_size to the hard limit of 200,000 records in GBIF API
            # if necessary
            if offset + chunk_size >= 200000:
                chunk_size = 200000 - offset
            # Get the returns for the next chunk
            returns = occurrences.search(taxonKey=key,
                                         hasGeospatialIssue=hasGeospatialIssue,
                                         hasCoordinate=hasCoordinate,
                                         institutionCode=institutionCode,
                                         basisOfRecord=basisOfRecord,
                                         recordedBy=recordedby,
                                         eventDate=eventDate,
                                         continent=continent,
                                         country=country,
                                         geometry=pol,
                                         limit=chunk_size,
                                         offset=offset)

            # Write the returned data to map and attribute table
            for res in returns['results']:
                if target_crs not in latlon_crs:
                    point = ogr.CreateGeometryFromWkt('POINT ({} {})'.format(res['decimalLongitude'], res['decimalLatitude']))
                    point.Transform(transform)
                    x = point.GetX()
                    y = point.GetY()
                else:
                    x = res['decimalLongitude']
                    y = res['decimalLatitude']

                point = Point(x, y)

                for k in dwc_keys:
                    if k not in list(res.keys()):
                        res.update({k: None})

                cat = cat + 1
                new.write(point, cat=cat, attrs=(
                          '{}'.format(s),
                          res['key'],
                          res['taxonRank'],
                          res['taxonKey'],
                          res['taxonID'],
                          res['scientificName'],
                          res['species'],
                          res['speciesKey'],
                          res['genericName'],
                          res['genus'],
                          res['genusKey'],
                          res['family'],
                          res['familyKey'],
                          res['order'],
                          res['orderKey'],
                          res['class'],
                          res['classKey'],
                          res['phylum'],
                          res['phylumKey'],
                          res['kingdom'],
                          res['kingdomKey'],
                          '{}'.format(res['eventDate']) if res['eventDate'] else None,
                          '{}'.format(res['verbatimEventDate']) if res['verbatimEventDate'] else None,
                          res['startDayOfYear'],
                          res['endDayOfYear'],
                          res['year'],
                          res['month'],
                          res['day'],
                          res['occurrenceID'],
                          res['occurrenceStatus'],
                          res['occurrenceRemarks'],
                          res['habitat'],
                          res['basisOfRecord'],
                          res['preparations'],
                          res['sex'],
                          res['type'],
                          res['locality'],
                          res['verbatimLocality'],
                          res['decimalLongitude'],
                          res['decimalLatitude'],
                          res['geodeticDatum'],
                          res['higherGeography'],
                          res['continent'],
                          res['country'],
                          res['countryCode'],
                          res['stateProvince'],
                          res['gbifID'],
                          res['protocol'],
                          res['identifier'],
                          res['recordedBy'],
                          res['identificationID'],
                          ','.join(res['identifiers']),
                          '{}'.format(res['dateIdentified']) if res['dateIdentified'] else None,
                          '{}'.format(res['modified']) if res['modified'] else None,
                          res['institutionCode'],
                          '{}'.format(res['lastInterpreted']) if res['lastInterpreted'] else None,
                          '{}'.format(res['lastParsed']) if res['lastParsed'] else None,
                          res['references'],
                          ','.join(res['relations']),
                          res['catalogNumber'],
                          '{}'.format(res['occurrenceDetails']) if res['occurrenceDetails'] else None,
                          res['datasetKey'],
                          res['datasetName'],
                          res['collectionCode'],
                          res['rights'],
                          res['rightsHolder'],
                          res['license'],
                          res['publishingOrgKey'],
                          res['publishingCountry'],
                          '{}'.format(res['lastCrawled']) if res['lastCrawled'] else None,
                          res['specificEpithet'],
                          ','.join(res['facts']),
                          ','.join(res['issues']),
                          ','.join(res['extensions']),
                          res['language'],))


        # Close the current map if a map for each species is requested
        if species_maps:
            new.table.conn.commit()
            new.close()
            if not no_topo:
                grass.run_command('v.build', map=mapname, option='build')

    # Close the output map if not a map for each species is requested
    if not species_maps and not print_species and not print_species_shell and not print_occ_number and not print_species_table:
        new.table.conn.commit()
        new.close()
        if not no_topo:
            grass.run_command('v.build', map=mapname, option='build')
Ejemplo n.º 12
0
def full_maps(parsed_obs, offering, seconds_granularity, resolution,
              event_time, target):
    """Create raster maps.

    Maps represent represent offerings, observed props and procedures

    :param parsed_obs: Observations for a given offering in geoJSON format
    :param offering: A collection of sensors used to conveniently group them up
    :param seconds_granularity: Granularity in seconds
    :param resolution: 2D grid resolution for rasterization
    :param event_time: Timestamp of first/of last requested observation
    :param target:
    """
    timestamp_pattern = '%Y-%m-%dT%H:%M:%S'  # TODO: Timezone
    start_time = event_time.split('+')[0]
    epoch_s = int(time.mktime(time.strptime(start_time, timestamp_pattern)))
    end_time = event_time.split('+')[1].split('/')[1]
    epoch_e = int(time.mktime(time.strptime(end_time, timestamp_pattern)))
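    # A hedged reading of the slicing above, assuming an event_time of the
    # form '2009-01-01T00:00:00+00:00/2009-01-03T00:00:00+00:00':
    # split('+')[0] yields the start timestamp without its timezone, and
    # split('+')[1].split('/')[1] yields the end timestamp.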

    for key, observation in parsed_obs.items():
        print('Creating raster maps for offering '
              '{}, observed property {}'.format(offering, key))

        data = json.loads(observation)
        crs = data['crs']
        crs = int(crs['properties']['name'].split(':')[-1])
        transform = soslib.get_transformation(crs, target)

        cols = [(u'cat', 'INTEGER PRIMARY KEY'), (u'name', 'VARCHAR'),
                (u'value', 'DOUBLE')]

        geometries = dict()
        intervals = {}
        for secondsStamp in range(epoch_s, epoch_e + 1, seconds_granularity):
            intervals.update({secondsStamp: dict()})
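        # Example of the bucketing: with epoch_s=0, epoch_e=7200 and
        # seconds_granularity=3600 the interval keys are 0, 3600 and 7200;
        # each observation below is assigned to the interval whose time
        # window it falls in.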

        timestamp_pattern = 't%Y%m%dT%H%M%S'  # TODO: Timezone

        for a in data['features']:
            name = a['properties']['name']

            sx, sy, sz = a['geometry']['coordinates']
            point = ogr.CreateGeometryFromWkt('POINT ({} {} {})'.format(
                sx, sy, sz))
            point.Transform(transform)
            coords = (point.GetX(), point.GetY(), point.GetZ())
            geometries.update({name: coords})

            for timestamp, value in a['properties'].items():
                if timestamp != 'name':
                    observation_start_time = timestamp[:-4]
                    seconds_timestamp = int(
                        time.mktime(
                            time.strptime(observation_start_time,
                                          timestamp_pattern)))
                    for interval in intervals.keys():
                        if interval <= seconds_timestamp < (
                                interval + seconds_granularity):
                            if name in intervals[interval].keys():
                                intervals[interval][name].append(float(value))
                            else:
                                intervals[interval].update(
                                    {name: [float(value)]})
                            break

        for interval in intervals.keys():
            if len(intervals[interval]) != 0:
                timestamp = datetime.datetime.fromtimestamp(interval).strftime(
                    't%Y%m%dT%H%M%S')

                table_name = '{}_{}_{}_{}'.format(options['output'], offering,
                                                  key, timestamp)
                # SQL identifiers must not contain ':', '-' or '.', so
                # normalize the generated table name
                for char in (':', '-', '.'):
                    table_name = table_name.replace(char, '_')

                new = VectorTopo(table_name)
                if overwrite() is True:
                    try:
                        new.remove()
                    except:
                        pass

                new.open(mode='w',
                         layer=1,
                         tab_name=table_name,
                         link_name=table_name,
                         tab_cols=cols,
                         overwrite=True)
                i = 0
                n = None
                s = None
                e = None
                w = None

                for procedure, values in intervals[interval].items():
                    if new.exist() is False:
                        i = 1
                    else:
                        i += 1

                    if options['method'] == 'average':
                        value = sum(values) / len(values)
                    elif options['method'] == 'sum':
                        value = sum(values)
                    # TODO: Other aggregation methods
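                    # e.g. values = [2.0, 4.0] gives 3.0 for 'average' and
                    # 6.0 for 'sum'; any other method would leave 'value'
                    # unset or stale, which the TODO above still has to
                    # address.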

                    new.write(Point(*geometries[procedure]),
                              cat=i,
                              attrs=(
                                  procedure,
                                  value,
                              ))

                    if options['bbox'] == '':
                        x, y, z = geometries[procedure]
                        if not n:
                            n = y + resolution / 2
                            s = y - resolution / 2
                            e = x + resolution / 2
                            w = x - resolution / 2
                        else:
                            if y >= n:
                                n = y + resolution / 2
                            if y <= s:
                                s = y - resolution / 2
                            if x >= e:
                                e = x + resolution / 2
                            if x <= w:
                                w = x - resolution / 2
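                    # Note: the running n/s/e/w bounds accumulated above are
                    # padded by half a cell so that g.region below centers
                    # raster cells on the station points.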

                new.table.conn.commit()

                new.close(build=False)
                run_command('v.build', quiet=True, map=table_name)

                if options['bbox'] == '':
                    run_command('g.region', n=n, s=s, w=w, e=e, res=resolution)

                run_command('v.to.rast',
                            input=table_name,
                            output=table_name,
                            use='attr',
                            attribute_column='value',
                            layer=1,
                            type='point',
                            quiet=True)

                if flags['k'] is False:
                    run_command('g.remove',
                                flags='f',
                                type='vector',
                                name=table_name,
                                quiet=True)
Ejemplo n.º 13
0
    def test_all(self):
        self.assertModule(
            "v.stream.order",
            input="stream_network",
            points="stream_network_outlets",
            output="stream_network_order_test_all",
            threshold=25,
            order=["strahler", "shreve", "drwal", "scheidegger"],
            overwrite=True,
            verbose=True,
        )

        # Check all values
        v = VectorTopo(name="stream_network_order_test_all", mapset="")
        v.open(mode="r")
        self.assertTrue(v.exist())
        self.assertEqual(v.num_primitive_of("line"), 101)
        # feature 4
        self.assertEqual(v.read(4).attrs.cat, 41)
        self.assertEqual(v.read(4).attrs["outlet_cat"], 1)
        self.assertEqual(v.read(4).attrs["network"], 1)
        self.assertEqual(v.read(4).attrs["reversed"], 0)
        self.assertEqual(v.read(4).attrs["strahler"], 4)
        self.assertEqual(v.read(4).attrs["shreve"], 32)
        self.assertEqual(v.read(4).attrs["drwal"], 6)
        self.assertEqual(v.read(4).attrs["scheidegger"], 64)
        v.close()

        # Check for column copy
        self.assertModule(
            "v.stream.order",
            input="stream_network_order_test_all",
            points="stream_network_outlets",
            output="stream_network_order_test_all_2",
            threshold=25,
            order=["strahler", "shreve", "drwal", "scheidegger"],
            columns=["strahler", "shreve", "drwal", "scheidegger"],
            overwrite=True,
            verbose=True,
        )

        # Check all values and their copies
        v = VectorTopo(name="stream_network_order_test_all_2", mapset="")
        v.open(mode="r")
        self.assertTrue(v.exist())
        self.assertEqual(v.num_primitive_of("line"), 101)
        # feature 4
        self.assertEqual(v.read(4).attrs.cat, 4)
        self.assertEqual(v.read(4).attrs["outlet_cat"], 1)
        self.assertEqual(v.read(4).attrs["network"], 1)
        self.assertEqual(v.read(4).attrs["reversed"], 0)
        self.assertEqual(v.read(4).attrs["strahler"], 4)
        self.assertEqual(v.read(4).attrs["shreve"], 32)
        self.assertEqual(v.read(4).attrs["drwal"], 6)
        self.assertEqual(v.read(4).attrs["scheidegger"], 64)
        self.assertEqual(v.read(4).attrs["strahler_1"], 4)
        self.assertEqual(v.read(4).attrs["shreve_1"], 32)
        self.assertEqual(v.read(4).attrs["drwal_1"], 6)
        self.assertEqual(v.read(4).attrs["scheidegger_1"], 64)
        # feature 7
        self.assertEqual(v.read(7).attrs.cat, 7)
        self.assertEqual(v.read(7).attrs["outlet_cat"], 1)
        self.assertEqual(v.read(7).attrs["network"], 1)
        self.assertEqual(v.read(7).attrs["reversed"], 0)
        self.assertEqual(v.read(7).attrs["strahler"], 2)
        self.assertEqual(v.read(7).attrs["strahler_1"], 2)
        self.assertEqual(v.read(7).attrs["shreve"], 4)
        self.assertEqual(v.read(7).attrs["drwal"], 3)
        self.assertEqual(v.read(7).attrs["scheidegger"], 8)
        v.close()