Example #1
    def load_map(self):
	run('g.region', **self.wind)

	p = grass.pipe_command('r.out.ascii', input = self.inmap, quiet = True)
	self.wind = self.read_header(p.stdout)
	self.values = self.read_data(p.stdout)
	self.changed = [[False for c in row] for row in self.values]
	p.wait()

	self.clear_changes()

	run('r.out.ppm', input = self.inmap, output = self.tempfile)
	colorimg = wx.Image(self.tempfile)
	grass.try_remove(self.tempfile)

	for row in range(self.wind['rows']):
	    for col in range(self.wind['cols']):
		val = self.values[row][col]
		if val in self.colors:
		    continue
		r = colorimg.GetRed(col, row)
		g = colorimg.GetGreen(col, row)
		b = colorimg.GetBlue(col, row)
		color = "#%02x%02x%02x" % (r, g, b)
		self.colors[val] = color

	colorimg.Destroy()
Example #2
        def load_map(self):
            run("g.region", **self.wind)

            p = grass.pipe_command("r.out.ascii", input=self.inmap, quiet=True)
            self.wind = self.read_header(p.stdout)
            self.values = self.read_data(p.stdout)
            self.changed = [[False for c in row] for row in self.values]
            p.wait()

            self.clear_changes()

            run("r.out.ppm", input=self.inmap, output=self.tempfile)
            colorimg = wx.Image(self.tempfile)
            grass.try_remove(self.tempfile)

            for row in range(self.wind["rows"]):
                for col in range(self.wind["cols"]):
                    val = self.values[row][col]
                    if val in self.colors:
                        continue
                    r = colorimg.GetRed(col, row)
                    g = colorimg.GetGreen(col, row)
                    b = colorimg.GetBlue(col, row)
                    color = "#%02x%02x%02x" % (r, g, b)
                    self.colors[val] = color

            colorimg.Destroy()
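
Note: the read_header and read_data helpers referenced above are not part of the snippet. A minimal sketch of what they might look like, assuming the default r.out.ascii layout (six "key: value" header lines followed by rows of whitespace-separated cell values) and a Python 3 byte stream; shown here as free functions, where the example uses methods:

def read_header(fh):
    """Parse the six-line r.out.ascii header into a dict usable as
    g.region keyword arguments (north/south/east/west/rows/cols)."""
    wind = {}
    for _i in range(6):
        key, _sep, val = fh.readline().decode().partition(":")
        key = key.strip()
        # rows/cols are integers; the boundary values may be floats
        wind[key] = int(val) if key in ("rows", "cols") else float(val)
    return wind

def read_data(fh):
    """Read the remaining lines as a 2D list of cell-value strings."""
    return [line.decode().split() for line in fh]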
Example #3
    def load_aspect(self):
	if not self.aspect:
	    return

	p = grass.pipe_command('r.out.ascii', input = self.aspect, quiet = True)
	self.read_header(p.stdout)
	self.angles = self.read_data(p.stdout)
	p.wait()
Example #4
    def load_aspect(self):
        if not self.aspect:
            return

        p = grass.pipe_command('r.out.ascii', input=self.aspect, quiet=True)
        self.read_header(p.stdout)
        self.angles = self.read_data(p.stdout)
        p.wait()
Example #5
def get_nr_of_categories(vector, layer, rasters, rastertmp, percentile,
                         colprefixes, basecols, dbfdriver, c):
    """Get number of raster categories to be processed.

    Perform also checks of raster and vector categories. In the case of no
    raster categories, create the desired columns and exit.

    :param vector: name of vector map or data source for direct OGR access
    :param layer: layer number or name
    :param rastertmp: name of temporary raster map
    :return: number of raster categories or exit (if no categories found)
    """
    # dump cats to file to avoid "too many arguments" problem:
    p = grass.pipe_command("r.category", map=rastertmp, sep=";", quiet=True)
    cats = []

    for line in p.stdout:
        line = decode(line)
        cats.append(line.rstrip("\r\n").split(";")[0])
    p.wait()

    number = len(cats)
    if number < 1:
        # create columns and exit
        grass.warning(_("No categories found in raster map"))
        for i in range(len(rasters)):
            set_up_columns(
                vector,
                layer,
                percentile,
                colprefixes[i],
                basecols,
                dbfdriver,
                flags["c"],
            )
        sys.exit(0)

    # Check if all categories got converted
    # Report categories from vector map
    vect_cats = (grass.read_command("v.category",
                                    input=vector,
                                    option="report",
                                    flags="g").rstrip("\n").split("\n"))

    # get number of all categories in selected layer
    vect_cats_n = 0  # to be modified below
    for vcl in vect_cats:
        if vcl.split(" ")[0] == layer and vcl.split(" ")[1] == "all":
            vect_cats_n = int(vcl.split(" ")[2])

    if vect_cats_n != number:
        grass.warning(
            _("Not all vector categories converted to raster. "
              "Converted {0} of {1}.").format(number, vect_cats_n))

    return number
Example #6
def copy_colors(fh, map, offset):
    p = gscript.pipe_command('r.colors.out', map=map)
    for line in p.stdout:
        f = line.rstrip('\r\n').split(' ')
        if offset:
            if f[0] in ['nv', 'default']:
                continue
            f[0] = str(float(f[0]) + offset)
        fh.write(' '.join(f) + '\n')
    p.wait()
Example #7
def copy_colors(fh, map, offset):
    p = gscript.pipe_command("r.colors.out", map=map)
    for line in p.stdout:
        f = gscript.decode(line).rstrip("\r\n").split(" ")
        if offset:
            if f[0] in ["nv", "default"]:
                continue
            f[0] = str(float(f[0]) + offset)
        fh.write(gscript.encode(" ".join(f) + "\n"))
    p.wait()
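
A hypothetical usage sketch for the variant above (the map and file names are assumptions): because gscript.encode produces bytes, the target file must be opened in binary mode.

import grass.script as gscript

# Copy the color table of "elevation", shifting all numeric breakpoints
# up by 100 map units:
with open("shifted_colors.txt", "wb") as fh:  # binary mode to match encode()
    copy_colors(fh, "elevation", 100)

# The rules file can then be loaded into another raster:
gscript.run_command("r.colors", map="elevation_shifted",
                    rules="shifted_colors.txt")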
Example #8
def copy_colors(fh, map, offset):
    p = grass.pipe_command('r.colors.out', map=map)
    for line in p.stdout:
        f = line.rstrip('\r\n').split(' ')
        if offset:
            if f[0] in ['nv', 'default']:
                continue
            f[0] = str(float(f[0]) + offset)
        fh.write(' '.join(f) + '\n')
    p.wait()
Example #9
def perform_stats(
    raster,
    percentile,
    fi,
    dbfdriver,
    colprefix,
    variables_dbf,
    variables,
    colnames,
    extstat,
):
    with open(sqltmp, "w") as f:
        # do the stats
        p = grass.pipe_command(
            "r.univar",
            flags="t" + extstat,
            map=raster,
            zones=rastertmp,
            percentile=percentile,
            sep=";",
        )

        first_line = 1

        f.write("{0}\n".format(grass.db_begin_transaction(fi["driver"])))
        for line in p.stdout:
            if first_line:
                first_line = 0
                continue

            vars = decode(line).rstrip("\r\n").split(";")

            f.write("UPDATE %s SET" % fi["table"])
            first_var = 1
            for colname in colnames:
                variable = colname.replace("%s_" % colprefix, "", 1)
                if dbfdriver:
                    variable = variables_dbf[variable]
                i = variables[variable]
                value = vars[i]
                # convert nan, +nan, -nan, inf, +inf, -inf, Infinity, +Infinity,
                # -Infinity to NULL
                if value.lower().endswith("nan") or "inf" in value.lower():
                    value = "NULL"
                if not first_var:
                    f.write(" , ")
                else:
                    first_var = 0
                f.write(" %s=%s" % (colname, value))

            f.write(" WHERE %s=%s;\n" % (fi["key"], vars[0]))
        f.write("{0}\n".format(grass.db_commit_transaction(fi["driver"])))
        p.wait()
Example #10
def main():
    options, flags = gs.parser()

    vector = options["input"]
    layer = 1
    raster = options["output"]
    method = options["method"]
    z = 3
    sep = "pipe"
    out_args = {}

    if not gs.find_file(vector, element="vector")["fullname"]:
        gs.fatal("Vector map <{0}> not found".format(vector))

    if options["column"]:
        z = 4
        out_args["column"] = options["column"]
        out_args["where"] = "{0} IS NOT NULL".format(options["column"])

        columns = gs.vector_columns(vector)

        if options["column"] not in columns:
            gs.fatal(_("Column <{0}> not found".format(options["column"])))
        if columns[options["column"]]["type"] not in ("INTEGER",
                                                      "DOUBLE PRECISION"):
            gs.fatal(_("Column <{0}> is not numeric".format(
                options["column"])))

    out_process = gs.pipe_command(
        "v.out.ascii",
        input=vector,
        layer=layer,
        format="point",
        separator=sep,
        flags="r",
        **out_args,
    )
    in_process = gs.start_command(
        "r.in.xyz",
        input="-",
        output=raster,
        method=method,
        z=z,
        separator=sep,
        stdin=out_process.stdout,
    )
    in_process.communicate()
    out_process.wait()

    return 0
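
The main() above chains two modules through a pipe so no intermediate file touches disk. A stripped-down sketch of the same producer/consumer idiom, with placeholder map names and one common refinement (closing the parent's copy of the pipe so the producer sees SIGPIPE if the consumer exits early):

import grass.script as gs

producer = gs.pipe_command("v.out.ascii", input="points", format="point",
                           separator="pipe")
consumer = gs.start_command("r.in.xyz", input="-", output="heights",
                            separator="pipe", stdin=producer.stdout)
producer.stdout.close()  # parent no longer needs its end of the pipe
consumer.communicate()   # drain and wait for the consumer
producer.wait()          # then reap the producer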
Example #11
def perform_stats(raster, percentile, fi, dbfdriver, colprefix, variables_dbf,
                  variables, colnames, extstat):
    with open(sqltmp, 'w') as f:
        # do the stats
        p = grass.pipe_command('r.univar',
                               flags='t' + extstat,
                               map=raster,
                               zones=rastertmp,
                               percentile=percentile,
                               sep=';')

        first_line = 1

        f.write("{0}\n".format(grass.db_begin_transaction(fi['driver'])))
        for line in p.stdout:
            if first_line:
                first_line = 0
                continue

            vars = decode(line).rstrip('\r\n').split(';')

            f.write("UPDATE %s SET" % fi['table'])
            first_var = 1
            for colname in colnames:
                variable = colname.replace("%s_" % colprefix, '', 1)
                if dbfdriver:
                    variable = variables_dbf[variable]
                i = variables[variable]
                value = vars[i]
                # convert nan, +nan, -nan, inf, +inf, -inf, Infinity, +Infinity,
                # -Infinity to NULL
                if value.lower().endswith('nan') or 'inf' in value.lower():
                    value = 'NULL'
                if not first_var:
                    f.write(" , ")
                else:
                    first_var = 0
                f.write(" %s=%s" % (colname, value))

            f.write(" WHERE %s=%s;\n" % (fi['key'], vars[0]))
        f.write("{0}\n".format(grass.db_commit_transaction(fi['driver'])))
        p.wait()
Example #12
def get_maps(stvds):
    """
    Get vector maps registered in an input stvds
    :param stvds: Spatio temporal vector dataset intended to convert
    :return maps: dictionary in format {vector map: [layers of vector map]}
    """

    listOutput = pipe_command('t.vect.list', input=stvds)
    listOutput = listOutput.communicate()[0]
    maps = dict()
    first = True

    for oneMap in listOutput.splitlines():
        if first is False:
            if oneMap.split('|')[0] in maps.keys():
                maps[oneMap.split('|')[0]].append(oneMap.split('|')[1])
            else:
                maps.update({oneMap.split('|')[0]: [oneMap.split('|')[1]]})
        else:
            first = False

    return maps
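
Under Python 3, communicate()[0] returns bytes, so the split('|') calls above assume a text stream. An equivalent sketch with explicit decoding (via decode from grass.script.utils) that also replaces the first flag with a slice:

from grass.script import pipe_command
from grass.script.utils import decode

def get_maps_decoded(stvds):
    out = pipe_command('t.vect.list', input=stvds).communicate()[0]
    maps = dict()
    for row in decode(out).splitlines()[1:]:  # [1:] skips the header row
        name, layer = row.split('|')[:2]
        maps.setdefault(name, []).append(layer)
    return maps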
Example #13
def main():
    options, flags = gs.parser()

    vector = options['input']
    layer = 1
    raster = options['output']
    method = options['method']
    z = 3
    sep = 'pipe'
    out_args = {}

    if not gs.find_file(vector, element='vector')['fullname']:
        gs.fatal('Vector map <{0}> not found'.format(vector))

    if options['column']:
        z = 4
        out_args['column'] = options['column']
        out_args['where'] = '{0} IS NOT NULL'.format(options['column'])

        columns = gs.vector_columns(vector)

        if options['column'] not in columns:
            gs.fatal(_('Column <{0}> not found'.format(options['column'])))
        if columns[options['column']]['type'] not in ('INTEGER', 'DOUBLE PRECISION'):
            gs.fatal(_('Column <{0}> is not numeric'.format(options['column'])))


    out_process = gs.pipe_command(
        'v.out.ascii', input=vector, layer=layer, format='point',
        separator=sep, flags='r', **out_args)
    in_process = gs.start_command(
        'r.in.xyz', input='-', output=raster, method=method, z=z,
        separator=sep, stdin=out_process.stdout)
    in_process.communicate()
    out_process.wait()

    return 0
Example #14
def output_centerline(river, stations, elev, outfile):
	""" 
	Output the river network, including centerline for each reach
	and coordinates for all stations along each reach
	"""

	# Begin with the list of reach cats
	rc=grass.read_command('v.category', input=river, type="line", option="print")
	reach_cats=rc.strip().split("\n")
	
	outfile.write("BEGIN STREAM NETWORK:\n")
	# Now get points from the river vector, one pair for each reach
	for i in range(len(reach_cats)):
		where_cond="cat="+str(reach_cats[i])
		riv=grass.read_command('v.db.select',map=river, separator=" ", 
				columns="cat,start_x,start_y,end_x,end_y", where=where_cond, flags="c")
		riv_pts=riv.strip().split(" ")
		pi,x1,y1,x2,y2 = riv_pts[0],riv_pts[1],riv_pts[2],riv_pts[3],riv_pts[4]
		# Give the start point a point id of "cat"1, and the endpoint an id of "cat"2
		pi1 = pi+"1"
		pi2 = pi+"2"
		# Get elevation of each endpoint from the elev raster
		coords=x1+","+y1
		e = grass.read_command('r.what', map=elev, coordinates=coords, separator=",")
		start_elev=e.split(",")[3].rstrip()
		coords=x2+","+y2
		e = grass.read_command('r.what', map=elev, coordinates=coords, separator=",")
		end_elev=e.split(",")[3].rstrip()
		outfile.write(" ENDPOINT: "+x1+","+y1+","+start_elev+","+pi1+"\n")
		outfile.write(" ENDPOINT: "+x2+","+y2+","+end_elev+","+pi2+"\n")

	# Loop thru the reach_cats again, and output a REACH: section for each reach, 
	# with all points for that reach
	for i in range(len(reach_cats)):
		outfile.write(" REACH:\n")
		outfile.write("   STREAM ID: %s\n" % river)
		reach_id=str(reach_cats[i])
		outfile.write("   REACH ID: %s\n" % reach_id)
		# Get the FROM POINT and TO POINT ids just like above
		where_cond="cat="+str(reach_cats[i])
		riv=grass.read_command('v.db.select',map=river, separator=" ", 
				columns="cat,start_x,end_x,start_y,end_y", where=where_cond, flags="c")
		r_pts=riv.strip().split(" ")
		pi,x1,y1,x2,y2 = r_pts[0],r_pts[1],r_pts[2],r_pts[3],r_pts[4]
		# Give the start point a point id of "cat"1, and the endpoint an id of "cat"2
		pi1 = pi+"1"
		pi2 = pi+"2"
		outfile.write("   FROM POINT: %s\n" % pi1)
		outfile.write("   TO POINT: %s\n" % pi2)

		# Now the actual points along centerline
		outfile.write("   CENTERLINE:\n")
		# loop thru the stations point vector to get each station's x,y and id
		reach_cond="reach_id="+reach_cats[i]
		p=grass.pipe_command('v.db.select', map=stations, where=reach_cond, quiet=True, flags="c")
		st_list=[]
		for line in p.stdout:
			st=line.strip().split('|')
			s,x,y = st[0], st[1], st[2]
			st_list.append([s,x,y])

		p.stdout.close()
		p.wait()
		# Now write out all station points to the CENTERLINE section
		# Go thru the st_list in reverse order so that the centerline is from upstream to downstream
		for i in range(len(st_list)-1, -1, -1):
			outfile.write("	"+st_list[i][1]+","+st_list[i][2]+",NULL,"+st_list[i][0]+"\n")

		outfile.write(" END:\n")

	# Close STREAM NETWORK section
	outfile.write("END STREAM NETWORK:\n\n")
Example #15
def reclass(inf, outf, lim, clump, diag, les):
    infile = inf
    outfile = outf
    lesser = les
    limit = lim
    clumped = clump
    diagonal = diag

    s = grass.read_command("g.region", flags='p')
    s = decode(s)
    kv = grass.parse_key_val(s, sep=':')
    s = kv['projection'].strip().split()
    if s[0] == '0':
        grass.fatal(_("xy-locations are not supported; "
                      "need projected data with grids in meters"))

    if not grass.find_file(infile)['name']:
        grass.fatal(_("Raster map <%s> not found") % infile)

    if clumped and diagonal:
        grass.fatal(_("flags c and d are mutually exclusive"))

    if clumped:
        clumpfile = infile
    else:
        clumpfile = "%s.clump.%s" % (infile.split('@')[0], outfile)
        TMPRAST.append(clumpfile)

        if not grass.overwrite():
            if grass.find_file(clumpfile)['name']:
                grass.fatal(_("Temporary raster map <%s> exists") % clumpfile)
        if diagonal:
            grass.message(
                _("Generating a clumped raster file including "
                  "diagonal neighbors..."))
            grass.run_command('r.clump',
                              flags='d',
                              input=infile,
                              output=clumpfile)
        else:
            grass.message(_("Generating a clumped raster file ..."))
            grass.run_command('r.clump', input=infile, output=clumpfile)

    if lesser:
        grass.message(
            _("Generating a reclass map with area size less than "
              "or equal to %f hectares...") % limit)
    else:
        grass.message(
            _("Generating a reclass map with area size greater "
              "than or equal to %f hectares...") % limit)

    recfile = outfile + '.recl'
    TMPRAST.append(recfile)

    sflags = 'aln'
    if grass.raster_info(infile)['datatype'] in ('FCELL', 'DCELL'):
        sflags += 'i'
    p1 = grass.pipe_command('r.stats',
                            flags=sflags,
                            input=(clumpfile, infile),
                            sep=';')
    p2 = grass.feed_command('r.reclass',
                            input=clumpfile,
                            output=recfile,
                            rules='-')
    rules = ''
    for line in p1.stdout:
        f = decode(line).rstrip(os.linesep).split(';')
        if len(f) < 5:
            continue
        hectares = float(f[4]) * 0.0001
        if lesser:
            test = hectares <= limit
        else:
            test = hectares >= limit
        if test:
            rules += "%s = %s %s\n" % (f[0], f[2], f[3])
    if rules:
        p2.stdin.write(encode(rules))
    p1.wait()
    p2.stdin.close()
    p2.wait()
    if p2.returncode != 0:
        if lesser:
            grass.fatal(
                _("No areas of size less than or equal to %f "
                  "hectares found.") % limit)
        else:
            grass.fatal(
                _("No areas of size greater than or equal to %f "
                  "hectares found.") % limit)
    grass.mapcalc("$outfile = $recfile", outfile=outfile, recfile=recfile)
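
The core of the function above is a pipe_command/feed_command pair: r.stats produces one record per clump and r.reclass consumes reclass rules on stdin (rules='-'). The example buffers all rules in a string and writes them only after r.stats has been read to the end, which keeps the two pipes from blocking each other. A minimal sketch of the pairing, with placeholder map names and a simplified rule format:

import grass.script as grass
from grass.script.utils import decode, encode

p_in = grass.pipe_command('r.stats', flags='n', input=('clumps', 'landuse'),
                          sep=';')
p_out = grass.feed_command('r.reclass', input='clumps', output='reclassed',
                           rules='-')
rules = ''
for line in p_in.stdout:
    f = decode(line).rstrip().split(';')
    rules += '%s = %s\n' % (f[0], f[1])  # clump category -> landuse category
p_in.wait()
p_out.stdin.write(encode(rules))
p_out.stdin.close()  # EOF lets r.reclass finish writing the map
p_out.wait()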
Example #16
def main():
    global tmp, tmp_proj, tmp_gpx, tmp_extr, tmp_vogb

    format = options['format']
    input = options['input']
    layer = options['layer']
    output = options['output']
    type = options['type']
    where = options['where']
    wpt = flags['w']
    rte = flags['r']
    trk = flags['t']

    nflags = len(filter(None, [wpt, rte, trk]))
    if nflags > 1:
	grass.fatal(_("One feature at a time please."))
    if nflags < 1:
	grass.fatal(_("No features requested for export."))

    # set some reasonable defaults
    if not type:
	if wpt:
	    type = 'point'
	else:
	    type = 'line'

    #### check for gpsbabel
    ### FIXME: may need --help or similar?
    if not grass.find_program("gpsbabel"):
	grass.fatal(_("The gpsbabel program was not found, please install it first.\n") +
		    "http://gpsbabel.sourceforge.net")

    #### check for cs2cs
    if not grass.find_program("cs2cs"):
	grass.fatal(_("The cs2cs program was not found, please install it first.\n") +
		    "http://proj.osgeo.org")

    # check if we will overwrite data
    if os.path.exists(output) and not grass.overwrite():
	grass.fatal(_("Output file already exists."))

    #### set temporary files
    tmp = grass.tempfile()

    # SQL extract if needed
    if where:
	grass.verbose("Extracting data ...")
	tmp_extr = "tmp_vogb_extr_%d" % os.getpid()
	ret = grass.run_command('v.extract', input = "$GIS_OPT_INPUT",
				output = tmp_extr, type = type, layer = layer,
				where = where, quiet = True)
	if ret != 0:
	    grass.fatal(_("Error executing SQL query"))

	kv = grass.vector_info_topo(tmp_extr)
	if kv['primitives'] == 0:
	    grass.fatal(_("SQL query returned an empty map (no %s features?)") % type)

	inmap = tmp_extr
    else:
	#   g.copy "$GIS_OPT_INPUT,tmp_vogb_extr_$$"   # to get a copy of DB into local mapset
	#   INMAP="tmp_vogb_extr_$$"
	inmap = input

    #### set up projection info
    # TODO: check if we are already in ll/WGS84.  If so skip m.proj step.

    # TODO: multi layer will probably fail badly due to sed 's/^ 1   /'
    #   output as old GRASS 4 vector ascii and fight with dig_ascii/?
    #   Change to s/^ \([0-9]   .*\)    /# \1/' ??? mmph.

    # reproject to lat/lon WGS84
    grass.verbose("Reprojecting data ...")

    re1 = re.compile(r'^([PLBCFKA])')
    re2 = re.compile(r'^ 1     ')

    re3 = re.compile(r'\t([-\.0-9]*) .*')
    re4 = re.compile(r'^([-\.0-9])')
    re5 = re.compile(r'^#')

    tmp_proj = tmp + ".proj"
    tf = open(tmp_proj, 'w')
    p1 = grass.pipe_command('v.out.ascii', input = inmap, format = 'standard')
    p2 = grass.feed_command('m.proj', input = '-', flags = 'od', quiet = True, stdout = tf)
    tf.close()

    lineno = 0
    for line in p1.stdout:
	lineno += 1
	if lineno < 11:
	    continue
	line = re1.sub(r'#\1', line)
	line = re2.sub(r'# 1  ', line)
	p2.stdin.write(line)

    p2.stdin.close()
    p1.wait()
    p2.wait()

    if p1.returncode != 0 or p2.returncode != 0:
	grass.fatal(_("Error reprojecting data"))

    tmp_vogb = "tmp_vogb_epsg4326_%d" % os.getpid()
    p3 = grass.feed_command('v.in.ascii', out = tmp_vogb, format = 'standard', flags = 'n', quiet = True)
    tf = open(tmp_proj, 'r')

    for line in tf:
	line = re3.sub(r' \1', line)
	line = re4.sub(r' \1', line)
	line = re5.sub('', line)
	p3.stdin.write(line)

    p3.stdin.close()
    tf.close()
    p3.wait()

    if p3.returncode != 0:
	grass.fatal(_("Error reprojecting data"))

    # don't v.db.connect directly as source table will be removed with
    # temporary map in that case. So we make a temp copy of it to work with.
    kv = vector_db(inmap)
    if layer in kv:
	db_params = kv[layer]

	db_table = db_params['table']
	db_key = db_params['key']
	db_database = db_params['database']
	db_driver = db_params['driver']

	ret = grass.run_command('db.copy',
				from_driver = db_driver,
				from_database = db_database,
				from_table = db_table,
				to_table = tmp_vogb)
	if ret != 0:
	    grass.fatal(_("Error copying temporary DB"))

	ret = grass.run_command('v.db.connect', map = tmp_vogb, table = tmp_vogb, quiet = True)
	if ret != 0:
	    grass.fatal(_("Error reconnecting temporary DB"))

    # export as GPX using v.out.ogr
    if trk:
	linetype = "FORCE_GPX_TRACK=YES"
    elif rte:
	linetype = "FORCE_GPX_TRACK=YES"
    else:
	linetype = None

    # BUG: cat is being reported as elevation and attribute output is skipped.
    #   (v.out.ogr DB reading or ->OGR GPX driver bug<-
    #     resolved: see new Create opts at http://www.gdal.org/ogr/drv_gpx.html)
    #   v.out.ogr -> shapefile -> GPX works, but we try to avoid that as it's
    #     lossy. Also that would allow ogr2ogr -a_srs $IN_PROJ -t_srs EPSG:4326
    #     so skip m.proj pains.. if that is done ogr2ogr -s_srs MUST HAVE +wktext
    #     with PROJ.4 terms or else the +nadgrids will be ignored! best to feed
    #     it  IN_PROJ="`g.proj -jf` +wktext"  in that case.

    grass.verbose("Exporting data ...")

    tmp_gpx = tmp + ".gpx"
    ret = grass.run_command('v.out.ogr', input = tmp_vogb, dsn = tmp_gpx,
			    type = type, format = 'GPX', lco = linetype,
			    dsco = "GPX_USE_EXTENSIONS=YES", quiet = True)
    if ret != 0:
	grass.fatal(_("Error exporting data"))

    if format == 'gpx':
	# short circuit, we have what we came for.
	grass.try_remove(output)
	os.rename(tmp_gpx, output)
	grass.verbose("Fast exit.")
	sys.exit()

    # run gpsbabel
    if wpt:
	gtype = '-w'
    elif trk:
	gtype = '-t'
    elif rte:
	gtype = '-r'
    else:
	gtype = ''

    grass.verbose("Running GPSBabel ...")

    ret = grass.call(['gpsbabel',
		      gtype,
		      '-i', 'gpx',
		      '-f', tmp + '.gpx',
		      '-o', format,
		      '-F', output])

    if ret != 0:
	grass.fatal(_("Error running GPSBabel"))

    grass.verbose("Done.")
Example #17
def main():
    check_progs()
    
    inmap = options['input']
    output = options['ldm']
    width = options['width']
    color = options['color']
    graph = options['graph']
    ldm_type = options['type']

    mapset = grass.gisenv()['MAPSET']

    global tmp, nuldev, grass_version
    nuldev = None

    grass_version = grass.version()['version'][0]
    if grass_version != '7':
        grass.fatal(_("Sorry, this script works in GRASS 7.* only"))

    # setup temporary files
    tmp = grass.tempfile()
    
    # check for LatLong location
    if grass.locn_is_latlong() == True:
        grass.fatal("Module works only in locations with cartesian coordinate system")


    # check if input file exists
    if not grass.find_file(inmap, element = 'vector')['file']:
        grass.fatal(_("<%s> does not exist.") % inmap)
        
    # check for lines
    iflines = grass.vector_info_topo(inmap)['lines']
    if iflines == 0:
        grass.fatal(_("Map <%s> has no lines.") % inmap)
    

    # display options
    if flags['x']:
        env = grass.gisenv()
        mon = env.get('MONITOR', None)
        if not mon:
            if not graph:
                grass.fatal(_("Please choose \"graph\" output file with LDM graphics or not use flag \"x\""))

    
    ####### DO IT #######
    # copy input vector map and drop table
    grass.run_command('g.copy', vect = (inmap, 'v_ldm_vect'), quiet = True, stderr = nuldev)
    db = grass.vector_db('v_ldm_vect')
    if db != {}:
        grass.run_command('v.db.droptable', map_ = 'v_ldm_vect', flags = 'f', quiet = True, stderr = nuldev)

    # compute mean center of lines with v.mc.py module
    center_coords = grass.read_command('v.mc.py', input_ = inmap, type_ = 'line',
                                quiet = True, stderr = nuldev).strip()
    mc_x = center_coords.split(' ')[0]
    mc_y = center_coords.split(' ')[1]

    center_coords = str(mc_x) + ',' + str(mc_y)

    ### 
    inmap = 'v_ldm_vect'

    # count lines
    count = grass.vector_info_topo(inmap)['lines']

    # add temp table with azimuths and lengths of lines
    in_cats = inmap + '_cats'    
    grass.run_command('v.category', input_ = inmap, option = 'add', 
                      output = in_cats, quiet = True, stderr = nuldev)
    grass.run_command('v.db.addtable', map_ = in_cats, table = 'tmp_tab', 
                      columns = 'sum_azim double, len double', quiet = True, stderr = nuldev)
    grass.run_command('v.db.connect', map_ = in_cats, table = 'tmp_tab', 
                      flags = 'o', quiet = True, stderr = nuldev)
    grass.run_command('v.to.db', map_ = in_cats, opt = 'azimuth', 
                      columns = 'sum_azim', units = 'radians', quiet = True, stderr = nuldev)
    grass.run_command('v.to.db', map_ = in_cats, opt = 'length',  
                      columns = 'len', units = 'meters', quiet = True, stderr = nuldev)    

    # find end azimuth
    p = grass.pipe_command('v.db.select', map_ = in_cats, columns = 'sum_azim', flags = 'c', quiet = True, stderr = nuldev)
    c = p.communicate()[0].strip().split('\n')

    sin = []
    cos = []
    
    for i in c:
        s1 = math.sin(float(i))
        c1 = math.cos(float(i))
        sin.append(s1)
        cos.append(c1)

    ca_sin = sum(map(float,sin))
    ca_cos = sum(map(float,cos))
    
    atan = math.atan2(ca_sin,ca_cos)
    end_azim = math.degrees(atan)

    # find compass angle    
    if end_azim < 0:
        a2 = -(end_azim)
    if end_azim > 0:
        a2 = end_azim
    if (ca_sin > 0) and (ca_cos > 0):
        comp_angle = a2
    if (ca_sin > 0) and (ca_cos < 0):
        comp_angle = a2
    if (ca_sin < 0) and (ca_cos > 0):
        comp_angle = 360 - a2
    if (ca_sin < 0) and (ca_cos < 0):
        comp_angle = 360 - a2

    # find LDM
    if end_azim < 0:
        a2 = -(end_azim)
    if end_azim > 0:
        a2 = end_azim
    if (ca_sin > 0) and (ca_cos > 0):
        ldm = 90 - a2
    if (ca_sin > 0) and (ca_cos < 0):
        ldm = 450 - a2
    if (ca_sin < 0) and (ca_cos > 0):
        ldm = 90 + a2
    if (ca_sin < 0) and (ca_cos < 0):
        ldm = 90 + a2

    # find circular variance
    sin_pow = math.pow(ca_sin,2) 
    cos_pow = math.pow(ca_cos,2) 

    circ_var = 1-(math.sqrt(sin_pow+cos_pow))/count

    # find start/end points of "mean" line
    end_azim_dms = decimal2dms(end_azim)

    # if end_azim < 0:
    #     end_azim_dms = '-' + (str(end_azim_dms))

    start_azim = 180 - end_azim
    start_azim_dms = decimal2dms(start_azim)
    
    p = grass.pipe_command('v.db.select', map_ = in_cats, columns = 'len',
                           flags = 'c', quiet = True, stderr = nuldev)
    c = p.communicate()[0].strip().split('\n')

    mean_length = sum(map(float,c))/len(c)
    half_length = float(mean_length)/2

    tmp1 = tmp + '.inf'
    inf1 = file(tmp1, 'w')
    print >> inf1, 'N ' + str(end_azim_dms) + ' E ' + str(half_length)
    inf1.close()
    
    end_coords = grass.read_command('m.cogo', input_ = tmp1, output = '-',
                                    coord = center_coords, quiet = True).strip()

    tmp2 = tmp + '.inf2'
    inf2 = file(tmp2, 'w')
    print >> inf2, 'N ' + str(start_azim_dms) + ' W ' + str(half_length)
    inf2.close()

    start_coords = grass.read_command('m.cogo', input_ = tmp2, output = '-',
                                      coord = center_coords, quiet = True).strip()

    # make "arrowhead" symbol
    if flags['x'] or graph:
        tmp3 = tmp + '.arrowhead_1'
        outf3 = file(tmp3, 'w')

        if ldm_type == 'direct':
            t1 = """VERSION 1.0
BOX -0.5 -0.5 0.5 0.5
POLYGON
  RING
  FCOLOR NONE
    LINE
      0 0
      0.3 -1
    END
  END
POLYGON
  RING
  FCOLOR NONE
    LINE
      0 0
      -0.3 -1
    END
  END
END
"""
            outf3.write(t1)
            outf3.close()
    
            gisdbase = grass.gisenv()['GISDBASE']
            location = grass.gisenv()['LOCATION_NAME']
            mapset = grass.gisenv()['MAPSET']
            symbols_dir = os.path.join(gisdbase, location, mapset, 'symbol', 'arrows')
            symbol = os.path.join(symbols_dir, 'arrowhead_1')
    
            if not os.path.exists(symbols_dir):
                try:
                    os.makedirs(symbols_dir)
                except OSError:
                    pass
        
            if not os.path.isfile(symbol):
                shutil.copyfile(tmp3, symbol)
        
    
        # write LDM graph file and optionally display line of LDM with an arrow
    tmp4 = tmp + '.ldm'
    outf4 = file(tmp4, 'w')
    
    arrow_size = int(width) * 1.4
    arrow_azim = 360 - float(end_azim)

    if ldm_type == 'direct':
        t2 = string.Template("""
move $start_coords
width $width
color $color
draw $end_coords

rotation $arrow_azim
width $width
symbol $symbol_s $arrow_size $end_coords $color
""")    
        s2 = t2.substitute(start_coords = start_coords, width = width, color = color,
                       end_coords = end_coords, arrow_azim = arrow_azim,
                       symbol_s = "arrows/arrowhead_1", arrow_size = arrow_size)
    else:
        t2 = string.Template("""
move $start_coords
width $width
color $color
draw $end_coords
""")    
        s2 = t2.substitute(start_coords = start_coords, width = width, color = color,
                       end_coords = end_coords)

    outf4.write(s2)
    outf4.close()

    if graph:
        shutil.copy(tmp4, graph)



    # save LDM line to vector if option "output" set  
    if output:
        tmp5 = tmp + '.line'
        outf5 = file(tmp5, 'w')

        print >> outf5, str(start_coords)
        print >> outf5, str(end_coords)

        outf5.close()

        grass.run_command('v.in.lines', input_ = tmp5, output = output,
                              separator = " ", overwrite = True, quiet = True)

        out_cats = output + '_cats'
        grass.run_command('v.category', input_ = output, option = 'add', 
                          output = out_cats, quiet = True, stderr = nuldev)
        grass.run_command('g.rename', vect = (out_cats,output), 
                          overwrite = True, quiet = True, stderr = nuldev)
        
        if circ_var:
            col = 'comp_angle double,dir_mean double,cir_var double,ave_x double,ave_y double,ave_len double'
        else:
            col = 'comp_angle double,dir_mean double,ave_x double,ave_y double,ave_len double'
                        
        grass.run_command('v.db.addtable', map_ = output, columns = col, quiet = True, stderr = nuldev)

        tmp6 = tmp + '.sql'
        outf6 = file(tmp6, 'w')
                
        t3 = string.Template("""
UPDATE $output SET comp_angle = $comp_angle;
UPDATE $output SET dir_mean = $ldm;
UPDATE $output SET ave_x = $mc_x;
UPDATE $output SET ave_y = $mc_y;
UPDATE $output SET ave_len = $mean_length;
""")
        s3 = t3.substitute(output = output, comp_angle = ("%0.3f" % comp_angle),
                           ldm = ("%0.3f" % ldm), mc_x = ("%0.3f" % float(mc_x)),
                           mc_y = ("%0.3f" % float(mc_y)), mean_length = ("%0.3f" % mean_length))
        outf6.write(s3)

        if circ_var:
            print >> outf6, "UPDATE %s SET cir_var = %0.3f;" % (output, circ_var)

        outf6.close()

        grass.run_command('db.execute', input_ = tmp6, quiet = True, stderr = nuldev)


    # print LDM parameters to stdout (with <-g> flag in shell style):
    print_out = ['Compass Angle', 'Directional Mean', 'Average Center', 'Average Length']
    if circ_var:
        print_out.append('Circular Variance')
        
    print_shell = ['compass_angle', 'directional_mean', 'average_center',
                   'average_length']
    if circ_var:
        print_shell.append('circular_variance')
        
    print_vars = ["%0.3f" % comp_angle, "%0.3f" % ldm,
                  "%0.3f" % float(mc_x) + ',' + "%0.3f" % float(mc_y),
                  "%0.3f" % mean_length]
    if circ_var:
        print_vars.append("%0.3f" % circ_var)


    if flags['g']:
        for i,j in zip(print_shell, print_vars):
            print "%s=%s" % (i, j)
    else:
        for i,j in zip(print_out, print_vars):
            print "%s: %s" % (i, j)


    # display LDM graphics
    if flags['x']:
        if mon:
            if graph:
                grass.run_command('d.graph', input_ = graph, flags = 'm', quiet = True, stderr = nuldev)
            else:
                grass.run_command('d.graph', input_ = tmp4, flags = 'm', quiet = True, stderr = nuldev)
        elif graph:
            grass.message(_("\n Use this command in wxGUI \"Command console\" or with <d.mon> or with \"command layer\" to display LDM graphics: \n d.graph -m input=%s \n\n" ) % graph)
Example #18
# Import the points from the vector location and set the region bounds
print "Importing points from vector location"
v.proj(input=vector_layer, location=vector_location, mapset="PERMANENT")
g.run_command("g.region", vect=vector_layer)

# Build the Heatmap
print "Building heatmap: %s" % raster_layer
v.kernel(input=vector_layer, output=raster_layer, radius=radius)

# Generate the layer statistics and extract the maximum value
# Since we're doing some sorting ourselves, we'll be using r.stats
# instead of r.report
print "Getting layer stats"
vals = []
output = g.pipe_command('r.stats', input=raster_layer)
for line in output.stdout:
    vals.append(float(line.rstrip('\r\n').split('-')[1]))

print "Layer maximum value: %s" % vals[-1]

# Recode the raster using these rules
# A better solution would be to read in a rules file, but I'm too lazy.
# If you want to do that, you'll need to specify a file location
#   and remove the write_command call.
# We're also going to be really snarky and recode to the maximum int value (255).
# That's mostly because I don't know how to programmatically
# rescale a float to an int.
print "Recoding raster layer"
rules = "0.0:" + str(vals[-1]) + ":0:255"
#r.recode(input=raster_layer, rules=rules, output=recoded_raster_layer)
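
A sketch of the recode step the closing comments describe, assuming the same g alias for grass.script used above and that recoded_raster_layer is defined like the other layer names; the single rule is fed through stdin instead of a rules file:

rules = "0.0:" + str(vals[-1]) + ":0:255"  # old_min:old_max:new_min:new_max
g.write_command('r.recode', input=raster_layer, output=recoded_raster_layer,
                rules='-', stdin=rules)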
Example #19
def main():
    global far_edge_value
    global d_max

    options, flags = gscript.parser()
    high = options["high"]
    low = options["low"]
    output = options["output"]
    far_edge = float(options["far_edge"])
    inter_points = int(options["inter_points"])
    use_average_differences = flags["a"]

    if high is None or high == "":
        gscript.error(_("[r.mblend] ERROR: high is a mandatory parameter."))
        exit()

    if low is None or low == "":
        gscript.error(_("[r.mblend] ERROR: low is a mandatory parameter."))
        exit()

    if output is None or output == "":
        gscript.error(_("[r.mblend] ERROR: output is a mandatory parameter."))
        exit()

    if far_edge < 0 or far_edge > 100:
        gscript.error(
            _("[r.mblend] ERROR: far_edge must be a percentage",
              " between 0 and 100."))
        exit()

    if inter_points < 0:
        gscript.error(
            _("[r.mblend] ERROR: inter_points must be a positive",
              " integer."))
        exit()

    # Set the region to the two input rasters
    gscript.run_command("g.region", raster=high + "," + low)
    # Determine cell side
    region = gscript.region()
    if region["nsres"] > region["ewres"]:
        cell_side = region["nsres"]
    else:
        cell_side = region["ewres"]

    compute_d_max(region)

    # Make cell size compatible
    low_res_inter = getTemporaryIdentifier()
    gscript.message(
        _("[r.mblend] Resampling low resolution raster to higher" +
          " resolution"))
    gscript.run_command("r.resamp.interp",
                        input=low,
                        output=low_res_inter,
                        method="nearest")

    # Obtain extent to interpolate
    low_extent_rast = getTemporaryIdentifier()
    high_extent_rast = getTemporaryIdentifier()
    low_extent = getTemporaryIdentifier()
    high_extent = getTemporaryIdentifier()
    interpol_area = getTemporaryIdentifier()
    gscript.message(_("[r.mblend] Multiplying low resolution by zero"))
    gscript.mapcalc(low_extent_rast + " = " + low + " * 0")
    gscript.message(_("[r.mblend] Multiplying high resolution by zero"))
    gscript.mapcalc(high_extent_rast + " = " + high + " * 0")
    gscript.message(_("[r.mblend] Computing extent of low resolution"))
    gscript.run_command("r.to.vect",
                        input=low_extent_rast,
                        output=low_extent,
                        type="area")
    gscript.message(_("[r.mblend] Computing extent of high resolution"))
    gscript.run_command("r.to.vect",
                        input=high_extent_rast,
                        output=high_extent,
                        type="area")
    gscript.message(_("[r.mblend] Computing area to interpolate"))
    gscript.run_command(
        "v.overlay",
        ainput=low_extent,
        binput=high_extent,
        output=interpol_area,
        operator="not",
    )

    # Compute difference between the two rasters and vectorise to points
    interpol_area_buff = getTemporaryIdentifier()
    diff = getTemporaryIdentifier()
    diff_points_edge = getTemporaryIdentifier()
    gscript.mapcalc(diff + " = " + high + " - " + low_res_inter)
    gscript.message(_("[r.mblend] Computing buffer around interpolation area"))
    gscript.run_command(
        "v.buffer",
        input=interpol_area,
        output=interpol_area_buff,
        type="area",
        distance=cell_side,
    )
    gscript.message(
        _("[r.mblend] Vectorising differences between input" + " rasters"))
    gscript.run_command("r.mask", vector=interpol_area_buff)
    gscript.run_command("r.to.vect",
                        input=diff,
                        output=diff_points_edge,
                        type="point")
    gscript.run_command("r.mask", flags="r")

    # Compute average of the differences if flag -a was passed
    if use_average_differences:
        p = gscript.pipe_command("r.univar", map=diff)
        result = {}
        for line in p.stdout:
            vector = line.split(": ")
            if vector[0] == "mean":
                print("Found it: " + vector[1])
                far_edge_value = vector[1]
        p.wait()

    # Get points in low resolution farther away from high resolution raster
    dist_high = getTemporaryIdentifier()
    weights = getTemporaryIdentifier()
    interpol_area_points = getTemporaryIdentifier()
    pre_interpol_area_points = getTemporaryIdentifier()
    weight_points = getTemporaryIdentifier()
    interpol_area_in_buff = getTemporaryIdentifier()
    weight_points_all_edges = getTemporaryIdentifier()
    weight_points_edge = getTemporaryIdentifier()
    # 1. Distance to High resolution raster
    gscript.message(
        _("[r.mblend] Computing distance to high resolution" + " raster"))
    gscript.run_command("r.grow.distance", input=high, distance=dist_high)
    # 2. Rescale to the interval [0,10000]: these are the weights
    gscript.message(_("[r.mblend] Rescaling distance to [0,10000] interval"))
    gscript.run_command("r.rescale",
                        input=dist_high,
                        output=weights,
                        to="0," + str(WEIGHT_MAX))
    # 3. Extract points from interpolation area border
    gscript.message(
        _("[r.mblend] Extract points from interpolation area " + "boundary"))
    gscript.run_command(
        "v.to.points",
        input=interpol_area,
        output=pre_interpol_area_points,
        type="boundary",
        dmax=d_max,
        layer="-1",
    )
    gscript.message(_("[r.mblend] Copying features to layer 1"))
    gscript.run_command(
        "v.category",
        input=pre_interpol_area_points,
        output=interpol_area_points,
        option="chlayer",
        layer="2,1",
    )
    gscript.message(_("[r.mblend] Linking attribute table to layer 1"))
    gscript.run_command(
        "v.db.connect",
        map=interpol_area_points,
        table=interpol_area_points,
        layer="1",
        flags="o",
    )
    # 4. Query distances to interpolation area points
    gscript.message(_("[r.mblend] Querying distances raster"))
    gscript.run_command("v.what.rast",
                        map=interpol_area_points,
                        raster=weights,
                        column=COL_VALUE)
    # 5. Select those with higher weights
    cut_off = str(far_edge / 100 * WEIGHT_MAX)
    gscript.message(
        _("[r.mblend] Selecting far edge points (using cut-off" +
          " percentage)"))
    gscript.run_command(
        "v.extract",
        input=interpol_area_points,
        output=weight_points_edge,
        where=COL_VALUE + ">" + cut_off,
    )

    # Merge the two point edges and set low res edge to zero
    points_edges = getTemporaryIdentifier()
    gscript.message(_("[r.mblend] Dropping extra column from far edge"))
    gscript.run_command("v.db.dropcolumn",
                        map=weight_points_edge,
                        layer="1",
                        columns="along")
    gscript.message(_("[r.mblend] Setting far edge weights to zero"))
    gscript.run_command("v.db.update",
                        map=weight_points_edge,
                        column=COL_VALUE,
                        value=far_edge_value)
    gscript.message(_("[r.mblend] Patching the two edges"))
    gscript.run_command(
        "v.patch",
        input=diff_points_edge + "," + weight_points_edge,
        output=points_edges,
        flags="e",
    )

    # Interpolate smoothing raster
    smoothing = getTemporaryIdentifier()
    interpol_area_rst = getTemporaryIdentifier()
    # Consign region to interpolation area
    gscript.run_command("g.region", vector=interpol_area_buff)
    gscript.message(
        _("[r.mblend] Interpolating smoothing surface. This" +
          " might take a while..."))
    gscript.run_command(
        "v.surf.idw",
        input=points_edges,
        column=COL_VALUE,
        output=smoothing,
        power=2,
        npoints=inter_points,
    )
    # Reset region to full extent
    gscript.run_command("g.region", raster=high + "," + low)

    # Apply stitching
    smooth_low_res = getTemporaryIdentifier()
    # Sum to low res
    gscript.message(_("[r.mblend] Applying smoothing surface"))
    gscript.mapcalc(smooth_low_res + " = " + low_res_inter + " + " + smoothing)
    # Add both rasters
    try:
        gscript.message(_("[r.mblend] Joining result into a single raster"))
        gscript.run_command("r.patch",
                            input=high + "," + smooth_low_res,
                            output=output)
    except Exception as ex:
        gscript.error(_("[r.mblend] ERROR: Failed to create smoothed raster."))
        exit()

    gscript.message(_("[r.mblend] SUCCESS: smoothed raster created."))
Example #20
fn_out = prefix + '_' + year + '_' + suffix + '.tif'

t = time.time()

#for f in glob.glob('*.tif'):
#    grass.run_command('r.in.gdal', input=f, output=f.replace('.tif',''))
    
t_import = time.time() - t

#for f in glob.glob('*.tif'):
#    grass.run_command('r.null', map=f.replace('.tif',''), setnull="249,250,251,252,253,254,255")
#    
t_nodata = time.time() - t - t_import

result = ''
p = grass.pipe_command('g.mlist', type = 'rast', quiet=True, pattern=year + '*')
for line in p.stdout:
    result = result + ',' + line.replace('\n','')

input_rast_list = result.strip(',')

grass.run_command('r.series', input=input_rast_list, output='dh1_' + year + ',dh2_' + year + ',ave_' + year + ',std_' + year, method='sum,minimum,average,stddev')
grass.mapcalc('dh3_' + year + ' = std_' + year + '/ave_' + year)
t_calc = time.time() - t - t_import - t_nodata

grass.run_command('i.group', group='rgb_group_' + year, input='dh1_' + year + ',dh2_' + year + ',dh3_' + year)
grass.run_command('r.out.gdal', input='rgb_group_' + year, output=fn_out, type='Float32', createopt='PROFILE=BASELINE,INTERLEAVE=PIXEL,TFW=YES')
shutil.move(fn_out,od + fn_out)
shutil.move(fn_out.replace('.tif','.tfw'),od + fn_out.replace('.tif','.tfw'))
shutil.move(fn_out + '.aux.xml',od + fn_out + '.aux.xml')
cmd = "gdal_edit -a_srs \"EPSG:4326\" " + od + fn_out
Example #21
def do_it_all(global_vars, target_pts_np):
    """Conduct weighted and parametrised partial viewshed and cummulate it with
    the previous partial viewsheds
    :param target_pts_np: Array of target points in global coordinate system
    :type target_pts_np: ndarray
    :return: 2D array of weighted parametrised cumulative viewshed
    :rtype: ndarray
    """
    # Set counter
    counter = 1

    # Get variables out of global_vars dictionary
    reg = global_vars["region"]
    exp_range = global_vars["range"]
    flagstring = global_vars["flagstring"]
    r_dsm = global_vars["r_dsm"]
    v_elevation = global_vars["observer_elevation"]
    refr_coeff = global_vars["refr_coeff"]
    memory = global_vars["memory"]
    parametrise_viewshed = global_vars["param_viewshed"]
    dsm_type = global_vars["dsm_type"]
    b_1 = global_vars["b_1"]
    cores = global_vars["cores"]
    tempname = global_vars["tempname"]

    # Create empty viewshed
    np_cum = np.empty((reg.rows, reg.cols), dtype=np.single)
    np_cum[:] = np.nan
    tmp_vs = "{}_{}".format(tempname, os.getpid())

    for target_pnt in target_pts_np:

        # Display a progress info message
        grass.percent(counter, len(target_pts_np), 1)
        grass.verbose("Processing point {i} ({p:.1%})".format(
            i=int(target_pnt[0]), p=counter / len(target_pts_np)))

        # Global coordinates and attributes of target point T
        t_glob = target_pnt[1:]

        # ======================================================================
        # 1. Set local computational region: +/- exp_range from target point
        # ======================================================================
        # compute position of target point within a pixel
        delta_n = math.ceil(
            (t_glob[1] - reg.south) / reg.nsres) * reg.nsres - (t_glob[1] -
                                                                reg.south)
        delta_s = (t_glob[1] - reg.south) - math.floor(
            (t_glob[1] - reg.south) / reg.nsres) * reg.nsres
        delta_e = math.ceil(
            (t_glob[0] - reg.west) / reg.ewres) * reg.ewres - (t_glob[0] -
                                                               reg.west)
        delta_w = (t_glob[0] - reg.west) - math.floor(
            (t_glob[0] - reg.west) / reg.ewres) * reg.ewres

        # ensure that local region doesn't exceed global region
        loc_reg_n = min(t_glob[1] + exp_range + delta_n, reg.north)
        loc_reg_s = max(t_glob[1] - exp_range - delta_s, reg.south)
        loc_reg_e = min(t_glob[0] + exp_range + delta_e, reg.east)
        loc_reg_w = max(t_glob[0] - exp_range - delta_w, reg.west)

        # pygrass sets region for pygrass tasks
        lreg = deepcopy(reg)
        lreg.set_bbox(Bbox(loc_reg_n, loc_reg_s, loc_reg_e, loc_reg_w))
        lreg.set_raster_region()

        # Create processing environment with region information
        c_env = os.environ.copy()
        c_env["GRASS_REGION"] = grass.region_env(n=loc_reg_n,
                                                 s=loc_reg_s,
                                                 e=loc_reg_e,
                                                 w=loc_reg_w)

        lreg_shape = [lreg.rows, lreg.cols]

        # ======================================================================
        # 2. Calculate binary viewshed and convert to numpy
        # ======================================================================
        vs = grass.pipe_command(
            "r.viewshed",
            flags="b" + flagstring,
            input=r_dsm,
            output=tmp_vs,
            coordinates="{},{}".format(t_glob[0], t_glob[1]),
            observer_elevation=0.0,
            target_elevation=v_elevation,
            max_distance=exp_range,
            refraction_coeff=refr_coeff,
            memory=int(round(memory / cores)),
            quiet=True,
            overwrite=True,
            env=c_env,
        )
        vs.communicate()
        # Workaround for https://github.com/OSGeo/grass/issues/1436
        clean_temp(vs.pid)

        # Read viewshed into numpy with single precision and replace NoData
        np_viewshed = raster2numpy(tmp_vs).astype(np.single)
        np_viewshed[np_viewshed == -2147483648] = np.nan

        # ======================================================================
        # 3. Prepare local coordinates and attributes of target point T
        # ======================================================================
        # Calculate how much of rows/cols of local region lies
        # outside global region
        o_1 = [
            max(t_glob[1] + exp_range + reg.nsres / 2 - reg.north, 0),
            max(reg.west - (t_glob[0] - exp_range - reg.ewres / 2), 0),
        ]

        t_loc = np.append(
            np.array([
                exp_range / reg.nsres + 0.5 - o_1[0] / reg.nsres,
                exp_range / reg.ewres + 0.5 - o_1[1] / reg.ewres,
            ]),
            t_glob[2:],
        )

        # ======================================================================
        # 4. Parametrise viewshed
        # ======================================================================
        np_viewshed = parametrise_viewshed(
            lreg_shape,
            t_loc,
            np_viewshed,
            reg,
            exp_range,
            r_dsm,
            dsm_type,
            v_elevation,
            b_1,
        ).astype(np.single)

        # ======================================================================
        # 5. Cumulate viewsheds
        # ======================================================================
        # Determine position of local parametrised viewshed within
        # global cumulative viewshed
        o_2 = [
            int(round((reg.north - loc_reg_n) / reg.nsres)),  # NS (rows)
            int(round((loc_reg_w - reg.west) / reg.ewres)),  # EW (cols)
        ]

        # Add local parametrised viewshed to global cumulative viewshed
        # replace nans with 0 in processed regions, keep nan where both are nan
        all_nan = np.all(
            np.isnan([
                np_cum[o_2[0]:o_2[0] + lreg_shape[0],
                       o_2[1]:o_2[1] + lreg_shape[1]],
                np_viewshed,
            ]),
            axis=0,
        )

        np_cum[o_2[0]:o_2[0] + lreg_shape[0],
               o_2[1]:o_2[1] + lreg_shape[1]] = np.nansum(
                   [
                       np_cum[o_2[0]:o_2[0] + lreg_shape[0],
                              o_2[1]:o_2[1] + lreg_shape[1]],
                       np_viewshed,
                   ],
                   axis=0,
               )

        np_cum[o_2[0]:o_2[0] + lreg_shape[0],
               o_2[1]:o_2[1] + lreg_shape[1]][all_nan] = np.nan

        counter += 1

    return np_cum
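
The per-point computational region above is passed through the process environment (GRASS_REGION) rather than via g.region, so parallel workers never touch the mapset's shared region. The pattern in isolation, with placeholder coordinates and map name:

import os
import grass.script as grass

env = os.environ.copy()
env["GRASS_REGION"] = grass.region_env(n=220000, s=215000, e=640000, w=635000)
# Any module started with env=... computes only within that region:
grass.run_command("r.univar", map="elevation", env=env)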
Example #22
File: region.py Project: caomw/grass
    def __unicode__(self):
        return grass.pipe_command("g.region", flags="pu").communicate()[0]
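
Under Python 3, communicate()[0] returns bytes, so a __str__ counterpart would need a decode step; a sketch:

    def __str__(self):
        return grass.pipe_command("g.region",
                                  flags="pu").communicate()[0].decode()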
Example #23
def main():
    global tmp, sqltmp, tmpname, nuldev, vector, rastertmp
    rastertmp = False
    # setup temporary files
    tmp = grass.tempfile()
    sqltmp = tmp + ".sql"
    # we need a random name
    tmpname = grass.basename(tmp)

    nuldev = open(os.devnull, 'w')

    rasters = options['raster'].split(',')
    colprefixes = options['column_prefix'].split(',')
    vector = options['map']
    layer = options['layer']
    percentile = options['percentile']
    basecols = options['method'].split(',')

    ### setup enviro vars ###
    env = grass.gisenv()
    mapset = env['MAPSET']

    vs = vector.split('@')
    if len(vs) > 1:
        vect_mapset = vs[1]
    else:
        vect_mapset = mapset

    # does map exist in CURRENT mapset?
    if vect_mapset != mapset or not grass.find_file(vector, 'vector',
                                                    mapset)['file']:
        grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    # check if DBF driver used, in this case cut to 10 chars col names:
    try:
        fi = grass.vector_db(map=vector)[int(layer)]
    except KeyError:
        grass.fatal(
            _('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'
              ))
    # we need this for non-DBF driver:
    dbfdriver = fi['driver'] == 'dbf'

    # colprefix for every raster map?
    if len(colprefixes) != len(rasters):
        grass.fatal(
            _("Number of raster maps ({0}) different from \
                      number of column prefixes ({1})".format(
                len(rasters), len(colprefixes))))

    vector = vs[0]

    rastertmp = "%s_%s" % (vector, tmpname)

    for raster in rasters:
        # check the input raster map
        if not grass.find_file(raster, 'cell')['file']:
            grass.fatal(_("Raster map <%s> not found") % raster)

    # save current settings:
    grass.use_temp_region()

    # Temporarily aligning region resolution to $RASTER resolution
    # keep boundary settings
    grass.run_command('g.region', align=rasters[0])

    # prepare base raster for zonal statistics
    try:
        nlines = grass.vector_info_topo(vector)['lines']
        # Create densified lines rather than thin lines
        if flags['d'] and nlines > 0:
            grass.run_command('v.to.rast',
                              input=vector,
                              layer=layer,
                              output=rastertmp,
                              use='cat',
                              flags='d',
                              quiet=True)
        else:
            grass.run_command('v.to.rast',
                              input=vector,
                              layer=layer,
                              output=rastertmp,
                              use='cat',
                              quiet=True)
    except CalledModuleError:
        grass.fatal(_("An error occurred while converting vector to raster"))

    # dump cats to file to avoid "too many arguments" problem:
    p = grass.pipe_command('r.category', map=rastertmp, sep=';', quiet=True)
    cats = []

    for line in p.stdout:
        line = decode(line)
        cats.append(line.rstrip('\r\n').split(';')[0])
    p.wait()

    number = len(cats)
    if number < 1:
        grass.fatal(_("No categories found in raster map"))

    # Check if all categories got converted
    # Report categories from vector map
    vect_cats = grass.read_command('v.category',
                                   input=vector,
                                   option='report',
                                   flags='g').rstrip('\n').split('\n')

    # get number of all categories in selected layer
    vect_cats_n = 0  # to be modified below
    for vcl in vect_cats:
        if vcl.split(' ')[0] == layer and vcl.split(' ')[1] == 'all':
            vect_cats_n = int(vcl.split(' ')[2])

    if vect_cats_n != number:
        grass.warning(
            _("Not all vector categories converted to raster. "
              "Converted {0} of {1}.").format(number, vect_cats_n))

    # Find out which table is linked to the vector map on the given layer
    if not fi['table']:
        grass.fatal(
            _('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'
              ))

    # replaced by user choice
    #basecols = ['n', 'min', 'max', 'range', 'mean', 'stddev', 'variance', 'cf_var', 'sum']

    for i in range(len(rasters)):
        raster = rasters[i]
        colprefix = colprefixes[i]
        # we need at least three chars to distinguish [mea]n from [med]ian
        # so colprefix can't be longer than 6 chars with DBF driver
        if dbfdriver:
            colprefix = colprefix[:6]
            variables_dbf = {}

        # by default the perccol name is used only as a key in the "variables" dictionary
        perccol = "percentile"
        perc = None
        for b in basecols:
            if b.startswith('p'):
                perc = b
        if perc:
            # namespace is limited in DBF but the % value is important
            if dbfdriver:
                perccol = "per" + percentile
            else:
                perccol = "percentile_" + percentile
            percindex = basecols.index(perc)
            basecols[percindex] = perccol

        # dictionary mapping method names to their column position in "r.univar -gt" output
        variables = {
            'number': 2,
            'null_cells': 2,
            'minimum': 4,
            'maximum': 5,
            'range': 6,
            'average': 7,
            'stddev': 9,
            'variance': 10,
            'coeff_var': 11,
            'sum': 12,
            'first_quartile': 14,
            'median': 15,
            'third_quartile': 16,
            perccol: 17
        }
        # this list is used to set the 'e' flag for r.univar
        extracols = ['first_quartile', 'median', 'third_quartile', perccol]
        addcols = []
        colnames = []
        extstat = ""
        for i in basecols:
            # expand the (possibly truncated) method name to its full name
            for k in variables.keys():
                if i in k:
                    i = k
                    break
            if i in extracols:
                extstat = 'e'
            # check if column already present
            currcolumn = ("%s_%s" % (colprefix, i))
            if dbfdriver:
                currcolumn = currcolumn[:10]
                variables_dbf[currcolumn.replace("%s_" % colprefix, '')] = i

            colnames.append(currcolumn)
            if currcolumn in grass.vector_columns(vector, layer).keys():
                if not flags['c']:
                    grass.fatal(
                        (_("Cannot create column <%s> (already present). ") %
                         currcolumn) +
                        _("Use -c flag to update values in this column."))
            else:
                if i == "n":
                    coltype = "INTEGER"
                else:
                    coltype = "DOUBLE PRECISION"
                addcols.append(currcolumn + ' ' + coltype)

        if addcols:
            grass.verbose(_("Adding columns '%s'") % addcols)
            try:
                grass.run_command('v.db.addcolumn',
                                  map=vector,
                                  columns=addcols,
                                  layer=layer)
            except CalledModuleError:
                grass.fatal(_("Adding columns failed. Exiting."))

        # calculate statistics:
        grass.message(_("Processing input data (%d categories)...") % number)

        # get rid of any earlier attempts
        grass.try_remove(sqltmp)

        f = open(sqltmp, 'w')

        # do the stats
        p = grass.pipe_command('r.univar',
                               flags='t' + extstat,
                               map=raster,
                               zones=rastertmp,
                               percentile=percentile,
                               sep=';')

        first_line = 1

        f.write("{0}\n".format(grass.db_begin_transaction(fi['driver'])))
        for line in p.stdout:
            if first_line:
                first_line = 0
                continue

            vars = decode(line).rstrip('\r\n').split(';')

            f.write("UPDATE %s SET" % fi['table'])
            first_var = 1
            for colname in colnames:
                variable = colname.replace("%s_" % colprefix, '', 1)
                if dbfdriver:
                    variable = variables_dbf[variable]
                i = variables[variable]
                value = vars[i]
                # convert nan, +nan, -nan, inf, +inf, -inf, Infinity, +Infinity,
                # -Infinity to NULL
                if value.lower().endswith('nan') or 'inf' in value.lower():
                    value = 'NULL'
                if not first_var:
                    f.write(" , ")
                else:
                    first_var = 0
                f.write(" %s=%s" % (colname, value))

            f.write(" WHERE %s=%s;\n" % (fi['key'], vars[0]))
        f.write("{0}\n".format(grass.db_commit_transaction(fi['driver'])))
        p.wait()
        f.close()

        grass.message(_("Updating the database ..."))
        exitcode = 0
        try:
            grass.run_command('db.execute',
                              input=sqltmp,
                              database=fi['database'],
                              driver=fi['driver'])
            grass.verbose(
                (_("Statistics calculated from raster map <{raster}>"
                   " and uploaded to attribute table"
                   " of vector map <{vector}>.").format(raster=raster,
                                                        vector=vector)))
        except CalledModuleError:
            grass.warning(
                _("Failed to upload statistics to attribute table of vector map <%s>.")
                % vector)
            exitcode = 1

    sys.exit(exitcode)
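For reference, a minimal standalone sketch (not from the module; the sample line, table name and column prefix are made up) of how one ';'-separated `r.univar -t` line is mapped through the variables dictionary above into an UPDATE statement:

# Hypothetical r.univar -t line: zone;label;non_null_cells;null_cells;min;max;...
sample = "1;;42;0;100;250;150;175.5;175.2;12.3;151.29;7.01;7371"
fields = sample.split(";")
variables = {"number": 2, "minimum": 4, "maximum": 5, "average": 7}  # subset of the mapping above
updates = ", ".join("%s_%s=%s" % ("elev", name, fields[idx])
                    for name, idx in variables.items())
print("UPDATE mytable SET %s WHERE cat=%s;" % (updates, fields[0]))
# UPDATE mytable SET elev_number=42, elev_minimum=100, elev_maximum=250, elev_average=175.5 WHERE cat=1;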
Example #24
def calculate_lfp(input, output, coords):
    prefix = "r_lfp_%d_" % os.getpid()

    # create the outlet vector map
    outlet = prefix + "outlet"
    p = grass.feed_command("v.in.ascii",
                           overwrite=True,
                           input="-",
                           output=outlet,
                           separator=",")
    p.stdin.write(coords)
    p.stdin.close()
    p.wait()
    if p.returncode != 0:
        grass.fatal(_("Cannot create the outlet vector map"))

    # convert the outlet vector map to raster
    try:
        grass.run_command("v.to.rast",
                          overwrite=True,
                          input=outlet,
                          output=outlet,
                          use="cat",
                          type="point")
    except CalledModuleError:
        grass.fatal(_("Cannot convert the outlet vector to raster"))

    # calculate the downstream flow length
    flds = prefix + "flds"
    try:
        grass.run_command("r.stream.distance",
                          overwrite=True,
                          flags="om",
                          stream_rast=outlet,
                          direction=input,
                          method="downstream",
                          distance=flds)
    except CalledModuleError:
        grass.fatal(_("Cannot calculate the downstream flow length"))

    # calculate the upstream flow length
    flus = prefix + "flus"
    try:
        grass.run_command("r.stream.distance",
                          overwrite=True,
                          flags="o",
                          stream_rast=outlet,
                          direction=input,
                          method="upstream",
                          distance=flus)
    except CalledModuleError:
        grass.fatal(_("Cannot calculate the upstream flow length"))

    # calculate the sum of downstream and upstream flow lengths
    fldsus = prefix + "fldsus"
    try:
        grass.run_command("r.mapcalc",
                          overwrite=True,
                          expression="%s=%s+%s" % (fldsus, flds, flus))
    except CalledModuleError:
        grass.fatal(
            _("Cannot calculate the sum of downstream and upstream flow lengths"
              ))

    # find the longest flow length
    p = grass.pipe_command("r.info", flags="r", map=fldsus)
    max = ""
    for line in p.stdout:
        line = line.rstrip("\n")
        if line.startswith("max="):
            max = line.split("=")[1]
            break
    p.wait()
    if p.returncode != 0 or max == "":
        grass.fatal(_("Cannot find the longest flow length"))

    min = float(max) - 0.0005

    # extract the longest flow path
    lfp = prefix + "lfp"
    try:
        grass.run_command("r.mapcalc",
                          overwrite=True,
                          expression="%s=if(%s>=%f, 1, null())" %
                          (lfp, fldsus, min))
    except CalledModuleError:
        grass.fatal(_("Cannot create the longest flow path raster map"))

    # thin the longest flow path raster map
    try:
        grass.run_command("r.thin", input=lfp, output=output)
    except CalledModuleError:
        grass.fatal(_("Cannot thin the longest flow path raster map"))

    # remove intermediate outputs
    grass.run_command("g.remove",
                      flags="f",
                      type="raster,vector",
                      pattern="%s*" % prefix)

    # write metadata
    tmphist = grass.tempfile()
    f = open(tmphist, "w+")
    f.write(os.environ["CMDLINE"])
    f.close()

    grass.run_command("r.support",
                      map=output,
                      title="Longest flow path",
                      loadhistory=tmphist,
                      description="generated by r.lfp")
    grass.try_remove(tmphist)
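A minimal usage sketch, assuming an active GRASS session with a hypothetical flow direction raster "drain_dir" (e.g. from r.watershed); CMDLINE is normally set by g.parser, so it is stubbed here:

import os

# Hypothetical map names and coordinates; CMDLINE stub only for this sketch.
os.environ.setdefault("CMDLINE", "r.lfp input=drain_dir output=lfp coordinates=638000,220000")
calculate_lfp(input="drain_dir", output="lfp", coords="638000,220000")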
Example #25
def main():
    global tmp, nuldev
    nuldev = open(os.devnull, 'w')

    ## setup temporary files
    tmp = grass.tempfile()
    tmpf = 'v_net_neighbors'

    inmap = options['input']
    outfile = options['dump']

    # check if input file exists
    if not grass.find_file(inmap, element = 'vector')['file']:
        grass.fatal(_("<%s> does not exist.") % inmap)

    # check for table in net vector map
    try:
        f = grass.vector_db(inmap)[2]
    except KeyError:
        grass.run_command('v.db.addtable', _map = inmap, layer = 2,
                          quiet = True, stderr = nuldev)
        
    ## extract temp nodes and lines
    nodes = tmpf + '_nodes'
    lines = tmpf + '_lines'

    iflines = grass.vector_info_topo(inmap)['lines']
    ifbounds = grass.vector_info_topo(inmap)['boundaries']
    if iflines != 0:
        vect_type = 'line'
    if ifbounds != 0:
        vect_type = 'boundary'
    
    if iflines != 0 and ifbounds != 0:
        grass.fatal(_("Input net vector map must have lines OR boundaries, not both"))

    grass.run_command('v.extract', input = inmap, output = nodes, layer = 2, 
                      _type = 'point', flags = 't', quiet = True, stderr = nuldev)
    grass.run_command('v.extract', input = inmap, output = lines,  
                      _type = vect_type, flags = 't', quiet = True, stderr = nuldev)

    ## filter nodes on line intersections if with '-i' flag 
    if flags['i']:
        p = grass.pipe_command('v.net', _input = inmap, operation = 'nreport',
                               quiet = True, stderr = nuldev)
        c  = p.communicate()[0].strip().split('\n')
        filt = [elem for elem in c if ',' in elem]

        fnodes = []
        
        for x in filt:
            spl = x.split(' ')
            fnodes.append(spl[0])

        fnodes_str = ','.join(fnodes)
        
        nsel = tmpf + '_nsel'
        grass.run_command('v.extract', _input = nodes, _list = fnodes_str, 
                          output = nsel, layer = 2, _type = 'point',
                          quiet = True, stderr = nuldev)

        grass.run_command('g.rename', vect = (nsel,nodes), overwrite = True, 
                          quiet = True, stderr = nuldev)
        
        
    if not flags['p']:
        grass.run_command('v.db.addcol', _map = inmap, layer = 2, 
                          columns = 'neigh_node varchar(255)', 
                          quiet = True, stderr = nuldev)

    ## main cycle (extract every node and make 2 selects)
    out_dict = {}

    p = grass.pipe_command('v.category', _input = nodes, opt = 'print', layer = 2, 
                           _type = 'point', quiet = True, stderr = nuldev)
    c = p.communicate()[0].strip().split('\n')
    for cat in c:
        icat = int(cat)

        nnode = nodes + cat
        grass.run_command('v.extract', _input = nodes, _list = cat, 
                          output = nnode, layer = 2, _type = 'point',
                          flags = 't', quiet = True, stderr = nuldev)

        linode = nnode + '_lines'
        grass.run_command('v.select', ain = lines, _bin = nnode, 
                          blayer = 2, output = linode, 
                          operator = 'overlap', quiet = True, stderr = nuldev)
        
        linode_pts = linode + '_pts'
        grass.run_command('v.select', ain = nodes, alayer = 2, 
                          _bin = linode, output = linode_pts, 
                          operator = 'overlap', quiet = True, stderr = nuldev)
        
        pcat = grass.pipe_command('v.category', _input = linode_pts, layer = 2,
                                  opt = 'print', quiet = True, stderr = nuldev)
        ccat = pcat.communicate()[0].strip().split('\n')
        ccat.remove(cat)
        
        str_db1 = ','.join(ccat)

        out_dict[icat] = str_db1
        
        if not flags['p']:
            grass.run_command('v.db.update', _map = inmap, layer = 2, 
                              column = 'neigh_node', value = '%s' % (str_db1), 
                              where = "cat = %d" % (icat), 
                              quiet = True, stderr = nuldev )
        
    ## output to stdout / file
    tmp3 = tmp + '.out'
    outf2 = open(tmp3, 'w')
    for key in sorted(out_dict.keys()):
        val = out_dict[key]
        outf2.write("%s %s\n" % (key, val))
    outf2.close()
    
    if flags['p']:
        with open(tmp3) as f:
            print(f.read())

    if outfile:
        shutil.copyfile(tmp3, outfile)
                

    return 0
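A small sketch (hypothetical file name) of reading the dump this script writes, where each line is "node_cat neighbour_cats" with the neighbours comma-separated; it assumes every node has at least one neighbour:

neighbours = {}
with open("net_neighbors.txt") as f:  # hypothetical dump= output file
    for line in f:
        cat, neigh = line.split()
        neighbours[int(cat)] = [int(n) for n in neigh.split(",")]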
Example #26
def main():
    mapname = options['map']
    option = options['option']
    layer = options['layer']
    units = options['units']

    nuldev = open(os.devnull, 'w')

    if not grass.find_file(mapname, 'vector')['file']:
        grass.fatal(_("Vector map <%s> not found") % mapname)

    colnames = grass.vector_columns(mapname,
                                    layer,
                                    getDict=False,
                                    stderr=nuldev)
    if not colnames:
        colnames = ['cat']

    if option == 'coor':
        columns = ['dummy1', 'dummy2', 'dummy3']
        extracolnames = ['x', 'y', 'z']
    else:
        columns = ['dummy1']
        extracolnames = [option]

    if units in ['p', 'percent']:
        unitsp = 'meters'
    elif units:
        unitsp = units
    else:
        unitsp = None

    # NOTE: we suppress -1 cat and 0 cat
    if colnames:
        p = grass.pipe_command('v.db.select',
                               quiet=True,
                               flags='c',
                               map=mapname,
                               layer=layer)
        records1 = []
        for line in p.stdout:
            cols = line.rstrip('\r\n').split('|')
            if cols[0] == '0':
                continue
            records1.append([int(cols[0])] + cols[1:])
        p.wait()
        if p.returncode != 0:
            sys.exit(1)

        records1.sort()

        if len(records1) == 0:
            try:
                f = grass.vector_db(map=mapname)[int(layer)]
                grass.fatal(
                    _("There is a table connected to input vector map '%s', but"
                      "there are no categories present in the key column '%s'. Consider using"
                      "v.to.db to correct this.") % (mapname, f['key']))
            except KeyError:
                pass

        #fetch the requested attribute sorted by cat:
        p = grass.pipe_command('v.to.db',
                               flags='p',
                               quiet=True,
                               map=mapname,
                               option=option,
                               columns=columns,
                               layer=layer,
                               units=unitsp)
        records2 = []
        for line in p.stdout:
            fields = line.rstrip('\r\n').split('|')
            if fields[0] in ['cat', '-1', '0']:
                continue
            records2.append([int(fields[0])] + fields[1:-1] +
                            [float(fields[-1])])
        p.wait()
        records2.sort()

        #make pre-table
        # len(records1) may not be the same as len(records2) because
        # v.db.select can return attributes that are not linked to features.
        records3 = []
        for r2 in records2:
            match = [r1 for r1 in records1 if r1[0] == r2[0]][0]
            records3.append(match + r2[1:])
    else:
        records1 = []
        p = grass.pipe_command('v.category',
                               inp=mapname,
                               layer=layer,
                               option='print')
        for line in p.stdout:
            field = int(line.rstrip())
            if field > 0:
                records1.append(field)
        p.wait()
        records1.sort()
        records1 = uniq(records1)

        #make pre-table
        p = grass.pipe_command('v.to.db',
                               flags='p',
                               map=mapname,
                               option=option,
                               columns=columns,
                               layer=layer,
                               units=unitsp)
        records3 = []
        for line in p.stdout:
            fields = line.split('|')
            if fields[0] in ['cat', '-1', '0']:
                continue
            records3.append([int(fields[0])] + fields[1:])
        p.wait()
        records3.sort()

    # print table header
    sys.stdout.write('|'.join(colnames + extracolnames) + '\n')

    #make and print the table:
    numcols = len(colnames) + len(extracolnames)

    # calculate percents if requested
    if units in ('p', 'percent'):
        # calculate total area value
        areatot = 0
        for r in records3:
            areatot += float(r[-1])

        # calculate area percentages
        records4 = [float(r[-1]) * 100 / areatot for r in records3]
        records3 = [r1 + [r4] for r1, r4 in zip(records1, records4)]

    # sort results
    if options['sort']:
        if options['sort'] == 'asc':
            records3.sort(key=lambda r: r[-1])
        else:
            records3.sort(key=lambda r: r[-1], reverse=True)

    for r in records3:
        sys.stdout.write('|'.join(map(str, r)) + '\n')
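To illustrate the percent branch above with made-up numbers: three features whose areas sum to 100 map units get their share directly from the same total-area logic:

records3 = [[1, 10.0], [2, 30.0], [3, 60.0]]  # [cat, area] rows, made up
areatot = sum(float(r[-1]) for r in records3)
percents = [float(r[-1]) * 100 / areatot for r in records3]
print(percents)  # [10.0, 30.0, 60.0]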
Example #27
def output_centerline(river, stations, elev, outfile):
    """ 
	Output the river network, including centerline for each reach
	and coordinates for all stations along each reach
	"""

    # Begin with the list of reach cats
    rc = grass.read_command('v.category',
                            input=river,
                            type="line",
                            option="print")
    reach_cats = rc.strip().split("\n")

    outfile.write("BEGIN STREAM NETWORK:\n")
    # Now get points from the river vector, one pair for each reach
    for i in range(len(reach_cats)):
        where_cond = "cat=" + str(reach_cats[i])
        riv = grass.read_command('v.db.select',
                                 map=river,
                                 separator=" ",
                                 columns="cat,start_x,start_y,end_x,end_y",
                                 where=where_cond,
                                 flags="c")
        riv_pts = riv.strip().split(" ")
        pi, x1, y1, x2, y2 = riv_pts[:5]
        # Give the start point a point id of "cat"1, and the endpoint an id of "cat"2
        pi1 = pi + "1"
        pi2 = pi + "2"
        # Get elevation of each endpoint from the elev raster
        coords = x1 + "," + y1
        e = grass.read_command('r.what',
                               map=elev,
                               coordinates=coords,
                               separator=",")
        start_elev = e.split(",")[3].rstrip()
        coords = x2 + "," + y2
        e = grass.read_command('r.what',
                               map=elev,
                               coordinates=coords,
                               separator=",")
        end_elev = e.split(",")[3].rstrip()
        outfile.write(" ENDPOINT: " + x1 + "," + y1 + "," + start_elev + "," +
                      pi1 + "\n")
        outfile.write(" ENDPOINT: " + x2 + "," + y2 + "," + end_elev + "," +
                      pi2 + "\n")

    # Loop thru the reach_cats again, and output a REACH: section for each reach,
    # with all points for that reach
    for i in range(len(reach_cats)):
        outfile.write(" REACH:\n")
        outfile.write("   STREAM ID: %s\n" % river)
        reach_id = str(reach_cats[i])
        outfile.write("   REACH ID: %s\n" % reach_id)
        # Get the FROM POINT and TO POINT ids just like above
        where_cond = "cat=" + str(reach_cats[i])
        riv = grass.read_command('v.db.select',
                                 map=river,
                                 separator=" ",
                                 columns="cat,start_x,end_x,start_y,end_y",
                                 where=where_cond,
                                 flags="c")
        r_pts = riv.strip().split(" ")
        pi, x1, y1, x2, y2 = r_pts[0], r_pts[1], r_pts[2], r_pts[3], r_pts[4]
        # Give the start point a point id of "cat"1, and the endpoint an id of "cat"2
        pi1 = pi + "1"
        pi2 = pi + "2"
        outfile.write("   FROM POINT: %s\n" % pi1)
        outfile.write("   TO POINT: %s\n" % pi2)

        # Now the actual points along centerline
        outfile.write("   CENTERLINE:\n")
        # loop thru the stations point vector to get each station's x,y and id
        reach_cond = "reach_id=" + reach_cats[i]
        p = grass.pipe_command('v.db.select',
                               map=stations,
                               where=reach_cond,
                               quiet=True,
                               flags="c")
        st_list = []
        for line in p.stdout:
            st = line.strip().split('|')
            s, x, y = st[0], st[1], st[2]
            st_list.append([s, x, y])

        p.stdout.close()
        p.wait()
        # Now write out all station points to the CENTERLINE section
        # Go thru the st_list in reverse order so that the centerline is from upstream to downstream
        for j in range(len(st_list) - 1, -1, -1):
            outfile.write("	" + st_list[j][1] + "," + st_list[j][2] +
                          ",NULL," + st_list[j][0] + "\n")

        outfile.write(" END:\n")

    # Close STREAM NETWORK section
    outfile.write("END STREAM NETWORK:\n\n")
Example #28
    # These maps and columns have to exist in the current mapset's search path
    firms_map = 'produnits_%d' % annee
    nace_column = 'cd_nace_%d' % annee
    turnover_column = 'turnover_estim'
    for nace2 in gscript.read_command('v.db.select',
                                      map=firms_map,
                                      column="substr(%s, 1, 2)" % nace_column,
                                      group="substr(%s, 1, 2)" % nace_column,
                                      where="%s <> ''" % nace_column,
                                      flags='c',
                                      quiet=True).splitlines():
        print(nace2)
        pin = gscript.pipe_command(
            'v.db.select',
            map=firms_map,
            column="x,y,%s" % turnover_column,
            where="substr(%s, 1, 2) = '%s' AND %s >= 0" %
            (nace_column, nace2, turnover_column),
            flags='c',
            quiet=True)
        total_turnover_map = 'turnover_%d_%s' % (annee, nace2)
        p = gscript.start_command('r.in.xyz',
                                  input_='-',
                                  stdin=pin.stdout,
                                  method='sum',
                                  type_='DCELL',
                                  output=total_turnover_map,
                                  quiet=True,
                                  overwrite=True)
        if p.wait() != 0:
            gscript.fatal("Error in r.in.xyz with nace %s" % nace2)
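The pattern above, streaming one module's stdout directly into another's stdin without buffering it in Python, can be sketched generically (map and column names are hypothetical; assumes an active GRASS session):

import grass.script as gscript

# Pipe attribute rows straight into r.in.xyz; both modules use '|' separators by default.
pin = gscript.pipe_command('v.db.select', map='points_map', columns='x,y,value',
                           flags='c', quiet=True)
p = gscript.start_command('r.in.xyz', input_='-', stdin=pin.stdout, method='sum',
                          type_='DCELL', output='value_sum', quiet=True, overwrite=True)
if p.wait() != 0:
    gscript.fatal("r.in.xyz failed")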
Example #29
def main(options, flags):

    gisbase = os.getenv("GISBASE")
    if not gisbase:
        gs.fatal(_("$GISBASE not defined"))
        return 0

    # Reference / sample area or points
    ref_rast = options["ref_rast"]
    ref_vect = options["ref_vect"]
    if ref_rast:
        reftype = gs.raster_info(ref_rast)
        if reftype["datatype"] != "CELL":
            gs.fatal(_("The ref_rast map must have type CELL (integer)"))
        if (reftype["min"] != 0 and reftype["min"] != 1) or reftype["max"] != 1:
            gs.fatal(
                _(
                    "The ref_rast map must be a binary raster,"
                    " i.e. it should contain only values 0 and 1 or 1 only"
                    " (now the minimum is {} and maximum is {})".format(
                        reftype["min"], reftype["max"]
                    )
                )
            )

    # old environmental layers & variable names
    REF = options["env"]
    REF = REF.split(",")
    raster_exists(REF)
    ipn = [z.split("@")[0] for z in REF]
    ipn = [x.lower() for x in ipn]

    # new environmental variables
    PROJ = options["env_proj"]
    if not PROJ:
        RP = False
        PROJ = REF
    else:
        RP = True
        PROJ = PROJ.split(",")
        raster_exists(PROJ)
        if len(PROJ) != len(REF) and len(PROJ) != 0:
            gs.fatal(
                _(
                    "The number of reference and predictor variables"
                    " should be the same. You provided {} reference and {}"
                    " projection variables".format(len(REF), len(PROJ))
                )
            )

    # output layers
    opl = options["output"]
    opc = opl + "_MES"
    ipi = [opl + "_" + i for i in ipn]

    # flags
    flm = flags["m"]
    flk = flags["k"]
    fln = flags["n"]
    fli = flags["i"]
    flc = flags["c"]

    # digits / precision
    digits = int(options["digits"])
    digits2 = pow(10, digits)

    # get current region settings, to compare to new ones later
    region_1 = gs.parse_command("g.region", flags="g")

    # Text for history in metadata
    opt2 = dict((k, v) for k, v in options.items() if v)
    hist = " ".join("{!s}={!r}".format(k, v) for (k, v) in opt2.items())
    hist = "r.mess {}".format(hist)
    unused, tmphist = tempfile.mkstemp()
    with open(tmphist, "w") as text_file:
        text_file.write(hist)

    # Create reference layer if not defined
    if not ref_rast and not ref_vect:
        ref_rast = tmpname("tmp0")
        gs.mapcalc("$i = if(isnull($r),null(),1)", i=ref_rast, r=REF[0], quiet=True)

    # Create the recode table - Reference distribution is raster
    citiam = gs.find_file(name="MASK", element="cell", mapset=gs.gisenv()["MAPSET"])
    if citiam["fullname"]:
        rname = tmpname("tmp3")
        gs.mapcalc("$rname = MASK", rname=rname, quiet=True)

    if ref_rast:
        vtl = ref_rast

        # Create temporary layer based on reference layer
        tmpf0 = tmpname("tmp2")
        gs.mapcalc("$tmpf0 = int($vtl * 1)", vtl=vtl, tmpf0=tmpf0, quiet=True)
        gs.run_command("r.null", map=tmpf0, setnull=0, quiet=True)
        if citiam["fullname"]:
            gs.run_command("r.mask", flags="r", quiet=True)
        for i in range(len(REF)):

            # Create mask based on combined MASK/reference layer
            gs.run_command("r.mask", raster=tmpf0, quiet=True)

            # Calculate the frequency distribution
            tmpf1 = tmpname("tmp4")
            gs.mapcalc(
                "$tmpf1 = int($dignum * $inplay)",
                tmpf1=tmpf1,
                inplay=REF[i],
                dignum=digits2,
                quiet=True,
            )
            p = gs.pipe_command(
                "r.stats", quiet=True, flags="cn", input=tmpf1, sort="asc", sep=";"
            )
            stval = {}
            for line in p.stdout:
                [val, count] = line.strip(os.linesep).split(";")
                stval[float(val)] = float(count)
            p.wait()
            sstval = sorted(stval.items(), key=operator.itemgetter(0))
            sstval = np.matrix(sstval)
            a = np.cumsum(np.array(sstval), axis=0)
            b = np.sum(np.array(sstval), axis=0)
            c = a[:, 1] / b[1] * 100

            # Remove tmp mask and set region to env_proj if needed
            gs.run_command("r.mask", quiet=True, flags="r")
            if RP:
                gs.use_temp_region()
                gs.run_command("g.region", quiet=True, raster=PROJ[0])

            # get new region settings, to compare to original ones later
            region_2 = gs.parse_command("g.region", flags="g")

            # Get min and max values for recode table (based on full map)
            tmpf2 = tmpname("tmp5")
            gs.mapcalc(
                "$tmpf2 = int($dignum * $inplay)",
                tmpf2=tmpf2,
                inplay=PROJ[i],
                dignum=digits2,
                quiet=True,
            )
            d = gs.parse_command("r.univar", flags="g", map=tmpf2, quiet=True)

            # Create recode rules
            Dmin = int(d["min"])
            Dmax = int(d["max"])
            envmin = np.min(np.array(sstval), axis=0)[0]
            envmax = np.max(np.array(sstval), axis=0)[0]

            if Dmin < envmin:
                e1 = Dmin - 1
            else:
                e1 = envmin - 1
            if Dmax > envmax:
                e2 = Dmax + 1
            else:
                e2 = envmax + 1

            a1 = np.hstack([(e1), np.array(sstval.T[0])[0, :]])
            a2 = np.hstack([np.array(sstval.T[0])[0, :] - 1, (e2)])
            b1 = np.hstack([(0), c])

            fd2, tmprule = tempfile.mkstemp(suffix=ipn[i])
            with open(tmprule, "w") as text_file:
                for k in np.arange(0, len(b1.T)):
                    text_file.write(
                        "%s:%s:%s\n" % (str(int(a1[k])), str(int(a2[k])), str(b1[k]))
                    )

            # Create the recode layer and calculate the IES
            compute_ies(tmprule, ipi[i], tmpf2, envmin, envmax)
            gs.run_command(
                "r.support",
                map=ipi[i],
                title="IES {}".format(REF[i]),
                units="0-100 (relative score",
                description="Environmental similarity {}".format(REF[i]),
                loadhistory=tmphist,
            )

            # Clean up
            os.close(fd2)
            os.remove(tmprule)

            # Change region back to original
            gs.del_temp_region()

    # Create the recode table - Reference distribution is vector
    else:
        vtl = ref_vect

        # Copy point layer and add columns for variables
        tmpf0 = tmpname("tmp7")
        gs.run_command(
            "v.extract", quiet=True, flags="t", input=vtl, type="point", output=tmpf0
        )
        gs.run_command("v.db.addtable", quiet=True, map=tmpf0)

        # TODO: see if there is a more efficient way to handle the mask
        if citiam["fullname"]:
            gs.run_command("r.mask", quiet=True, flags="r")

        # Upload raster values and get value in python as frequency table
        sql1 = "SELECT cat FROM {}".format(str(tmpf0))
        cn = len(np.hstack(db.db_select(sql=sql1)))
        for m in range(len(REF)):

            # Set mask back (this means that points outside the mask will
            # be ignored in the computation of the frequency distribution
            # of the reference variable env(m))
            if citiam["fullname"]:
                gs.run_command("g.copy", raster=[rname, "MASK"], quiet=True)

            # Compute frequency distribution of variable(m)
            mid = str(m)
            laytype = gs.raster_info(REF[m])["datatype"]
            if laytype == "CELL":
                columns = "envvar_{} integer".format(str(mid))
            else:
                columns = "envvar_%s double precision" % mid
            gs.run_command("v.db.addcolumn", map=tmpf0, columns=columns, quiet=True)
            sql2 = "UPDATE {} SET envvar_{} = NULL".format(str(tmpf0), str(mid))
            gs.run_command("db.execute", sql=sql2, quiet=True)
            coln = "envvar_%s" % mid
            gs.run_command(
                "v.what.rast",
                quiet=True,
                map=tmpf0,
                layer=1,
                raster=REF[m],
                column=coln,
            )
            sql3 = (
                "SELECT {0}, count({0}) from {1} WHERE {0} IS NOT NULL "
                "GROUP BY {0} ORDER BY {0}"
            ).format(coln, tmpf0)
            volval = np.vstack(db.db_select(sql=sql3))
            volval = volval.astype(float, copy=False)
            a = np.cumsum(volval[:, 1], axis=0)
            b = np.sum(volval[:, 1], axis=0)
            c = a / b * 100

            # Check for point without values
            if b < cn:
                gs.info(
                    _(
                        "Please note that there were {} points without "
                        "value. This is probably because they are outside "
                        "the computational region or mask".format((cn - b))
                    )
                )

            # Set region to env_proj layers (if different from env) and remove
            # mask (if set above)
            if citiam["fullname"]:
                gs.run_command("r.mask", quiet=True, flags="r")
            if RP:
                gs.use_temp_region()
                gs.run_command("g.region", quiet=True, raster=PROJ[0])
            region_2 = gs.parse_command("g.region", flags="g")

            # Multiply env_proj layer with dignum
            tmpf2 = tmpname("tmp8")
            gs.mapcalc(
                "$tmpf2 = int($dignum * $inplay)",
                tmpf2=tmpf2,
                inplay=PROJ[m],
                dignum=digits2,
                quiet=True,
            )

            # Calculate min and max values of sample points and raster layer
            envmin = int(min(volval[:, 0]) * digits2)
            envmax = int(max(volval[:, 0]) * digits2)
            Drange = gs.read_command("r.info", flags="r", map=tmpf2)
            Drange = str.splitlines(Drange)
            Drange = np.hstack([i.split("=") for i in Drange])
            Dmin = int(Drange[1])
            Dmax = int(Drange[3])

            if Dmin < envmin:
                e1 = Dmin - 1
            else:
                e1 = envmin - 1
            if Dmax > envmax:
                e2 = Dmax + 1
            else:
                e2 = envmax + 1

            a0 = volval[:, 0] * digits2
            a0 = a0.astype(int, copy=False)
            a1 = np.hstack([(e1), a0])
            a2 = np.hstack([a0 - 1, (e2)])
            b1 = np.hstack([(0), c])

            fd3, tmprule = tempfile.mkstemp(suffix=ipn[m])
            with open(tmprule, "w") as text_file:
                for k in np.arange(0, len(b1)):
                    rtmp = "{}:{}:{}\n".format(
                        str(int(a1[k])), str(int(a2[k])), str(b1[k])
                    )
                    text_file.write(rtmp)

            # Create the recode layer and calculate the IES
            compute_ies(tmprule, ipi[m], tmpf2, envmin, envmax)
            gs.run_command(
                "r.support",
                map=ipi[m],
                title="IES {}".format(REF[m]),
                units="0-100 (relative score",
                description="Environmental similarity {}".format(REF[m]),
                loadhistory=tmphist,
            )

            # Clean up
            os.close(fd3)
            os.remove(tmprule)

            # Change region back to original
            gs.del_temp_region()

    # Calculate MESS statistics
    # Set region to env_proj layers (if different from env)
    # Note: this changes the region, to ensure the newly created layers
    # are actually visible to the user. This goes against normal practice;
    # a warning will be printed.
    if RP:
        gs.run_command("g.region", quiet=True, raster=PROJ[0])

    # MES
    gs.run_command("r.series", quiet=True, output=opc, input=ipi, method="minimum")
    gs.write_command("r.colors", map=opc, rules="-", stdin=COLORS_MES, quiet=True)

    # Write layer metadata
    gs.run_command(
        "r.support",
        map=opc,
        title="Areas with novel conditions",
        units="0-100 (relative score",
        description="The multivariate environmental similarity" "(MES)",
        loadhistory=tmphist,
    )

    # Area with negative MES
    if fln:
        mod1 = "{}_novel".format(opl)
        gs.mapcalc("$mod1 = int(if( $opc < 0, 1, 0))", mod1=mod1, opc=opc, quiet=True)

        # Write category labels
        gs.write_command(
            "r.category", map=mod1, rules="-", stdin=RECL_MESNEG, quiet=True
        )

        # Write layer metadata
        gs.run_command(
            "r.support",
            map=mod1,
            title="Areas with novel conditions",
            units="-",
            source1="Based on {}".format(opc),
            description="1 = novel conditions, 0 = within range",
            loadhistory=tmphist,
        )

    # Most dissimilar variable (MoD)
    if flm:
        tmpf4 = tmpname("tmp9")
        mod2 = "{}_MoD".format(opl)
        gs.run_command(
            "r.series", quiet=True, output=tmpf4, input=ipi, method="min_raster"
        )
        gs.mapcalc("$mod2 = int($tmpf4)", mod2=mod2, tmpf4=tmpf4, quiet=True)

        fd4, tmpcat = tempfile.mkstemp()
        with open(tmpcat, "w") as text_file:
            for cats in range(len(ipi)):
                text_file.write("{}:{}\n".format(str(cats), REF[cats]))
        gs.run_command("r.category", quiet=True, map=mod2, rules=tmpcat, separator=":")
        os.close(fd4)
        os.remove(tmpcat)

        # Write layer metadata
        gs.run_command(
            "r.support",
            map=mod2,
            title="Most dissimilar variable (MoD)",
            units="-",
            source1="Based on {}".format(opc),
            description="Name of most dissimilar variable",
            loadhistory=tmphist,
        )

    # sum(IES), where IES < 0
    if flk:
        mod3 = "{}_SumNeg".format(opl)
        c0 = -0.01 / digits2
        gs.run_command(
            "r.series",
            quiet=True,
            input=ipi,
            method="sum",
            range=("-inf", c0),
            output=mod3,
        )
        gs.write_command("r.colors", map=mod3, rules="-", stdin=COLORS_MES, quiet=True)

        # Write layer metadata
        gs.run_command(
            "r.support",
            map=mod3,
            title="Sum of negative IES values",
            units="-",
            source1="Based on {}".format(opc),
            description="Sum of negative IES values",
            loadhistory=tmphist,
        )

    # Number of layers with negative values
    if flc:
        tmpf5 = tmpname("tmp10")
        mod4 = "{}_CountNeg".format(opl)
        MinMes = gs.read_command("r.info", quiet=True, flags="r", map=opc)
        MinMes = str.splitlines(MinMes)
        MinMes = float(np.hstack([i.split("=") for i in MinMes])[1])
        c0 = -0.0001 / digits2
        gs.run_command(
            "r.series",
            quiet=True,
            input=ipi,
            output=tmpf5,
            method="count",
            range=(MinMes, c0),
        )
        gs.mapcalc("$mod4 = int($tmpf5)", mod4=mod4, tmpf5=tmpf5, quiet=True)

        # Write layer metadata
        gs.run_command(
            "r.support",
            map=mod4,
            title="Number of layers with negative values",
            units="-",
            source1="Based on {}".format(opc),
            description="Number of layers with negative values",
            loadhistory=tmphist,
        )

    # Remove IES layers
    if fli:
        gs.run_command("g.remove", quiet=True, flags="f", type="raster", name=ipi)
    # Clean up tmp file
    os.remove(tmphist)

    gs.message(_("Finished ...\n"))
    if region_1 != region_2:
        gs.message(
            _(
                "\nPlease note that the region has been changes to match"
                " the set of projection (env_proj) variables.\n"
            )
        )
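Both branches above reduce to the same move: turn a sorted value/count table into cumulative-percent recode rules of the form low:high:score. A standalone sketch of that step with made-up integers:

import numpy as np

vals = np.array([100, 120, 150])             # sorted cell values (already scaled), made up
counts = np.array([5.0, 10.0, 5.0])          # their frequencies, made up
cum_pct = np.cumsum(counts) / counts.sum() * 100
lows = np.hstack([vals[0] - 1, vals])        # the e1 analogue
highs = np.hstack([vals - 1, vals[-1] + 1])  # the e2 analogue
scores = np.hstack([0, cum_pct])
for lo, hi, sc in zip(lows, highs, scores):
    print("%d:%d:%s" % (lo, hi, sc))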
Example #30
def output_xsections(xsects, outfile, elev, res, river):
    """
	Create the cross section profiles by first making a points vector from the cross sections
	loop thru the section_ids and use v.out.ascii to get the points for each cross section.
	Feed these points into r.profile to get lists of the coords and elevation at each spot along the xsection,
	and output these to the CROSS-SECTION paragraph
	"""

    # Prepare tmp vector maps to hold the points from the cross sections
    proc = os.getpid()
    xsect_pts = "tmp_xsect_pts_" + str(proc)
    #xsect_pts2="tmp_xsect_pts2_"+str(proc)
    # v.to.points returns all points on the same cross section with the same cat value
    grass.run_command('v.to.points',
                      input=xsects,
                      output=xsect_pts,
                      use="vertex",
                      quiet=True)
    outfile.write("\n")
    outfile.write("BEGIN CROSS-SECTIONS:\n")

    # Get the list of station ids with the reaches
    # Layer 1 contains one cat for all points on the same xsect line,
    # so v.db.select returns one row for each xsect line (not for each point on the line)
    st = grass.pipe_command('v.db.select',
                            map=xsect_pts,
                            layer=1,
                            columns="reach,station_id",
                            flags="c",
                            quiet=True)
    station_ids = []
    for line in st.stdout:
        station = line.rstrip('\n').split("|")
        r, s = station[0], station[1]
        station_ids.append([r, s])

    st.stdout.close()
    st.wait()

    # Now loop thru those stations to create the CUTLINE and SURFACE section
    for i in range(len(station_ids)):
        station_id = station_ids[i][1].strip('\n')
        reach = station_ids[i][0].rstrip('\n')
        process_msg = "Processing reach: " + reach + " at station id: " + station_id
        grass.message(process_msg)
        outfile.write(" CROSS-SECTION:\n")
        outfile.write("   STREAM ID: %s\n" % river)
        outfile.write("   REACH ID: %s\n" % reach)
        outfile.write("   STATION: %s\n" % station_id)

        # get point coords from this reach/station and print out CUTLINE: section
        # Save this output also for next SURFACE: section
        station_cond = "station_id=" + station_id
        p = grass.pipe_command('v.out.ascii',
                               input=xsect_pts,
                               columns="reach,station_id",
                               where=station_cond,
                               separator=",",
                               layer=1,
                               quiet=True)
        station_pts = []
        for line in p.stdout:
            st = line.rstrip('\n').split(',')
            station_pts.append(st)

        p.stdout.close()
        p.wait()

        outfile.write("   CUTLINE:\n")
        for j in range(len(station_pts)):
            x, y = station_pts[j][0], station_pts[j][1]
            outfile.write("	 " + x + "," + y + "\n")

        # Now run the points thru r.profile, and get the elevations
        # and print elevations to SURFACE LINE: section
        outfile.write("   SURFACE LINE:\n")
        # r.profile expects a flat east,north,east,north,... coordinate list
        profile_pts = []
        for k in range(len(station_pts)):
            x, y = station_pts[k][0], station_pts[k][1]
            profile_pts.extend([x, y])

        pp = grass.pipe_command('r.profile',
                                input=elev,
                                coordinates=profile_pts,
                                resolution=res,
                                flags="g",
                                quiet=True)
        for line in pp.stdout:
            l = line.rstrip('\n').split(" ")
            # The r.profile output has x,y in first two columns and elev in 4th column
            outfile.write("	 " + l[0] + "," + l[1] + "," + l[3] + "\n")
        pp.stdout.close()
        pp.wait()

        # Close this CROSS-SECTION paragraph
        outfile.write(" END:\n\n")

    outfile.write("END CROSS-SECTIONS:\n\n")

    # remove temp points file
    grass.message("Removing temp vector: %s" % (xsect_pts))
    grass.run_command('g.remove',
                      type='vector',
                      name=xsect_pts,
                      quiet=True,
                      flags="f")
Example #31
def calculate_lfp(input, output, idcol, id, coords, outlet, layer,
                  outletidcol):
    prefix = "r_lfp_%d_" % os.getpid()

    if id:
        ids = id.split(",")
        for i in range(0, len(ids)):
            try:
                ids[i] = int(ids[i])
            except ValueError:
                grass.fatal(_("Invalid ID '%s'") % ids[i])
    else:
        ids = []

    if coords:
        coords = coords.split(",")
    else:
        coords = []

    # append outlet points to coordinates
    if outlet:
        p = grass.pipe_command("v.report",
                               map=outlet,
                               layer=layer,
                               option="coor")
        for line in p.stdout:
            line = line.rstrip("\n")
            if line.startswith("cat|"):
                colnames = line.split("|")
                outletid_ind = -1
                for i in range(0, len(colnames)):
                    colname = colnames[i]
                    if colname == outletidcol:
                        outletid_ind = i
                    elif colname == "x":
                        x_ind = i
                    elif colname == "y":
                        y_ind = i
                if outletidcol and outletid_ind == -1:
                    grass.fatal(
                        _("Cannot find column <%s> in vector map <%s>") %
                        (outletidcol, outlet))
                continue
            cols = line.split("|")
            coords.extend([cols[x_ind], cols[y_ind]])
            if outletid_ind >= 0:
                try:
                    ids.extend([int(cols[outletid_ind])])
                except ValueError:
                    grass.fatal(_("Invalid ID '%s'") % cols[outletid_ind])
        p.wait()
        if p.returncode != 0:
            grass.fatal(_("Cannot read outlet points"))

    if len(ids) > 0:
        if len(ids) > len(coords) / 2:
            grass.fatal(_("Too many IDs"))
        elif len(ids) < len(coords) / 2:
            grass.fatal(_("Too few IDs"))
        assign_id = True
    else:
        assign_id = False

    # create the output vector map
    try:
        grass.run_command("v.edit", map=output, tool="create")
    except CalledModuleError:
        grass.fatal(_("Cannot create the output vector map"))

    if assign_id:
        try:
            grass.run_command("v.db.addtable",
                              map=output,
                              columns="%s integer" % idcol)
        except CalledModuleError:
            grass.fatal(_("Cannot add a table to the output vector map"))

    for i in range(0, len(coords) // 2):
        cat = i + 1
        coor = "%s,%s" % (coords[2 * i], coords[2 * i + 1])
        if assign_id:
            id = ids[i]
            grass.message(_("Processing outlet %d at %s...") % (id, coor))
        else:
            grass.message(_("Processing outlet at %s...") % coor)

        # create the outlet vector map
        out = prefix + "out"
        p = grass.feed_command("v.in.ascii",
                               overwrite=True,
                               input="-",
                               output=out,
                               separator=",")
        p.stdin.write(coor)
        p.stdin.close()
        p.wait()
        if p.returncode != 0:
            grass.fatal(_("Cannot create the outlet vector map"))

        # convert the outlet vector map to raster
        try:
            grass.run_command("v.to.rast",
                              overwrite=True,
                              input=out,
                              output=out,
                              use="cat",
                              type="point")
        except CalledModuleError:
            grass.fatal(_("Cannot convert the outlet vector to raster"))

        # calculate the downstream flow length
        flds = prefix + "flds"
        try:
            grass.run_command("r.stream.distance",
                              overwrite=True,
                              flags="om",
                              stream_rast=out,
                              direction=input,
                              method="downstream",
                              distance=flds)
        except CalledModuleError:
            grass.fatal(_("Cannot calculate the downstream flow length"))

        # find the longest flow length
        p = grass.pipe_command("r.info", flags="r", map=flds)
        max = ""
        for line in p.stdout:
            line = line.rstrip("\n")
            if line.startswith("max="):
                max = line.split("=")[1]
                break
        p.wait()
        if p.returncode != 0 or max == "":
            grass.fatal(_("Cannot find the longest flow length"))

        threshold = float(max) - 0.0005

        # find the headwater cells
        heads = prefix + "heads"
        try:
            grass.run_command("r.mapcalc",
                              overwrite=True,
                              expression="%s=if(%s>=%f,1,null())" %
                              (heads, flds, threshold))
        except CalledModuleError:
            grass.fatal(_("Cannot find the headwater cells"))

        # create the headwater vector map
        try:
            grass.run_command("r.to.vect",
                              overwrite=True,
                              input=heads,
                              output=heads,
                              type="point")
        except CalledModuleError:
            grass.fatal(_("Cannot create the headwater vector map"))

        # calculate the longest flow path in vector format
        path = prefix + "path"
        try:
            grass.run_command("r.path",
                              overwrite=True,
                              input=input,
                              vector_path=path,
                              start_points=heads)
        except CalledModuleError:
            grass.fatal(_("Cannot create the longest flow path vector map"))

        # snap the outlet
        try:
            grass.run_command("r.to.vect",
                              overwrite=True,
                              input=out,
                              output=out,
                              type="point")
        except CalledModuleError:
            grass.fatal(_("Cannot snap the outlet"))

        # find the coordinates of the snapped outlet
        p = grass.pipe_command("v.to.db", flags="p", map=out, option="coor")
        coor = ""
        for line in p.stdout:
            line = line.rstrip("\n")
            if line == "cat|x|y|z":
                continue
            cols = line.split("|")
            coor = "%s,%s" % (cols[1], cols[2])
        p.wait()
        if p.returncode != 0 or coor == "":
            grass.fatal(_("Cannot find the coordinates of the snapped outlet"))

        # split the longest flow path at the outlet
        try:
            grass.run_command("v.edit", map=path, tool="break", coords=coor)
        except CalledModuleError:
            grass.fatal(_("Cannot split the longest flow path at the outlet"))

        # select the final longest flow path
        lfp = prefix + "lfp"
        try:
            grass.run_command("v.select",
                              overwrite=True,
                              ainput=path,
                              binput=heads,
                              output=lfp)
        except CalledModuleError:
            grass.fatal(_("Cannot select the final longest flow path"))

        lfp2 = lfp + "2"
        try:
            grass.run_command("v.category",
                              overwrite=True,
                              input=lfp,
                              output=lfp2,
                              option="del",
                              cat=-1)
            grass.run_command("v.category",
                              overwrite=True,
                              input=lfp2,
                              output=lfp,
                              option="add",
                              cat=cat,
                              step=0)
        except CalledModuleError:
            grass.fatal(_("Cannot add category %d") % cat)

        # copy the final longest flow path to the output map
        try:
            grass.run_command("v.edit",
                              flags="r",
                              map=output,
                              tool="copy",
                              bgmap=lfp,
                              cats=0)
        except CalledModuleError:
            grass.fatal(_("Cannot copy the final longest flow path"))

        if assign_id:
            try:
                grass.run_command("v.to.db",
                                  map=output,
                                  option="cat",
                                  columns="cat")
                grass.run_command("v.db.update",
                                  map=output,
                                  column=idcol,
                                  value=id,
                                  where="cat=%d" % cat)
            except CalledModuleError:
                grass.fatal(_("Cannot assign ID %d") % id)

    # remove intermediate outputs
    grass.run_command("g.remove",
                      flags="f",
                      type="raster,vector",
                      pattern="%s*" % prefix)

    # write history if supported
    version = grass.version()
    if version["revision"] != "exported":
        # the revision number is available
        version = int(version["revision"][1:])
    else:
        # some binary distributions don't build from the SVN repository and
        # revision is not available; use the libgis revision as a fallback in
        # this case
        version = int(version["libgis_revision"])

    if version >= 70740:
        # v.support -h added in r70740
        grass.run_command("v.support",
                          flags="h",
                          map=output,
                          cmdhist=os.environ["CMDLINE"])
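A minimal usage sketch for this variant (map names are hypothetical; assumes an active GRASS session where g.parser has set CMDLINE): two outlets given inline, tagged with user IDs 1 and 2:

# Longest flow paths for two inline outlets; no outlet vector map is used here.
calculate_lfp(input="drain_dir", output="lfp", idcol="lfp_id", id="1,2",
              coords="638000,220000,640500,221700", outlet=None, layer=1,
              outletidcol=None)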
Example #32
def main():
    global tmp, sqltmp, tmpname, nuldev, vector, rastertmp
    rastertmp = False
    #### setup temporary files
    tmp = grass.tempfile()
    sqltmp = tmp + ".sql"
    # we need a random name
    tmpname = grass.basename(tmp)

    nuldev = open(os.devnull, 'w')

    raster = options['raster']
    colprefix = options['column_prefix']
    vector = options['map']
    layer = options['layer']
    percentile = options['percentile']
    basecols = options['method'].split(',')

    ### setup enviro vars ###
    env = grass.gisenv()
    mapset = env['MAPSET']

    vs = vector.split('@')
    if len(vs) > 1:
        vect_mapset = vs[1]
    else:
        vect_mapset = mapset

    # does map exist in CURRENT mapset?
    if vect_mapset != mapset or not grass.find_file(vector, 'vector', mapset)['file']:
        grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    vector = vs[0]

    rastertmp = "%s_%s" % (vector, tmpname)

    # check the input raster map
    if not grass.find_file(raster, 'cell')['file']:
        grass.fatal(_("Raster map <%s> not found") % raster)

    # save current settings:
    grass.use_temp_region()

    # Temporarily aligning region resolution to $RASTER resolution
    # keep boundary settings
    grass.run_command('g.region', align=raster)

    grass.message(_("Preprocessing input data..."))
    try:
        grass.run_command('v.to.rast', input=vector, layer=layer, output=rastertmp,
                          use='cat', quiet=True)
    except CalledModuleError:
        grass.fatal(_("An error occurred while converting vector to raster"))

    # dump cats to file to avoid "too many argument" problem:
    p = grass.pipe_command('r.category', map=rastertmp, sep=';', quiet=True)
    cats = []

    for line in p.stdout:
        cats.append(grass.decode(line).rstrip('\r\n').split(';')[0])
    p.wait()

    number = len(cats)
    if number < 1:
        grass.fatal(_("No categories found in raster map"))

    # check if DBF driver used, in this case cut to 10 chars col names:
    try:
        fi = grass.vector_db(map=vector)[int(layer)]
    except KeyError:
        grass.fatal(_('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'))
    # we need this for non-DBF driver:
    dbfdriver = fi['driver'] == 'dbf'

    # Find out which table is linked to the vector map on the given layer
    if not fi['table']:
        grass.fatal(_('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'))

    # replaced by user choice
    #basecols = ['n', 'min', 'max', 'range', 'mean', 'stddev', 'variance', 'cf_var', 'sum']

    # we need at least three chars to distinguish [mea]n from [med]ian
    # so colprefix can't be longer than 6 chars with DBF driver
    if dbfdriver:
        colprefix = colprefix[:6]
        variables_dbf = {}

    # by default, perccol is used only as a key in the "variables" dictionary
    perccol = "percentile"
    perc = None
    for b in basecols:
        if b.startswith('p'):
            perc = b
    if perc:
        # namespace is limited in DBF but the % value is important
        if dbfdriver:
            perccol = "per" + percentile
        else:
            perccol = "percentile_" + percentile
        percindex = basecols.index(perc)
        basecols[percindex] = perccol

    # dictionary with name of methods and position in "r.univar -gt"  output
    variables = {'number': 2, 'minimum': 4, 'maximum': 5, 'range': 6,
                 'average': 7, 'stddev': 9, 'variance': 10, 'coeff_var': 11,
                 'sum': 12, 'first_quartile': 14, 'median': 15,
                 'third_quartile': 16, perccol: 17}
    # this list is used to set the 'e' flag for r.univar
    extracols = ['first_quartile', 'median', 'third_quartile', perccol]
    addcols = []
    colnames = []
    extstat = ""
    for i in basecols:
        # match the full method name in case the input name was truncated
        for k in variables.keys():
            if i in k:
                i = k
                break
        if i in extracols:
            extstat = 'e'
        # check if column already present
        currcolumn = ("%s_%s" % (colprefix, i))
        if dbfdriver:
            currcolumn = currcolumn[:10]
            variables_dbf[currcolumn.replace("%s_" % colprefix, '')] = i

        colnames.append(currcolumn)
        if currcolumn in grass.vector_columns(vector, layer).keys():
            if not flags['c']:
                grass.fatal((_("Cannot create column <%s> (already present). ") % currcolumn) +
                             _("Use -c flag to update values in this column."))
        else:
            if i == "n":
                coltype = "INTEGER"
            else:
                coltype = "DOUBLE PRECISION"
            addcols.append(currcolumn + ' ' + coltype)

    if addcols:
        grass.verbose(_("Adding columns '%s'") % addcols)
        try:
            grass.run_command('v.db.addcolumn', map=vector, columns=addcols,
                              layer=layer)
        except CalledModuleError:
            grass.fatal(_("Adding columns failed. Exiting."))

    # calculate statistics:
    grass.message(_("Processing input data (%d categories)...") % number)

    # get rid of any earlier attempts
    grass.try_remove(sqltmp)

    f = open(sqltmp, 'w')

    # do the stats
    p = grass.pipe_command('r.univar', flags='t' + extstat, map=raster,
                           zones=rastertmp, percentile=percentile, sep=';')

    first_line = 1

    f.write("{}\n".format(grass.db_begin_transaction(fi['driver'])))
    for line in p.stdout:
        if first_line:
            first_line = 0
            continue

        vars = grass.decode(line).rstrip('\r\n').split(';')

        f.write("UPDATE %s SET" % fi['table'])
        first_var = 1
        for colname in colnames:
            variable = colname.replace("%s_" % colprefix, '', 1)
            if dbfdriver:
                variable = variables_dbf[variable]
            i = variables[variable]
            value = vars[i]
            # convert nan, +nan, -nan, inf, +inf, -inf, Infinity, +Infinity,
            # -Infinity to NULL
            if value.lower().endswith('nan') or 'inf' in value.lower():
                value = 'NULL'
            if not first_var:
                f.write(" , ")
            else:
                first_var = 0
            f.write(" %s=%s" % (colname, value))

        f.write(" WHERE %s=%s;\n" % (fi['key'], vars[0]))
    f.write("{}\n".format(grass.db_commit_transaction(fi['driver'])))
    p.wait()
    f.close()

    grass.message(_("Updating the database ..."))
    exitcode = 0
    try:
        grass.run_command('db.execute', input=sqltmp,
                          database=fi['database'], driver=fi['driver'])
        grass.verbose((_("Statistics calculated from raster map <{raster}>"
                         " and uploaded to attribute table"
                         " of vector map <{vector}>."
                         ).format(raster=raster, vector=vector)))
    except CalledModuleError:
        grass.warning(_("Failed to upload statistics to attribute table of vector map <%s>.") % vector)
        exitcode = 1

    sys.exit(exitcode)
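
As a hedged aside, the r.univar table that this script turns into SQL can also be inspected directly. The sketch below (assuming an active GRASS session, with "elev" and "zones_rast" as placeholder raster names) prints the per-zone mean column from the same tabular output.

import grass.script as grass

p = grass.pipe_command("r.univar", flags="t", map="elev",
                       zones="zones_rast", sep=";")
header = None
for line in p.stdout:
    fields = grass.decode(line).rstrip("\r\n").split(";")
    if header is None:
        header = fields              # first line holds the column names
        mean_idx = header.index("mean")
        continue
    print(fields[0], fields[mean_idx])   # zone cat and its mean value
p.wait()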
Example #33
def main():
    mapname = options["map"]
    layer = options["layer"]
    option = options["option"]
    units = options["units"]
    sort = options["sort"]
    fs = separator(options["separator"])

    nuldev = open(os.devnull, "w")

    if not grass.find_file(mapname, "vector")["file"]:
        grass.fatal(_("Vector map <%s> not found") % mapname)

    if int(layer) in grass.vector_db(mapname):
        colnames = grass.vector_columns(mapname,
                                        layer,
                                        getDict=False,
                                        stderr=nuldev)
        isConnection = True
    else:
        isConnection = False
        colnames = ["cat"]

    if option == "coor":
        extracolnames = ["x", "y", "z"]
    else:
        extracolnames = [option]

    if units == "percent":
        unitsp = "meters"
    elif units:
        unitsp = units
    else:
        unitsp = None

    # NOTE: we suppress -1 cat and 0 cat
    if isConnection:
        f = grass.vector_db(map=mapname)[int(layer)]
        p = grass.pipe_command("v.db.select",
                               flags="e",
                               quiet=True,
                               map=mapname,
                               layer=layer)
        records1 = []
        catcol = -1
        ncols = 0
        for line in p.stdout:
            cols = decode(line).rstrip("\r\n").split("|")
            if catcol == -1:
                ncols = len(cols)
                for i in range(0, ncols):
                    if cols[i] == f["key"]:
                        catcol = i
                        break
                if catcol == -1:
                    grass.fatal(
                        _("There is a table connected to input vector map '%s', but "
                          "there is no key column '%s'.") %
                        (mapname, f["key"]))
                continue
            if cols[catcol] == "-1" or cols[catcol] == "0":
                continue
            records1.append(cols[:catcol] + [int(cols[catcol])] +
                            cols[(catcol + 1):])
        p.wait()
        if p.returncode != 0:
            sys.exit(1)

        records1.sort(key=lambda r: r[catcol])

        if len(records1) == 0:
            try:
                grass.fatal(
                    _("There is a table connected to input vector map '%s', but "
                      "there are no categories present in the key column '%s'. Consider using "
                      "v.to.db to correct this.") % (mapname, f["key"]))
            except KeyError:
                pass

        # fetch the requested attribute sorted by cat:
        p = grass.pipe_command(
            "v.to.db",
            flags="p",
            quiet=True,
            map=mapname,
            option=option,
            layer=layer,
            units=unitsp,
        )
        records2 = []
        for line in p.stdout:
            fields = decode(line).rstrip("\r\n").split("|")
            if fields[0] in ["cat", "-1", "0"]:
                continue
            records2.append([int(fields[0])] + fields[1:])
        p.wait()
        records2.sort()

        # make pre-table
        # len(records1) may not be the same as len(records2) because
        # v.db.select can return attributes that are not linked to features.
        records3 = []
        for r2 in records2:
            rec = list(filter(lambda r1: r1[catcol] == r2[0], records1))
            if len(rec) > 0:
                res = rec[0] + r2[1:]
            elif flags["d"]:
                res = [r2[0]] + [""] * (ncols - 1) + r2[1:]
            else:
                continue
            records3.append(res)
    else:
        catcol = 0
        records1 = []
        p = grass.pipe_command("v.category",
                               inp=mapname,
                               layer=layer,
                               option="print")
        for line in p.stdout:
            field = int(decode(line).rstrip())
            if field > 0:
                records1.append(field)
        p.wait()
        records1.sort()
        records1 = uniq(records1)

        # make pre-table
        p = grass.pipe_command(
            "v.to.db",
            flags="p",
            quiet=True,
            map=mapname,
            option=option,
            layer=layer,
            units=unitsp,
        )
        records3 = []
        for line in p.stdout:
            fields = decode(line).rstrip("\r\n").split("|")
            if fields[0] in ["cat", "-1", "0"]:
                continue
            records3.append([int(fields[0])] + fields[1:])
        p.wait()
        records3.sort()

    # print table header
    if not flags["c"]:
        sys.stdout.write(fs.join(colnames + extracolnames) + "\n")

    # make and print the table:
    numcols = len(colnames) + len(extracolnames)

    # calculate percents if requested
    if units == "percent" and option != "coor":
        # calculate total value
        total = 0
        for r in records3:
            total += float(r[-1])

        # calculate percentages
        records4 = [float(r[-1]) * 100 / total for r in records3]
        if type(records1[0]) == int:
            records3 = [[r1] + [r4] for r1, r4 in zip(records1, records4)]
        else:
            records3 = [r1 + [r4] for r1, r4 in zip(records1, records4)]

    # sort results
    if sort:
        if sort == "asc":
            if option == "coor":
                records3.sort(
                    key=lambda r: (float(r[-3]), float(r[-2]), float(r[-1])))
            else:
                records3.sort(key=lambda r: float(r[-1]))
        else:
            if option == "coor":
                records3.sort(
                    key=lambda r: (float(r[-3]), float(r[-2]), float(r[-1])),
                    reverse=True,
                )
            else:
                records3.sort(key=lambda r: float(r[-1]), reverse=True)

    for r in records3:
        sys.stdout.write(fs.join(map(str, r)) + "\n")
예제 #34
0
def main(options, flags):

    # Check if running in GRASS
    gisbase = os.getenv("GISBASE")
    if not gisbase:
        gs.fatal(_("$GISBASE not defined"))
        return 0

    # variables
    ipl = options["env"]
    ipl = ipl.split(",")
    raster_exists(ipl)
    ipn = [z.split("@")[0] for z in ipl]
    ipn = [x.lower() for x in ipn]
    out = options["output"]
    if out:
        tmpf0 = out
    else:
        tmpf0 = tmpname("reb0")
    filename = options["file"]
    ref = options["ref"]
    flag_m = flags["m"]
    flag_n = flags["n"]
    flag_o = flags["o"]
    flag_i = flags["i"]
    digits = int(options["digits"])
    digits2 = pow(10, digits)

    # Check if ref map is of type cell and values are limited to 1 and 0
    reftype = gs.raster_info(ref)
    if reftype['datatype'] != "CELL":
        gs.fatal(_("Your reference map must have type CELL (integer)"))
    if reftype['min'] != 0 or reftype['max'] != 1:
        gs.fatal(
            _("The input raster map must be a binary raster,"
              " i.e. it should contain only values 0 and 1 "
              " (now the minimum is %d and maximum is %d)") %
            (reftype['min'], reftype['max']))

    # Text for history in metadata
    opt2 = dict((k, v) for k, v in options.items() if v)
    hist = ' '.join("{!s}={!r}".format(k, v) for (k, v) in opt2.items())
    hist = "r.meb {}".format(hist)
    unused, tmphist = tempfile.mkstemp()
    text_file = open(tmphist, "w")
    text_file.write(hist)
    text_file.close()

    # ------------------------------------------------------------------------
    # Compute MES
    # ------------------------------------------------------------------------

    # Create temporary copy of ref layer
    tmpref0 = tmpname("reb1")
    CLEAN_RAST.append(tmpref0)
    gs.run_command("g.copy", quiet=True, raster=(ref, tmpref0))

    ipi = []
    for j in range(len(ipl)):
        # Calculate the frequency distribution
        tmpf1 = tmpname("reb1")
        CLEAN_RAST.append(tmpf1)
        laytype = gs.raster_info(ipl[j])["datatype"]
        if laytype == "CELL":
            gs.run_command("g.copy", quiet=True, raster=(ipl[j], tmpf1))
        else:
            gs.mapcalc("$tmpf1 = int($dignum * $inplay)",
                       tmpf1=tmpf1,
                       inplay=ipl[j],
                       dignum=digits2,
                       quiet=True)
        p = gs.pipe_command("r.stats",
                            quiet=True,
                            flags="cn",
                            input=tmpf1,
                            sort="asc",
                            sep=";")
        stval = {}
        for line in p.stdout:
            [val, count] = gs.decode(line).strip().split(";")
            stval[float(val)] = float(count)
        p.wait()
        sstval = sorted(stval.items(), key=operator.itemgetter(0))
        sstval = np.matrix(sstval)
        a = np.cumsum(np.array(sstval), axis=0)
        b = np.sum(np.array(sstval), axis=0)
        c = a[:, 1] / b[1] * 100

        # Create recode rules
        e1 = np.min(np.array(sstval), axis=0)[0] - 99999
        e2 = np.max(np.array(sstval), axis=0)[0] + 99999
        a1 = np.hstack([(e1), np.array(sstval.T[0])[0, :]])
        a2 = np.hstack([np.array(sstval.T[0])[0, :] - 1, (e2)])
        b1 = np.hstack([(0), c])

        fd2, tmprule = tempfile.mkstemp()
        text_file = open(tmprule, "w")
        for k in np.arange(0, len(b1.T)):
            text_file.write("{}:{}:{}\n".format(int(a1[k]), int(a2[k]), b1[k]))
        text_file.close()

        # Create the recode layer and calculate the IES
        tmpf2 = tmpname("reb2")
        CLEAN_RAST.append(tmpf2)
        gs.run_command("r.recode", input=tmpf1, output=tmpf2, rules=tmprule)

        tmpf3 = tmpname("reb3")
        CLEAN_RAST.append(tmpf3)

        calcc = "{1} = if({0} <= 50, 2 * float({0}), if({0} < 100, " \
                "2 * (100 - float({0}))))".format(tmpf2, tmpf3)
        gs.mapcalc(calcc, quiet=True)
        gs.run_command("g.remove",
                       quiet=True,
                       flags="f",
                       type="raster",
                       name=(tmpf2, tmpf1))
        os.close(fd2)
        os.remove(tmprule)
        ipi.append(tmpf3)

    # ----------------------------------------------------------------------
    # Calculate EB statistics
    # ----------------------------------------------------------------------

    # EB MES
    if flag_m:
        gs.info(_("\nThe EB based on mean ES values:\n"))
        nmn = "{}_MES_mean".format(tmpf0)
        gs.run_command("r.series",
                       quiet=True,
                       output=nmn,
                       input=tuple(ipi),
                       method="average")
        gs.write_command("r.colors",
                         map=nmn,
                         rules="-",
                         stdin=COLORS_MES,
                         quiet=True)
        ebm = EB(simlay=nmn, reflay=tmpref0)
        if not out:
            # Add to list of layers to be removed at completion
            CLEAN_RAST.append(nmn)
        else:
            # Write layer metadata
            gs.run_command("r.support",
                           map=nmn,
                           title="Multivariate environmental similarity (MES)",
                           units="0-100 (relative score",
                           description="MES (compuated as the average of "
                           "the individual similarity layers",
                           loadhistory=tmphist)

    if flag_n:
        gs.info(_("\nThe EB based on median ES values:\n"))
        nmn = "{}_MES_median".format(tmpf0)
        gs.run_command("r.series",
                       quiet=True,
                       output=nmn,
                       input=tuple(ipi),
                       method="median")
        gs.write_command("r.colors",
                         map=nmn,
                         rules="-",
                         stdin=COLORS_MES,
                         quiet=True)
        ebn = EB(simlay=nmn, reflay=tmpref0)
        if not out:
            CLEAN_RAST.append(nmn)
        else:
            # Write layer metadata
            gs.run_command("r.support",
                           map=nmn,
                           title="Multivariate environmental similarity (MES)",
                           units="0-100 (relative score",
                           description="MES (compuated as the median of "
                           "the individual similarity layers",
                           loadhistory=tmphist)

    if flag_o:
        gs.info(_("\nThe EB based on minimum ES values:\n"))
        nmn = "{}_MES_minimum".format(tmpf0)
        gs.run_command("r.series",
                       quiet=True,
                       output=nmn,
                       input=tuple(ipi),
                       method="minimum")
        gs.write_command("r.colors",
                         map=nmn,
                         rules="-",
                         stdin=COLORS_MES,
                         quiet=True)
        ebo = EB(simlay=nmn, reflay=tmpref0)
        if not out:
            CLEAN_RAST.append(nmn)
        else:
            # Write layer metadata
            gs.run_command("r.support",
                           map=nmn,
                           title="Multivariate environmental similarity (MES)",
                           units="0-100 (relative score",
                           description="MES (compuated as the minimum of "
                           "the individual similarity layers",
                           loadhistory=tmphist)

    # EB individual layers
    if flag_i:
        ebi = {}
        for mm in range(len(ipi)):
            nmn = "{}_{}".format(tmpf0, ipn[mm])
            if not out:
                CLEAN_RAST.append(nmn)
            gs.run_command("g.rename", quiet=True, raster=(ipi[mm], nmn))
            gs.write_command("r.colors",
                             map=nmn,
                             rules="-",
                             stdin=COLORS_MES,
                             quiet=True)
            gs.info(_("\nThe EB for {}:\n").format(ipn[mm]))
            value = EB(simlay=nmn, reflay=tmpref0)
            ebi[ipn[mm]] = value
            gs.run_command("r.support",
                           map=nmn,
                           title="Environmental similarity (ES) for "
                           "{}".format(ipn[mm]),
                           units="0-100 (relative score",
                           description="Environmental similarity (ES) for "
                           "{}".format(ipn[mm]),
                           loadhistory=tmphist)
    else:
        gs.run_command("g.remove",
                       quiet=True,
                       flags="f",
                       type="raster",
                       name=ipi)

    if filename:
        with open(filename, "wb") as csvfile:
            fieldnames = [
                "variable", "median_region", "median_reference", "mad", "eb"
            ]
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            if flag_m:
                writer.writerow({
                    "variable": "MES_mean",
                    "median_region": ebm[1],
                    "median_reference": ebm[2],
                    "mad": ebm[0],
                    "eb": ebm[3]
                })
            if flag_n:
                writer.writerow({
                    "variable": "MES_median",
                    "median_region": ebn[1],
                    "median_reference": ebn[2],
                    "mad": ebn[0],
                    "eb": ebn[3]
                })
            if flag_o:
                writer.writerow({
                    "variable": "MES_minimum",
                    "median_region": ebo[1],
                    "median_reference": ebo[2],
                    "mad": ebo[0],
                    "eb": ebo[3]
                })
            if flag_i:
                mykeys = ebi.keys()
                for vari in mykeys:
                    ebj = ebi[vari]
                    writer.writerow({
                        "variable": vari,
                        "median_region": ebj[1],
                        "median_reference": ebj[2],
                        "mad": ebj[0],
                        "eb": ebj[3]
                    })
        gs.info(_("\nThe results are written to {}\n").format(filename))
        gs.info("\n")
Example #35
def main():
    mapname = options['map']
    option = options['option']
    layer = options['layer']
    units = options['units']

    nuldev = open(os.devnull, 'w')

    if not grass.find_file(mapname, 'vector')['file']:
        grass.fatal(_("Vector map <%s> not found") % mapname)

    if int(layer) in grass.vector_db(mapname):
        colnames = grass.vector_columns(mapname,
                                        layer,
                                        getDict=False,
                                        stderr=nuldev)
        isConnection = True
    else:
        isConnection = False
        colnames = ['cat']

    if option == 'coor':
        extracolnames = ['x', 'y', 'z']
    else:
        extracolnames = [option]

    if units == 'percent':
        unitsp = 'meters'
    elif units:
        unitsp = units
    else:
        unitsp = None

    # NOTE: we suppress -1 cat and 0 cat
    if isConnection:
        f = grass.vector_db(map=mapname)[int(layer)]
        p = grass.pipe_command('v.db.select',
                               quiet=True,
                               map=mapname,
                               layer=layer)
        records1 = []
        catcol = -1
        ncols = 0
        for line in p.stdout:
            cols = decode(line).rstrip('\r\n').split('|')
            if catcol == -1:
                ncols = len(cols)
                for i in range(0, ncols):
                    if cols[i] == f['key']:
                        catcol = i
                        break
                if catcol == -1:
                    grass.fatal(
                        _("There is a table connected to input vector map '%s', but "
                          "there is no key column '%s'.") %
                        (mapname, f['key']))
                continue
            if cols[catcol] == '-1' or cols[catcol] == '0':
                continue
            records1.append(cols[:catcol] + [int(cols[catcol])] +
                            cols[(catcol + 1):])
        p.wait()
        if p.returncode != 0:
            sys.exit(1)

        records1.sort(key=lambda r: r[catcol])

        if len(records1) == 0:
            try:
                grass.fatal(
                    _("There is a table connected to input vector map '%s', but "
                      "there are no categories present in the key column '%s'. Consider using "
                      "v.to.db to correct this.") % (mapname, f['key']))
            except KeyError:
                pass

        # fetch the requested attribute sorted by cat:
        p = grass.pipe_command('v.to.db',
                               flags='p',
                               quiet=True,
                               map=mapname,
                               option=option,
                               layer=layer,
                               units=unitsp)
        records2 = []
        for line in p.stdout:
            fields = decode(line).rstrip('\r\n').split('|')
            if fields[0] in ['cat', '-1', '0']:
                continue
            records2.append([int(fields[0])] + fields[1:])
        p.wait()
        records2.sort()

        # make pre-table
        # len(records1) may not be the same as len(records2) because
        # v.db.select can return attributes that are not linked to features.
        records3 = []
        for r2 in records2:
            rec = list(filter(lambda r1: r1[catcol] == r2[0], records1))
            if len(rec) > 0:
                res = rec[0] + r2[1:]
            elif flags['d']:
                res = [r2[0]] + [''] * (ncols - 1) + r2[1:]
            else:
                continue
            records3.append(res)
    else:
        catcol = 0
        records1 = []
        p = grass.pipe_command('v.category',
                               inp=mapname,
                               layer=layer,
                               option='print')
        for line in p.stdout:
            field = int(decode(line).rstrip())
            if field > 0:
                records1.append(field)
        p.wait()
        records1.sort()
        records1 = uniq(records1)

        # make pre-table
        p = grass.pipe_command('v.to.db',
                               flags='p',
                               quiet=True,
                               map=mapname,
                               option=option,
                               layer=layer,
                               units=unitsp)
        records3 = []
        for line in p.stdout:
            fields = decode(line).rstrip('\r\n').split('|')
            if fields[0] in ['cat', '-1', '0']:
                continue
            records3.append([int(fields[0])] + fields[1:])
        p.wait()
        records3.sort()

    # print table header
    if not flags['c']:
        sys.stdout.write('|'.join(colnames + extracolnames) + '\n')

    # make and print the table:
    numcols = len(colnames) + len(extracolnames)

    # calculate percents if requested
    if units == 'percent' and option != 'coor':
        # calculate total value
        total = 0
        for r in records3:
            total += float(r[-1])

        # calculate percentages
        records4 = [float(r[-1]) * 100 / total for r in records3]
        if type(records1[0]) == int:
            records3 = [[r1] + [r4] for r1, r4 in zip(records1, records4)]
        else:
            records3 = [r1 + [r4] for r1, r4 in zip(records1, records4)]

    # sort results
    if options['sort']:
        if options['sort'] == 'asc':
            if options['option'] == 'coor':
                records3.sort(
                    key=lambda r: (float(r[-3]), float(r[-2]), float(r[-1])))
            else:
                records3.sort(key=lambda r: float(r[-1]))
        else:
            if options['option'] == 'coor':
                records3.sort(key=lambda r:
                              (float(r[-3]), float(r[-2]), float(r[-1])),
                              reverse=True)
            else:
                records3.sort(key=lambda r: float(r[-1]), reverse=True)

    for r in records3:
        sys.stdout.write('|'.join(map(str, r)) + '\n')
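
The percent branch above is plain arithmetic; a toy sketch of the same normalization, with made-up per-category lengths as they would come out of v.to.db:

# Hypothetical records: [cat, length] pairs.
records3 = [[1, 120.0], [2, 60.0], [3, 20.0]]
total = sum(float(r[-1]) for r in records3)
percents = [float(r[-1]) * 100 / total for r in records3]
print(percents)   # [60.0, 30.0, 10.0]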
Example #36
def main():

    input = options['input']
    if options['refline']:
        refline_cat = int(options['refline'])
    else:
        refline_cat = None
    nb_vertices = int(options['vertices'])
    if options['range']:
        search_range = float(options['range'])
    else:
        search_range = None
    output = options['output']
    transversals = flags['t']
    median = flags['m']

    global tmp_points_map
    global tmp_centerpoints_map
    global tmp_line_map
    global tmp_cleaned_map
    global tmp_map
    tmp_points_map = 'points_map_tmp_%d' % os.getpid()
    tmp_centerpoints_map = 'centerpoints_map_tmp_%d' % os.getpid()
    tmp_line_map = 'line_map_tmp_%d' % os.getpid()
    tmp_cleaned_map = 'cleaned_map_tmp_%d' % os.getpid()
    tmp_map = 'generaluse_map_tmp_%d' % os.getpid()

    nb_lines = grass.vector_info_topo(input)['lines']

    # Find best reference line and max distance between centerpoints of lines
    segment_input = ''
    categories = grass.pipe_command('v.category',
                                    input=input,
                                    option='print',
                                    quiet=True)
    for category in categories.stdout:
        cat_str = grass.decode(category).strip()
        segment_input += 'P ' + cat_str
        segment_input += ' ' + cat_str + ' 50%\n'

    grass.write_command('v.segment',
                        input=input,
                        output=tmp_centerpoints_map,
                        rules='-',
                        stdin=segment_input,
                        quiet=True)

    center_distances = grass.pipe_command('v.distance',
                                          from_=tmp_centerpoints_map,
                                          to=tmp_centerpoints_map,
                                          upload='dist',
                                          flags='pa',
                                          quiet=True)

    cats = []
    mean_dists = []
    count = 0
    distmax = 0
    for center in center_distances.stdout:
        if count < 2:
            count += 1
            continue
        fields = grass.decode(center).strip().split('|')
        cat = fields[0]
        distsum = 0
        for x in fields[1:]:
            distsum += float(x)
        mean_dist = distsum / len(fields[1:])
        cats.append(cat)
        mean_dists.append(mean_dist)

    if transversals and not search_range:
        search_range = sum(mean_dists) / len(mean_dists)
        grass.message(_("Calculated search range:  %.5f." % search_range))

    if not refline_cat:
        refline_cat = sorted(zip(cats, mean_dists),
                             key=lambda tup: tup[1])[0][0]

        grass.message(
            _("Category number of chosen reference line: %s." % refline_cat))

    # Use transversals algorithm
    if transversals:

        # Break any intersections in the original lines so that
        # they do not interfere further on
        grass.run_command('v.clean',
                          input=input,
                          output=tmp_cleaned_map,
                          tool='break',
                          quiet=True)

        xmean = []
        ymean = []
        xmedian = []
        ymedian = []
        step = 100.0 / nb_vertices

        os.environ['GRASS_VERBOSE'] = '-1'

        for vertice in range(0, nb_vertices + 1):
            # v.segment sometimes cannot find points when
            # using 0% or 100% offset
            length_offset = step * vertice
            if length_offset < 0.00001:
                length_offset = 0.00001
            if length_offset > 99.99999:
                length_offset = 99.9999
            # Create endpoints of transversal
            segment_input = 'P 1 %s %.5f%% %f\n' % (refline_cat, length_offset,
                                                    search_range)
            segment_input += 'P 2 %s %.5f%% %f\n' % (
                refline_cat, length_offset, -search_range)
            grass.write_command('v.segment',
                                input=input,
                                output=tmp_points_map,
                                stdin=segment_input,
                                overwrite=True)

            # Create transversal
            grass.write_command('v.net',
                                points=tmp_points_map,
                                output=tmp_line_map,
                                operation='arcs',
                                file='-',
                                stdin='99999 1 2',
                                overwrite=True)

            # Patch transversal onto cleaned input lines
            maps = tmp_cleaned_map + ',' + tmp_line_map
            grass.run_command('v.patch',
                              input=maps,
                              out=tmp_map,
                              overwrite=True)

            # Find intersections
            grass.run_command('v.clean',
                              input=tmp_map,
                              out=tmp_line_map,
                              tool='break',
                              error=tmp_points_map,
                              overwrite=True)

            # Add categories to intersection points
            grass.run_command('v.category',
                              input=tmp_points_map,
                              out=tmp_map,
                              op='add',
                              overwrite=True)

            # Get coordinates of points
            coords = grass.pipe_command('v.to.db',
                                        map=tmp_map,
                                        op='coor',
                                        flags='p')

            count = 0
            x = []
            y = []
            for coord in coords.stdout:
                fields = grass.decode(coord).strip().split('|')
                x.append(float(fields[1]))
                y.append(float(fields[2]))

            # Calculate mean and median for this transversal
            if len(x) > 0:
                xmean.append(sum(x) / len(x))
                ymean.append(sum(y) / len(y))

                x.sort()
                y.sort()

                xmedian.append((x[(len(x) - 1) // 2] + x[len(x) // 2]) / 2)
                ymedian.append((y[(len(y) - 1) // 2] + y[len(y) // 2]) / 2)

        del os.environ['GRASS_VERBOSE']

    # Use closest point algorithm
    else:

        # Get reference line calculate its length
        grass.run_command('v.extract',
                          input=input,
                          output=tmp_line_map,
                          cats=refline_cat,
                          quiet=True)

        os.environ['GRASS_VERBOSE'] = '0'
        lpipe = grass.pipe_command('v.to.db',
                                   map=tmp_line_map,
                                   op='length',
                                   flags='p')
        del os.environ['GRASS_VERBOSE']

        for l in lpipe.stdout:
            linelength = float(grass.decode(l).strip().split('|')[1])

        step = linelength / nb_vertices

        # Create reference points for vertice calculation
        grass.run_command('v.to.points',
                          input=tmp_line_map,
                          output=tmp_points_map,
                          dmax=step,
                          quiet=True)

        nb_points = grass.vector_info_topo(tmp_points_map)['points']

        cat = []
        x = []
        y = []

        # Get coordinates of closest points on all input lines
        if search_range:
            points = grass.pipe_command('v.distance',
                                        from_=tmp_points_map,
                                        from_layer=2,
                                        to=input,
                                        upload='to_x,to_y',
                                        dmax=search_range,
                                        flags='pa',
                                        quiet=True)
        else:
            points = grass.pipe_command('v.distance',
                                        from_=tmp_points_map,
                                        from_layer=2,
                                        to=input,
                                        upload='to_x,to_y',
                                        flags='pa',
                                        quiet=True)

        firstline = True
        for point in points.stdout:
            if firstline:
                firstline = False
                continue
            fields = grass.decode(point).strip().split('|')
            cat.append(int(fields[0]))
            x.append(float(fields[2]))
            y.append(float(fields[3]))

        # Calculate mean coordinates
        xsum = [0] * nb_points
        ysum = [0] * nb_points
        linecount = [0] * nb_points

        for i in range(len(cat)):
            index = cat[i] - 1
            linecount[index] += 1
            xsum[index] = xsum[index] + x[i]
            ysum[index] = ysum[index] + y[i]

        xmean = [0] * nb_points
        ymean = [0] * nb_points

        for c in range(0, nb_points):
            xmean[c] = xsum[c] / linecount[c]
            ymean[c] = ysum[c] / linecount[c]

        # Calculate the median

        xmedian = [0] * nb_points
        ymedian = [0] * nb_points

        for c in range(0, nb_points):
            xtemp = []
            ytemp = []
            for i in range(len(cat)):
                if cat[i] == c + 1:
                    xtemp.append(x[i])
                    ytemp.append(y[i])
            xtemp.sort()
            ytemp.sort()
            xmedian[c] = (xtemp[(len(xtemp) - 1) // 2] +
                          xtemp[len(xtemp) // 2]) / 2
            ymedian[c] = (ytemp[(len(ytemp) - 1) // 2] +
                          ytemp[len(ytemp) // 2]) / 2

    # Create new line and write to file
    if median and nb_lines > 2:
        line = geo.Line(zip(xmedian, ymedian))
    else:
        if median and nb_lines <= 2:
            grass.message(
                _("More than 2 lines necesary for median, using mean."))
        line = geo.Line(zip(xmean, ymean))

    new = VectorTopo(output)
    new.open('w')

    new.write(line)
    new.close()
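
The transversal endpoints come from v.segment rules fed on stdin ("P <point id> <line cat> <offset> [<side offset>]"). A minimal sketch of that one call, assuming an active GRASS session and a placeholder vector "lines" containing a line with category 1:

import grass.script as grass

# Two points on line cat 1: at 50% of its length, offset +/-10 map units.
rules = "P 1 1 50% 10\nP 2 1 50% -10\n"
grass.write_command("v.segment", input="lines", output="endpoints",
                    rules="-", stdin=rules, overwrite=True)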
Example #37
 def __str__(self):
     return grass.decode(
         grass.pipe_command("g.region", flags="p").communicate()[0])
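
For a simple string result like this, grass.read_command is the shorter route; a hedged sketch, again assuming an active GRASS session:

import grass.script as grass

region_report = grass.read_command("g.region", flags="p")
print(region_report)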
Example #38
def main():
    # split input images
    all_images = options['input']
    images = all_images.split(',')
    # number of images
    n_images = len(images)
    # database path
    dbopt = options['database']
    # output suffix
    suffix = options['suffix']
    # output mosaic map
    mosaic = options['output']
    output_names = []
    # name for average table
    table_ave = "t%s_average" % suffix
    # increment of one the maximum value for a correct use of range function
    max_value = int(options['max']) + 1
    # if the db path is the default one
    if dbopt.find('$GISDBASE/$LOCATION_NAME/$MAPSET') == 0:
        dbopt_split = dbopt.split('/')[-1]
        env = grass.gisenv()
        path = os.path.join(env['GISDBASE'], env['LOCATION_NAME'],
                            env['MAPSET'])
        dbpath = os.path.join(path, dbopt_split)
    else:
        if os.access(os.path.dirname(dbopt), os.W_OK):
            path = os.path.dirname(dbopt)
            dbpath = dbopt
        else:
            grass.fatal(
                _("Folder to write database files does not" +
                  " exist or is not writeable"))
    # connect to the db
    db = sqlite3.connect(dbpath)
    curs = db.cursor()
    grass.message(_("Calculating Cumulative Distribution Functions ..."))

    # number of pixels per value, summarized for all images
    numPixelValue = [0] * max_value

    # cumulative histogram for each value and each image
    cumulHistoValue = [0] * max_value

    # set up temp region only once
    grass.use_temp_region()

    # for each image
    for i in images:
        iname = i.split('@')[0]
        # drop table if exist
        query_drop = "DROP TABLE if exists \"t%s\"" % iname
        curs.execute(query_drop)
        # create table
        query_create = "CREATE TABLE \"t%s\" (grey_value integer,pixel_frequency " % iname
        query_create += "integer, cumulative_histogram integer, cdf real)"
        curs.execute(query_create)
        index_create = "CREATE UNIQUE INDEX \"t%s_grey_value\" ON \"t%s\" (grey_value) " % (
            iname, iname)
        curs.execute(index_create)
        # set the region on the raster
        grass.run_command('g.region', raster=i)
        # calculate statistics
        stats_out = grass.pipe_command('r.stats',
                                       flags='cin',
                                       input=i,
                                       separator=':')
        stats = stats_out.communicate()[0].decode('utf-8').split('\n')[:-1]
        stats_dict = dict(s.split(':', 1) for s in stats)
        cdf = 0
        curs.execute("BEGIN")
        # for each number in the range
        for n in range(0, max_value):
            # try to insert the values otherwise insert 0

            try:
                val = int(stats_dict[str(n)])
                cdf += val
                numPixelValue[n] += val
                insert = "INSERT INTO \"t%s\" VALUES (%i, %i, %i, 0.000000)" % (
                    iname, n, val, cdf)
                curs.execute(insert)
            except (KeyError, ValueError):
                insert = "INSERT INTO \"t%s\" VALUES (%i, 0, %i, 0.000000)" % (
                    iname, n, cdf)
                curs.execute(insert)
            # save cumulative_histogram for the second loop
            cumulHistoValue[n] = cdf
        curs.execute("COMMIT")
        db.commit()
        # number of pixel is the cdf value
        numPixel = cdf
        # for each number in the range
        # cdf is updated using the number of non-null pixels for the current image
        curs.execute("BEGIN")
        for n in range(0, max_value):
            # select value for cumulative_histogram for the range number
            """
            select_ch = "SELECT cumulative_histogram FROM \"t%s\" WHERE " % iname
            select_ch += "(grey_value=%i)" % n
            result = curs.execute(select_ch)
            val = result.fetchone()[0]
            """
            val = cumulHistoValue[n]
            # update cdf with new value
            if val != 0 and numPixel != 0:
                update_cdf = round(float(val) / float(numPixel), 6)
                update_cdf = "UPDATE \"t%s\" SET cdf=%s WHERE (grey_value=%i)" % (
                    iname, update_cdf, n)
                curs.execute(update_cdf)

        curs.execute("COMMIT")
        db.commit()
    db.commit()
    pixelTot = 0

    # get total number of pixels divided by number of images
    # for each number in the range
    for n in range(0, max_value):
        """
        numPixel = 0
        # for each image
        for i in images:
            iname = i.split('@')[0]
            pixel_freq = "SELECT pixel_frequency FROM \"t%s\" WHERE (grey_value=%i)" % (
                                                                iname, n)
            result = curs.execute(pixel_freq)
            val = result.fetchone()[0]
            numPixel += val
        """
        # divide the pixel count by the number of images
        div = (int(numPixelValue[n] / n_images))
        pixelTot += div

    # drop average table
    query_drop = "DROP TABLE if exists %s" % table_ave
    curs.execute(query_drop)
    # create average table
    query_create = "CREATE TABLE %s (grey_value integer,average " % table_ave
    query_create += "integer, cumulative_histogram integer, cdf real)"
    curs.execute(query_create)
    index_create = "CREATE UNIQUE INDEX \"%s_grey_value\" ON \"%s\" (grey_value) " % (
        table_ave, table_ave)
    curs.execute(index_create)
    cHist = 0
    # for each number in the range
    curs.execute("BEGIN")
    for n in range(0, max_value):
        tot = 0
        """
        # for each image
        for i in images:
            iname = i.split('@')[0]
            # select pixel frequency
            pixel_freq = "SELECT pixel_frequency FROM \"t%s\" WHERE (grey_value=%i)" % (
                                                            iname, n)
            result = curs.execute(pixel_freq)
            val = result.fetchone()[0]
            tot += val
        """
        tot = numPixelValue[n]
        # calculate new value of pixel_frequency
        average = (tot / n_images)
        cHist = cHist + int(average)
        # insert new values into average table
        if cHist != 0 and pixelTot != 0:
            cdf = float(cHist) / float(pixelTot)
            insert = "INSERT INTO %s VALUES (%i, %i, %i, %s)" % (
                table_ave, n, int(average), cHist, cdf)
            curs.execute(insert)
    curs.execute("COMMIT")
    db.commit()

    # for each image
    grass.message(_("Reclassifying bands based on average histogram..."))
    for i in images:
        iname = i.split('@')[0]
        grass.run_command('g.region', raster=i)
        # write average rules file
        outfile = open(grass.tempfile(), 'w')
        new_grey = 0
        for n in range(0, max_value):
            select_newgrey = "SELECT b.grey_value FROM \"t%s\" as a, " % iname
            select_newgrey += "%s as b WHERE a.grey_value=%i " % (table_ave, n) \
                + "ORDER BY abs(a.cdf-b.cdf) LIMIT 1"
            # write line with old and new value
            try:
                result_new = curs.execute(select_newgrey)
                new_grey = result_new.fetchone()[0]
                out_line = "%d = %d\n" % (n, new_grey)
                outfile.write(out_line)
            except (TypeError, sqlite3.Error):
                out_line = "%d = %d\n" % (n, new_grey)
                outfile.write(out_line)

        outfile.close()
        outname = '%s.%s' % (iname, suffix)
        # check if a output map already exists
        result = grass.core.find_file(outname, element='cell')
        if result['fullname'] and grass.overwrite():
            grass.run_command('g.remove',
                              flags='f',
                              type='raster',
                              name=outname)
            grass.run_command('r.reclass',
                              input=i,
                              out=outname,
                              rules=outfile.name)
        elif result['fullname'] and not grass.overwrite():
            grass.warning(
                _("Raster map %s already exists and will not be overwritten" %
                  i))
        else:
            grass.run_command('r.reclass',
                              input=i,
                              out=outname,
                              rules=outfile.name)
        output_names.append(outname)
        # remove the rules file
        grass.try_remove(outfile.name)
        # write cmd history:
        grass.raster_history(outname)
    db.commit()
    db.close()
    if mosaic:
        grass.message(_("Processing mosaic <%s>..." % mosaic))
        grass.run_command('g.region', raster=all_images)
        grass.run_command('r.patch', input=output_names, output=mosaic)
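
Each band is ultimately rewritten through r.reclass rules of the form "old = new". A stand-alone sketch of that final step, with placeholder map names and a made-up grey-level mapping, assuming an active GRASS session:

import grass.script as grass

rulefile = grass.tempfile()
with open(rulefile, 'w') as f:
    # Hypothetical mapping: grey value 0 -> 3, 1 -> 5, 2 -> 5.
    f.write("0 = 3\n1 = 5\n2 = 5\n")
grass.run_command('r.reclass', input='band1', output='band1.matched',
                  rules=rulefile)
grass.try_remove(rulefile)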
Example #39
def main():
    res = options['res']
    poly1 = options['map']
    if "@" in poly1:
        poly1 = poly1.split("@")[0]
    poly2 = options['query_map']
    qcol = options['query_column']
    if not res:
        cur_region = grass.region()
        res = cur_region['nsres']
    
    grass.run_command('g.region', res = res, quiet = True)
    grass.run_command('v.to.rast', type_ = 'area',
                      input_ = poly2, output = 'raster_tmp',
                      use = 'attr', attribute_column = 'cat',
                      label_column = qcol,
                      quiet = True)

    p = grass.pipe_command('r.category', map = 'raster_tmp',
                           separator = '|', quiet = True)
    cats = []
    labels = []
    for line in p.stdout:
        line = grass.decode(line)
        cats.append(line.rstrip('\r\n').split('|')[0])
        labels.append(line.rstrip('\r\n').split('|')[1])
    p.wait()

    query_dict = dict(zip(cats,labels))

    grass.run_command('v.extract', input_ = poly1,
                      output = 'vector_tmp1',
                      type_ = 'centroid',
                      overwrite = True, quiet = True)
    grass.run_command('v.category', input_ = poly1,
                      output = 'vector_tmp2',
                      option = 'add', flags = 't',
                      type_ = 'boundary',
                      overwrite = True)
    # grass.run_command('v.extract', input_ = 'vector_tmp2',
    #                   output = 'vector_tmp3',
    #                   type_ = 'boundary',
    #                   overwrite = True)
    # grass.run_command('v.edit', map_ = 'vector_tmp3',
    #                   tool = 'delete',
    #                   type_ = 'centroid',
    #                   ids = 0-99999999,
    #                   overwrite = True)
    # grass.run_command('v.category', input_ = 'vector_tmp3',
    #                   output = 'vector_tmp4',
    #                   option = 'del',
    #                   type_ = 'boundary',
    #                   overwrite = True)    
    grass.run_command('v.db.addcolumn', map_ = 'vector_tmp1',
                      column = 'rast_cat int', quiet = True)
    grass.run_command('v.db.addcolumn', map_ = 'vector_tmp1',
                      column = qcol, quiet = True)
    grass.run_command('v.what.rast', map_ = 'vector_tmp1',
                      raster = 'raster_tmp',
                      column = 'rast_cat',
                      type_ = 'centroid',
                      overwrite = True, quiet = True)    

    for key,value in query_dict.items():
        grass.run_command('v.db.update', map_ = 'vector_tmp1',
                          col = qcol, value = value,
                          where = "rast_cat = %s" % key,
                          quiet = True)

    grass.run_command('v.db.dropcolumn', map_ = 'vector_tmp1',
                      column = 'rast_cat', quiet = True)

    grass.run_command('v.edit', map_ = 'vector_tmp1',
                      tool = 'copy', bgmap = 'vector_tmp2',
                      type_ = 'boundary', cats = '0-99999999')

    grass.run_command('g.rename', vector = ('vector_tmp1', poly1),
                      overwrite = True, quiet = True)

    sys.exit(0)
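
The attribute transfer hinges on v.what.rast writing the raster category found under each centroid into a column. A reduced sketch of just that step, in this example's spacing style, with "polys" and "categories" as placeholder map names and an active GRASS session assumed:

import grass.script as grass

grass.run_command('v.db.addcolumn', map_ = 'polys',
                  column = 'rast_cat int', quiet = True)
grass.run_command('v.what.rast', map_ = 'polys',
                  raster = 'categories', column = 'rast_cat',
                  type_ = 'centroid', quiet = True)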
Example #40
def main():
    """Do the main work"""

    alias_output = options["alias_output"]

    bgr_mask = options["bgr_mask"]

    null_value = options["null_value"]

    bgr_output = options["bgr_output"]
    species_output = options["species_output"]

    alias, parameters = parse_bgr_input(
        options["alias_input"], options["env_maps"], options["alias_names"]
    )

    species_dict = parse_species_input(
        options["species_masks"], options["species_names"]
    )

    # Check if a mask file already exists
    if RasterRow("MASK", Mapset().name).exist():
        gscript.verbose(
            _("A mask already exists. Renaming existing mask to {}_MASK...").format(
                TMP_NAME
            )
        )
        gscript.run_command(
            "g.rename", rast="MASK,{}_MASK".format(TMP_NAME), quiet=True
        )

    # Build parameter header if necessary
    header = ",".join(alias)

    # Write alias output if requested
    if alias_output:
        with open(alias_output, "w") as alias_out:
            for idx, name in enumerate(alias):
                alias_out.write("{},{}\n".format(name, parameters[idx]))

    # Check if species output is requested and produce it
    if species_output and species_dict:
        # Write header to species output SWD file
        species_header = "species,X,Y,{}\n".format(header)

        with open(species_output, "w") as sp_out:
            sp_out.write(species_header)

        # Parse species input variables
        for species in species_dict:

            species_map = species_dict[species]
            # Zoom region to match species map if requested
            if flags["z"]:
                gscript.verbose(
                    _("Zooming region to species {} temporarily.".format(species))
                )
                gscript.use_temp_region()
                gscript.run_command(
                    "g.region", align="@".join(species_map), zoom="@".join(species_map)
                )
            #
            # Apply species mask
            gscript.run_command(
                "r.mask", raster="@".join(species_map), overwrite=True, quiet=True
            )

            # Export data using r.stats
            gscript.verbose(_("Producing output for species {}".format(species)))
            stats = gscript.pipe_command(
                "r.stats",
                flags="1gN",
                verbose=True,
                input=",".join(parameters),
                separator=",",
                null_value=null_value,
            )

            with open(species_output, "a") as sp_out:
                for row in stats.stdout:
                    sp_out.write("{},{}".format(species, gscript.decode(row)))

            # Redo zoom region to match species map if it had been requested
            if flags["z"]:
                gscript.del_temp_region()
            # Remove mask
            gscript.run_command("r.mask", flags="r", quiet=True)

    # Write header to background output SWD file
    bgr_header = "bgr,X,Y,{}\n".format(",".join(alias))

    with open(bgr_output, "w") as bgr_out:
        bgr_out.write(bgr_header)

    # Process map data for background
    # Apply the background mask if one was provided
    if bgr_mask:
        gscript.verbose(
            _("Using map {} as mask for the background landscape...".format(bgr_mask))
        )
        # Apply mask
        gscript.run_command("r.mask", raster=bgr_mask, overwrite=True, quiet=True)
    #
    # Export data using r.stats
    gscript.verbose(_("Producing output for background landscape"))
    stats = gscript.pipe_command(
        "r.stats",
        flags="1gN",
        input=",".join(parameters),
        separator=",",
        null_value=null_value,
    )

    with open(bgr_output, "a") as bgr_out:
        for row in stats.stdout:
            bgr_out.write("bgr,{}".format(gscript.decode(row)))

    cleanup()
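
The SWD rows come straight from r.stats with the 1, g and N flags (one non-null cell per line, with coordinates prepended). A minimal sketch under the same assumptions, with "bio1" and "bio2" as placeholder raster names:

import grass.script as gscript

stats = gscript.pipe_command("r.stats", flags="1gN",
                             input="bio1,bio2", separator=",")
for row in stats.stdout:
    # each row is "X,Y,value1,value2"
    print(gscript.decode(row).rstrip())
stats.wait()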
Example #41
def output_xsections(xsects, outfile, elev, res, river):
	"""
	Create the cross section profiles by first making a points vector from the cross sections
	loop thru the section_ids and use v.out.ascii to get the points for each cross section.
	Feed these points into r.profile to get lists of the coords and elevation at each spot along the xsection,
	and output these to the CROSS-SECTION paragraph
	"""

	# Prepare tmp vector maps to hold the points from the cross sections
	proc=os.getpid()
	xsect_pts="tmp_xsect_pts_"+str(proc)
	#xsect_pts2="tmp_xsect_pts2_"+str(proc)
	# v.to.points returns all points on the same cross section with the same cat value
	grass.run_command('v.to.points', input=xsects, output=xsect_pts, use="vertex", quiet=True)
	outfile.write("\n")
	outfile.write("BEGIN CROSS-SECTIONS:\n")
	
	# Get the list of station ids with the reaches
	# Layer 1 contains one cat for all points on the same xsect line, 
	# so v.db.select returns one row for each xsect line (not for each point on the line)
	st=grass.pipe_command('v.db.select', map=xsect_pts, layer=1, columns="reach,station_id", flags="c", quiet=True)
	station_ids=[]
	for line in st.stdout:
		station = grass.decode(line).rstrip('\n').split("|")
		r,s = station[0], station[1]
		station_ids.append([r,s])

	st.stdout.close()
	st.wait()
	
	# Now loop thru those stations to create the CUTLINE and SURFACE section
	for i in range(len(station_ids)):
		station_id = station_ids[i][1].strip('\n')
		reach = station_ids[i][0].rstrip('\n')
		process_msg="Processing reach: "+reach+" at station id: "+station_id
		grass.message(process_msg)
		outfile.write(" CROSS-SECTION:\n")
		outfile.write("   STREAM ID: %s\n" % river)
		outfile.write("   REACH ID: %s\n" % reach)
		outfile.write("   STATION: %s\n" % station_id)
			
		# get point coords from this reach/station and print out CUTLINE: section
		# Save this output also for next SURFACE: section
		station_cond="station_id="+station_id
		p=grass.pipe_command('v.out.ascii', input=xsect_pts, columns="reach,station_id",
			where=station_cond, separator=",", layer=1, quiet=True)
		station_pts=[]
		for line in p.stdout:
			st = line.rstrip('\n').split(',')
			station_pts.append(st)

		p.stdout.close()
		p.wait()

		outfile.write("   CUTLINE:\n")
		for j in range(len(station_pts)):
			x,y = station_pts[j][0], station_pts[j][1]
			outfile.write("	 "+x+","+y+"\n")  

		# Now run the points thru r.profile, and get the elevations
		# and print elevations to SURFACE LINE: section
		outfile.write("   SURFACE LINE:\n")
		profile_pts=[]
		for k in range(len(station_pts)):
			x,y = station_pts[k][0], station_pts[k][1]
			profile_pts.append([x,y])
			
		pp=grass.pipe_command('r.profile', input=elev, coordinates=profile_pts, resolution=res, flags="g", quiet=True)
		for line in pp.stdout:
			l=line.rstrip('\n').split(" ")
			# The r.profile output has x,y in first two columns and elev in 4th column
			outfile.write("	 "+l[0]+","+l[1]+","+l[3]+"\n")
		pp.stdout.close()
		pp.wait()
		
		# Close this CROSS-SECTION paragraph
		outfile.write(" END:\n\n")

	outfile.write("END CROSS-SECTIONS:\n\n")

	# remove temp points file
	grass.message("Removing temp vector: %s" % (xsect_pts))
	grass.run_command('g.remove', type='vector', name=xsect_pts, quiet=True, flags="f")
Example #42
0
def reclass(inf, outf, lim, clump, diag, les):
    infile = inf
    outfile = outf
    lesser = les
    limit = lim
    clumped = clump
    diagonal = diag

    s = grass.read_command("g.region", flags='p')
    kv = grass.parse_key_val(s, sep=':')
    # the projection line looks like "0 (x,y)"; keep just the leading code
    s = kv['projection'].strip().split()[0]
    if s == '0':
        grass.fatal(_("xy-locations are not supported; "
                      "need projected data with grids in meters"))

    if not grass.find_file(infile)['name']:
        grass.fatal(_("Raster map <%s> not found") % infile)

    if clumped and diagonal:
        grass.fatal(_("flags c and d are mutually exclusive"))

    if clumped:
        clumpfile = infile
    else:
        clumpfile = "%s.clump.%s" % (infile.split('@')[0], outfile)
        TMPRAST.append(clumpfile)

        if not grass.overwrite():
            if grass.find_file(clumpfile)['name']:
                grass.fatal(_("Temporary raster map <%s> exists") % clumpfile)
        if diagonal:
            grass.message(_("Generating a clumped raster file including "
                            "diagonal neighbors..."))
            grass.run_command('r.clump', flags='d', input=infile,
                              output=clumpfile)
        else:
            grass.message(_("Generating a clumped raster file ..."))
            grass.run_command('r.clump', input=infile, output=clumpfile)

    if lesser:
        grass.message(_("Generating a reclass map with area size less than "
                        "or equal to %f hectares...") % limit)
    else:
        grass.message(_("Generating a reclass map with area size greater "
                        "than or equal to %f hectares...") % limit)

    recfile = outfile + '.recl'
    TMPRAST.append(recfile)

    sflags = 'aln'
    if grass.raster_info(infile)['datatype'] in ('FCELL', 'DCELL'):
        sflags += 'i'
    p1 = grass.pipe_command('r.stats', flags=sflags, input=(clumpfile, infile),
                            sep=';')
    p2 = grass.feed_command('r.reclass', input=clumpfile, output=recfile,
                            rules='-')
    rules = ''
    for line in p1.stdout:
        f = line.rstrip(os.linesep).split(';')
        if len(f) < 5:
            continue
        hectares = float(f[4]) * 0.0001
        if lesser:
            test = hectares <= limit
        else:
            test = hectares >= limit
        if test:
            rules += "%s = %s %s\n" % (f[0], f[2], f[3])
    if rules:
        p2.stdin.write(rules)
    p1.wait()
    p2.stdin.close()
    p2.wait()
    if p2.returncode != 0:
        if lesser:
            grass.fatal(_("No areas of size less than or equal to %f "
                          "hectares found.") % limit)
        else:
            grass.fatal(_("No areas of size greater than or equal to %f "
                          "hectares found.") % limit)
    grass.mapcalc("$outfile = $recfile", outfile=outfile, recfile=recfile)
Example #43
0
def matchhist(original, target, matched):
    # pan/intensity histogram matching using numpy arrays
    grass.message(_("Histogram matching..."))

    # input images
    original = original.split('@')[0]
    target = target.split('@')[0]
    images = [original, target]

    # create a dictionary to hold arrays for each image
    arrays = {}

    for i in images:
        # calculate number of cells for each grey value for each image
        stats_out = grass.pipe_command('r.stats', flags='cin', input=i,
                                       sep=':')
        stats = stats_out.communicate()[0].split('\n')[:-1]
        stats_dict = dict(s.split(':', 1) for s in stats)
        total_cells = 0  # total non-null cells
        for j in stats_dict:
            stats_dict[j] = int(stats_dict[j])
            if j != '*':
                total_cells += stats_dict[j]

        if total_cells < 1:
            grass.fatal(_("Input has no data. Check region settings."))

        # Make a 2x256 structured array for each image with a
        #   cumulative distribution function (CDF) for each grey value.
        #   Grey value is the integer (i4) and cdf is float (f4).

        arrays[i] = np.zeros((256, ), dtype=('i4,f4'))
        cum_cells = 0  # cumulative total of cells for sum of current and all lower grey values

        for n in range(0, 256):
            if str(n) in stats_dict:
                num_cells = stats_dict[str(n)]
            else:
                num_cells = 0

            cum_cells += num_cells

            # cdf is the number of cells at or below a given grey value
            #   divided by the total number of cells
            cdf = float(cum_cells) / float(total_cells)

            # insert values into array
            arrays[i][n] = (n, cdf)

    # open file for reclass rules
    outfile = open(grass.tempfile(), 'w')

    for i in arrays[original]:
        # for each grey value and corresponding cdf value in original, find the
        #   cdf value in target that is closest to the target cdf value
        difference_list = []
        for j in arrays[target]:
            # make a list of the difference between each original cdf value and
            #   the target cdf value
            difference_list.append(abs(i[1] - j[1]))

        # get the smallest difference in the list
        min_difference = min(difference_list)

        for j in arrays[target]:
            # find the grey value in target that corresponds to the cdf
            #   closest to the original cdf
            if j[1] == i[1] + min_difference or j[1] == i[1] - min_difference:
                # build a reclass rules file from the original grey value and
                #   corresponding grey value from target
                out_line = "%d = %d\n" % (i[0], j[0])
                outfile.write(out_line)
                break

    outfile.close()

    # create reclass of target from reclass rules file
    result = grass.core.find_file(matched, element='cell')
    if result['fullname']:
        grass.run_command('g.remove', flags='f', quiet=True, type='raster',
                          name=matched)
    grass.run_command('r.reclass', input=original, out=matched,
                      rules=outfile.name)

    # Cleanup
    # remove the rules file
    grass.try_remove(outfile.name)

    # return reclass of target with histogram that matches original
    return matched
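
The nearest-CDF search above compares every original grey value against all
256 target values. An equivalent vectorized sketch with numpy (an alternative
formulation, not the addon's code; assumes two length-256 float CDF arrays):

    import numpy as np

    def nearest_cdf_match(orig_cdf, target_cdf):
        # insertion points of each original CDF value into the sorted target CDF
        idx = np.searchsorted(target_cdf, orig_cdf)
        idx = np.clip(idx, 1, len(target_cdf) - 1)
        left = target_cdf[idx - 1]
        right = target_cdf[idx]
        # step back one slot where the left neighbour is the closer match
        idx -= ((orig_cdf - left) < (right - orig_cdf)).astype(idx.dtype)
        return idx  # idx[g] is the matched target grey value for grey value g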
Example #44
0
def main():
    infile = options['input']
    lesser = options['lesser']
    greater = options['greater']
    outfile = options['output']

    s = grass.read_command("g.region", flags = 'p')
    kv = grass.parse_key_val(s, sep = ':')
    # the projection line looks like "0 (x,y)"; keep just the leading code
    s = kv['projection'].strip().split()[0]
    if s == '0':
	grass.fatal(_("xy-locations are not supported; need projected data with grids in meters"))

    if not lesser and not greater:
	grass.fatal(_("You have to specify either lesser= or greater="))
    if lesser and greater:
	grass.fatal(_("lesser= and greater= are mutually exclusive"))
    if lesser:
	limit = float(lesser)
    if greater:
	limit = float(greater)

    if not grass.find_file(infile)['name']:
	grass.fatal(_("Raster map <%s> not found") % infile)

    clumpfile = "%s.clump.%s" % (infile.split('@')[0], outfile)

    if not grass.overwrite():
	if grass.find_file(clumpfile)['name']:
	    grass.fatal(_("Temporary raster map <%s> exists") % clumpfile)

    grass.message(_("Generating a clumped raster file ..."))
    grass.run_command('r.clump', input = infile, output = clumpfile)

    if lesser:
	grass.message(_("Generating a reclass map with area size less than or equal to %f hectares...") % limit)
    else:
	grass.message(_("Generating a reclass map with area size greater than or equal to %f hectares...") % limit)

    recfile = outfile + '.recl'

    p1 = grass.pipe_command('r.stats', flags = 'aln', input = (clumpfile, infile), fs = '|')
    p2 = grass.feed_command('r.reclass', input = clumpfile, output = recfile, rules = '-')
    for line in p1.stdout:
	f = line.rstrip('\r\n').split('|')
	if len(f) < 5:
	    continue
	hectares = float(f[4]) * 0.0001
	if lesser:
	    test = hectares <= limit
	else:
	    test = hectares >= limit
	if test:
	    p2.stdin.write("%s = %s %s\n" % (f[0], f[2], f[3]))
    p1.wait()
    p2.stdin.close()
    p2.wait()

    grass.message(_("Generating output raster map <%s>...") % outfile)

    grass.mapcalc("$outfile = $recfile", outfile = outfile, recfile = recfile)
    grass.run_command('g.remove', rast = [recfile, clumpfile], quiet = True)
Example #45
0
def main():
    global tmp, sqltmp, tmpname, nuldev, vector, mask_found, rastertmp
    mask_found = False
    rastertmp = False
    #### setup temporary files
    tmp = grass.tempfile()
    sqltmp = tmp + ".sql"
    # we need a random name
    tmpname = grass.basename(tmp)

    nuldev = open(os.devnull, 'w')

    raster = options['raster']
    colprefix = options['column_prefix']
    vector = options['vector']
    layer = options['layer']
    percentile = options['percentile']

    ### setup environment variables ###
    env = grass.gisenv()
    mapset = env['MAPSET']

    vs = vector.split('@')
    if len(vs) > 1:
	vect_mapset = vs[1]
    else:
	vect_mapset = mapset

    # does map exist in CURRENT mapset?
    if vect_mapset != mapset or not grass.find_file(vector, 'vector', mapset)['file']:
	grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    vector = vs[0]

    rastertmp = "%s_%s" % (vector, tmpname)

    # check the input raster map
    if not grass.find_file(raster, 'cell')['file']:
	grass.fatal(_("Raster map <%s> not found") % raster)

    # check presence of raster MASK, put it aside
    mask_found = bool(grass.find_file('MASK', 'cell')['file'])
    if mask_found:
	grass.message(_("Raster MASK found, temporarily disabled"))
	grass.run_command('g.rename', rast = ('MASK', tmpname + "_origmask"), quiet = True)

    # save current settings:
    grass.use_temp_region()

    # Temporarily aligning region resolution to $RASTER resolution
    # keep boundary settings
    grass.run_command('g.region', align = raster)

    # prepare raster MASK
    if grass.run_command('v.to.rast', input = vector, output = rastertmp,
			 use = 'cat', quiet = True) != 0:
	grass.fatal(_("An error occurred while converting vector to raster"))

    # dump cats to file to avoid "too many argument" problem:
    p = grass.pipe_command('r.category', map = rastertmp, fs = ';', quiet = True)
    cats = []
    for line in p.stdout:
	cats.append(line.rstrip('\r\n').split(';')[0])
    p.wait()

    number = len(cats)
    if number < 1:
	grass.fatal(_("No categories found in raster map"))

    # check if DBF driver used, in this case cut to 10 chars col names:
    try:
        fi = grass.vector_db(map = vector)[int(layer)]
    except KeyError:
	grass.fatal(_('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'))
    # we need this for non-DBF driver:
    dbfdriver = fi['driver'] == 'dbf'

    # Find out which table is linked to the vector map on the given layer
    if not fi['table']:
	grass.fatal(_('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'))

    basecols = ['n', 'min', 'max', 'range', 'mean', 'stddev', 'variance', 'cf_var', 'sum']

    # we need at least three chars to distinguish [mea]n from [med]ian
    # so colprefix can't be longer than 6 chars with DBF driver
    if dbfdriver:
	colprefix = colprefix[:6]

    # do extended stats?
    if flags['e']:
	# namespace is limited in DBF but the % value is important
	if dbfdriver:
	    perccol = "per" + percentile
	else:
	    perccol = "percentile_" + percentile
	extracols = ['first_quartile', 'median', 'third_quartile'] + [perccol]
    else:
	extracols = []

    addcols = []
    for i in basecols + extracols:
	# check if column already present
	currcolumn = ("%s_%s" % (colprefix, i))
	if dbfdriver:
	    currcolumn = currcolumn[:10]

	if currcolumn in grass.vector_columns(vector, layer).keys():
	    if not flags['c']:
		grass.fatal((_("Cannot create column <%s> (already present). ") % currcolumn) +
			    _("Use -c flag to update values in this column."))
	else:
	    if i == "n":
		coltype = "INTEGER"
	    else:
		coltype = "DOUBLE PRECISION"
	    addcols.append(currcolumn + ' ' + coltype)

    if addcols:
	grass.verbose(_("Adding columns '%s'") % addcols)
	if grass.run_command('v.db.addcolumn', map = vector, columns = addcols) != 0:
	    grass.fatal(_("Adding columns failed. Exiting."))

    # calculate statistics:
    grass.message(_("Processing data (%d categories)...") % number)

    # get rid of any earlier attempts
    grass.try_remove(sqltmp)

    colnames = []
    for var in basecols + extracols:
	colname = '%s_%s' % (colprefix, var)
	if dbfdriver:
	    colname = colname[:10]
	colnames.append(colname)

    ntabcols = len(colnames)

    # do extended stats?
    if flags['e']:
	extstat = 'e'
    else:
	extstat = ""
	
    f = open(sqltmp, 'w')

    # do the stats
    p = grass.pipe_command('r.univar', flags = 'tg' + extstat, map = raster,
                           zones = rastertmp, percentile = percentile, fs = ';')

    first_line = 1
    for line in p.stdout:
	if first_line:
	    first_line = 0
	    continue

	vars = line.rstrip('\r\n').split(';')

	f.write("UPDATE %s SET" % fi['table'])
	i = 2
	first_var = 1
	for colname in colnames:
	    value = vars[i]
	    # convert nan, +nan, -nan to NULL
	    if value.lower().endswith('nan'):
		value = 'NULL'
	    if not first_var:
		f.write(" , ")
	    else:
		first_var = 0
	    f.write(" %s=%s" % (colname, value))
	    i += 1
	    # skip n_null_cells, mean_of_abs, sum_of_abs
	    if i == 3 or i == 8 or i == 13:
		i += 1

	f.write(" WHERE %s=%s;\n" % (fi['key'], vars[0]))

    p.wait()
    f.close()

    grass.message(_("Updating the database ..."))
    exitcode = grass.run_command('db.execute', input = sqltmp,
				 database = fi['database'], driver = fi['driver'])

    grass.run_command('g.remove', rast = 'MASK', quiet = True, stderr = nuldev)

    if exitcode == 0:
	grass.message((_("Statistics calculated from raster map <%s>") % raster) +
		      (_(" and uploaded to attribute table of vector map <%s>.") % vector))
    else:
	grass.warning(_("Failed to upload statistics to attribute table of vector map <%s>.") % vector)
    
    
    sys.exit(exitcode)
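
For orientation, one UPDATE statement as the loop above writes it to the SQL
file (table, column, and key names hypothetical):

    UPDATE mytable SET elev_n=42 , elev_min=101.5 , elev_max=230.0 WHERE cat=3;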
Example #46
0
def main():
    if flags['r'] and flags['s']:
	grass.fatal(_("Either -r or -s flag"))

    mapname = options['map']
    option = options['option']
    layer = options['layer']
    units = options['units']

    nuldev = open(os.devnull, 'w')

    if not grass.find_file(mapname, 'vector')['file']:
	grass.fatal(_("Vector map '%s' not found in mapset search path.") % mapname)

    colnames = grass.vector_columns(mapname, layer, getDict = False, stderr = nuldev)
    if not colnames:
	colnames = ['cat']

    if option == 'coor':
	columns = ['dummy1','dummy2','dummy3']
	extracolnames = ['x','y','z']
    else:
	columns = ['dummy1']
	extracolnames = [option]

    if units in ['p','percent']:
	unitsp = 'meters'
    elif units:
	unitsp = units
    else:
	unitsp = None

    # NOTE: we suppress -1 cat and 0 cat

    if colnames:
	p = grass.pipe_command('v.db.select', quiet = True, flags='c', map = mapname, layer = layer)
	records1 = []
	for line in p.stdout:
	    cols = line.rstrip('\r\n').split('|')
	    if cols[0] == '0':
		continue
	    records1.append([int(cols[0])] + cols[1:])
	p.wait()
	if p.returncode != 0:
	    sys.exit(1)

	records1.sort()

	if len(records1) == 0:
	    try:
		f = grass.vector_db(map = mapname)[int(layer)]
		grass.fatal(_("There is a table connected to input vector map '%s', but "
			      "there are no categories present in the key column '%s'. Consider "
			      "using v.to.db to correct this.") % (mapname, f['key']))
	    except KeyError:
		pass

	#fetch the requested attribute sorted by cat:
	p = grass.pipe_command('v.to.db', flags = 'p',
                               quiet = True,
			       map = mapname, option = option, columns = columns,
			       layer = layer, units = unitsp)
	records2 = []
	for line in p.stdout:
	    fields = line.rstrip('\r\n').split('|')
	    if fields[0] in ['cat', '-1', '0']:
		continue
	    records2.append([int(fields[0])] + fields[1:])
	p.wait()
	records2.sort()

	#make pre-table
	records3 = [r1 + r2[1:] for r1, r2 in zip(records1, records2)]
    else:
	records1 = []
	p = grass.pipe_command('v.category', input = mapname, layer = layer, option = 'print')
	for line in p.stdout:
	    field = int(line.rstrip())
	    if field > 0:
		records1.append(field)
	p.wait()
	records1.sort()
	records1 = uniq(records1)

	#make pre-table
	p = grass.pipe_command('v.to.db', flags = 'p',
			       map = mapname, option = option, columns = columns,
			       layer = layer, units = unitsp)
	records3 = []
	for line in p.stdout:
	    fields = line.rstrip('\r\n').split('|')
	    if fields[0] in ['cat', '-1', '0']:
		continue
	    records3.append([int(fields[0])] + fields[1:])
	p.wait()
	records3.sort()

    # print table header
    sys.stdout.write('|'.join(colnames + extracolnames) + '\n')

    #make and print the table:
    numcols = len(colnames) + len(extracolnames)

    # calculate percents if requested
    if units in ['p','percent']:
	# calculate total area value
	areatot = 0
	for r in records3:
	    areatot += float(r[-1])

	# calculate area percentages
	records4 = [float(r[-1]) * 100 / areatot for r in records3]
	records3 = [r1 + [r4] for r1, r4 in zip(records1, records4)]

    if flags['s']:
	# sort
	records3.sort(key = lambda r: (r[0], r[-1]))
    elif flags['r']:
	# reverse sort
	records3.sort(key = lambda r: (r[0], r[-1]), reverse = True)

    for r in records3:
	sys.stdout.write('|'.join(map(str,r)) + '\n')
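
A tiny worked sketch of the percent branch above, with made-up area values:

    areas = [10.0, 30.0, 60.0]                      # last column of records3
    areatot = sum(areas)                            # 100.0
    percents = [a * 100 / areatot for a in areas]   # [10.0, 30.0, 60.0]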